repo_name stringlengths 6-130 | hexsha list | file_path list | code list | apis list | possible_versions list |
---|---|---|---|---|---|
lukasz-migas/napari-1d
|
[
"b0f081a8711ae941b3e4b5c58c3aea56bd0e3277",
"b0f081a8711ae941b3e4b5c58c3aea56bd0e3277",
"b0f081a8711ae941b3e4b5c58c3aea56bd0e3277",
"b0f081a8711ae941b3e4b5c58c3aea56bd0e3277",
"b0f081a8711ae941b3e4b5c58c3aea56bd0e3277"
] |
[
"napari_plot/layers/multiline/_multiline_list.py",
"examples/plot-layer-types.py",
"examples/napari-and-1d-live-callback.py",
"napari_plot/_vispy/vendored/axis.py",
"napari_plot/_vispy/layers/scatter.py"
] |
[
"\"\"\"Container class for MultiLine data.\"\"\"\nimport typing as ty\n\nimport numpy as np\n\nfrom ._multiline_utils import get_data_limits, make_multiline_color, make_multiline_line\n\n\nclass MultiLineList:\n \"\"\"Multi-line class.\"\"\"\n\n def __init__(self):\n self._data = {\"xs\": [], \"ys\": []}\n self._color = np.empty((0, 4))\n\n def add(self, xs: ty.List, ys: ty.List[np.ndarray], color: np.ndarray):\n \"\"\"Add data to store.\"\"\"\n if len(ys) != len(color):\n raise ValueError(\"The number of `ys` must be equal to the number of colors.\")\n if len(xs) == 0 and self.n_lines == 0:\n raise ValueError(\"Cannot add `ys` to empty container.\")\n\n # check if adding data to existing stores\n if self.n_lines > 0:\n if self.xs_equal_ys:\n if len(xs) != len(ys) and len(xs) > 0:\n raise ValueError(\n \"Cannot add `xs` and `ys` arrays that are not of equal length if what is already present has\"\n \" equal number of arrays.\"\n )\n else:\n if len(xs) > 0:\n raise ValueError(\n \"Cannot add `xs` and `ys` arrays to layer that does not have equal number of `xs` and `ys`.\"\n )\n\n # make sure tha xs is iterable\n if len(xs) == 0:\n xs = [None] * len(ys)\n elif len(xs) == 1 and len(ys) > 1:\n xs.extend([None] * (len(ys) - 1))\n for x, y, _color in zip(xs, ys, color):\n self._add(x, y, _color)\n\n def _add(self, x, y, color):\n \"\"\"Append data to containers.\"\"\"\n if x is not None:\n self._data[\"xs\"].append(x)\n self._data[\"ys\"].append(y)\n if color is None:\n color = np.array([1, 1, 1, 1])\n self._color = np.vstack([self._color, color])\n\n @property\n def n_lines(self) -> int:\n \"\"\"Return the number of lines.\"\"\"\n return len(self._data[\"ys\"])\n\n @property\n def xs_equal_ys(self) -> bool:\n \"\"\"Flag that indicates whether there is equal number of `x` and `y` arrays.\"\"\"\n return len(self._data[\"xs\"]) == len(self._data[\"ys\"])\n\n @property\n def xs(self) -> ty.List[np.ndarray]:\n \"\"\"Get x-axis arrays.\"\"\"\n return self._data[\"xs\"]\n\n @xs.setter\n def xs(self, value):\n self._data[\"xs\"] = value\n\n @property\n def ys(self) -> ty.List[np.ndarray]:\n \"\"\"Get y-axis arrays.\"\"\"\n return self._data[\"ys\"]\n\n @ys.setter\n def ys(self, value):\n self._data[\"ys\"] = value\n\n @property\n def data(self):\n \"\"\"Return nicely formatted data.\"\"\"\n return\n\n @property\n def extent_data(self) -> np.ndarray:\n \"\"\"Get data extents.\"\"\"\n return get_data_limits(self.xs, self.ys)\n\n def get_display_lines(self):\n \"\"\"Return data in a manner that can be understood by vispy Line visual.\"\"\"\n return make_multiline_line(self.xs, self.ys, self.color)\n\n def get_display_color(self):\n \"\"\"Return color.\"\"\"\n return make_multiline_color(self.ys, self.color)\n\n @property\n def color(self):\n \"\"\"(N x 4) np.ndarray: Array of RGBA face colors for each shape\"\"\"\n return self._color\n\n @color.setter\n def color(self, color):\n self._set_color(color)\n\n def _set_color(self, colors):\n \"\"\"Set the face_color or edge_color property\n\n Parameters\n ----------\n colors : (N, 4) np.ndarray\n The value for setting edge or face_color. 
There must\n be one color for each shape\n \"\"\"\n n_lines = self.n_lines\n if not np.all(colors.shape == (n_lines, 4)):\n raise ValueError(\n f\"color must have shape ({n_lines}, 4)\",\n )\n\n for i, col in enumerate(colors):\n self.update_color(i, col)\n\n def update_color(self, index, color):\n \"\"\"Updates the face color of a single shape located at index.\n\n Parameters\n ----------\n index : int\n Location in list of the shape to be changed.\n color : str | tuple | np.ndarray\n If string can be any color name recognized by vispy or hex value if\n starting with `#`. If array-like must be 1-dimensional array with 3\n or 4 elements.\n \"\"\"\n self._color[index] = color\n",
"\"\"\"Display image and 1d plot.\"\"\"\nimport numpy as np\n\nimport napari_plot\n\nN_POINTS = 1000\nN_MIN = 0\nN_MAX = 300\n\n\ndef add_line():\n \"\"\"Line plot\"\"\"\n x = np.arange(N_POINTS)\n y = np.random.randint(N_MIN, N_MAX, N_POINTS)\n viewer1d.add_line(np.c_[x, y], name=\"Line\", visible=True)\n\n\ndef add_centroids():\n \"\"\"Centroids plot\"\"\"\n x = np.arange(N_POINTS)\n y = np.random.randint(N_MIN, N_MAX, N_POINTS)\n viewer1d.add_centroids(np.c_[x, y], color=(1.0, 0.0, 1.0, 1.0), name=\"Centroids\", visible=True)\n\n\ndef add_scatter():\n \"\"\"Centroids plot\"\"\"\n x = np.random.randint(N_MIN, N_MAX, N_POINTS // 2)\n y = np.random.randint(N_MIN, N_POINTS, N_POINTS // 2)\n viewer1d.add_scatter(np.c_[x, y], size=5, name=\"Scatter\", visible=True)\n\n\ndef add_region():\n \"\"\"Region plot\"\"\"\n regions = [\n ([25, 50], \"vertical\"),\n ([50, 400], \"horizontal\"),\n ([80, 90], \"vertical\"),\n ]\n viewer1d.add_region(regions, face_color=[\"red\", \"green\", \"cyan\"], opacity=0.5, name=\"Spans\", visible=True)\n\n\ndef add_infline():\n \"\"\"Inf line plot\"\"\"\n viewer1d.add_inf_line(\n [50, 15, 250],\n orientation=[\"vertical\", \"vertical\", \"horizontal\"],\n width=3,\n color=[\"red\", \"orange\", \"green\"],\n name=\"Infinite Line\",\n visible=True,\n )\n\n\nviewer1d = napari_plot.Viewer()\n\nadd_line()\nadd_centroids()\nadd_region()\nadd_scatter()\nadd_infline()\n\nnapari_plot.run()\n",
"\"\"\"Create simple callback that modifies the line visual.\"\"\"\nfrom skimage import data\nfrom skimage import measure\nimport numpy as np\nimport napari\nimport napari_plot\nfrom napari_plot._qt.qt_viewer import QtViewer\n\n\ndef _get_line_data(image, start, end):\n return measure.profile_line(image, start, end, mode=\"nearest\")\n\n\nviewer = napari.Viewer()\nchelsea = data.astronaut().mean(-1)\nviewer.add_image(chelsea)\nshapes_layer = viewer.add_shapes(\n [np.array([[11, 13], [250, 313]]), np.array([[100, 10], [10, 345]])],\n shape_type=\"line\",\n edge_width=5,\n edge_color=\"coral\",\n face_color=\"royalblue\",\n)\nshapes_layer.mode = \"select\"\n\nviewer1d = napari_plot.ViewerModel1D()\nviewer1d.axis.y_label = \"Intensity\"\nviewer1d.axis.x_label = \"\"\nviewer1d.text_overlay.visible = True\nviewer1d.text_overlay.position = \"top_right\"\n\nqt_viewer = QtViewer(viewer1d, parent=viewer.window.qt_viewer.parent())\n\nlines = []\nfor i, line in enumerate(shapes_layer.data):\n y = _get_line_data(chelsea, *line)\n lines.append(viewer1d.add_line(np.c_[np.arange(len(y)), y], name=str(i)))\n\n\n# hook the lines up to events\ndef _profile_lines(image, shape_layer):\n # only a single line for this example\n for i, line in enumerate(shape_layer.data):\n if i in shape_layer._selected_data:\n y = _get_line_data(image, *line)\n lines[i].data = np.c_[np.arange(len(y)), y]\n\n\n@shapes_layer.mouse_drag_callbacks.append\ndef _profile_lines_drag(layer, event):\n _profile_lines(chelsea, layer)\n yield\n while event.type == \"mouse_move\":\n _profile_lines(chelsea, layer)\n yield\n\n\nviewer.window.add_dock_widget(qt_viewer, area=\"bottom\", name=\"Line Widget\")\nnapari.run()\n",
"\"\"\"Reimplementation of axis-visual\"\"\"\nimport numpy as np\nimport vispy.visuals.axis\nfrom vispy.visuals.axis import Ticker as _Ticker\nfrom vispy.visuals.axis import _get_ticks_talbot\n\ndefault_tick_formatter = lambda x: \"%g\" % x # noqa\n\n\nclass Ticker(_Ticker):\n \"\"\"Monkey-patched Ticker class\"\"\"\n\n def __init__(self, axis, anchors=None, tick_format_func=default_tick_formatter):\n super().__init__(axis, anchors)\n self.tick_format_func = tick_format_func\n\n def _get_tick_frac_labels(self):\n \"\"\"Get the major ticks, minor ticks, and major labels\"\"\"\n minor_num = 4 # number of minor ticks per major division\n if self.axis.scale_type == \"linear\":\n domain = self.axis.domain\n if domain[1] < domain[0]:\n flip = True\n domain = domain[::-1]\n else:\n flip = False\n offset = domain[0]\n scale = domain[1] - domain[0]\n\n transforms = self.axis.transforms\n length = self.axis.pos[1] - self.axis.pos[0] # in logical coords\n n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi\n\n major = _get_ticks_talbot(domain[0], domain[1], n_inches, 2)\n labels = [self.tick_format_func(x) for x in major]\n majstep = major[1] - major[0]\n minor = []\n minstep = majstep / (minor_num + 1)\n minstart = 0 if self.axis._stop_at_major[0] else -1\n minstop = -1 if self.axis._stop_at_major[1] else 0\n for i in range(minstart, len(major) + minstop):\n maj = major[0] + i * majstep\n minor.extend(np.linspace(maj + minstep, maj + majstep - minstep, minor_num))\n major_frac = (major - offset) / scale\n minor_frac = (np.array(minor) - offset) / scale\n major_frac = major_frac[::-1] if flip else major_frac\n use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)\n major_frac = major_frac[use_mask]\n labels = [l for li, l in enumerate(labels) if use_mask[li]]\n minor_frac = minor_frac[(minor_frac > -0.0001) & (minor_frac < 1.0001)]\n elif self.axis.scale_type == \"logarithmic\":\n return NotImplementedError\n elif self.axis.scale_type == \"power\":\n return NotImplementedError\n return major_frac, minor_frac, labels\n\n\nvispy.visuals.axis.Ticker = Ticker\n",
"\"\"\"Scatter points layer\"\"\"\nimport typing as ty\n\nimport numpy as np\nfrom napari._vispy.layers.base import VispyBaseLayer\nfrom napari._vispy.utils.text import update_text\nfrom vispy.scene.visuals import Compound, Markers, Text\n\nif ty.TYPE_CHECKING:\n from ...layers import Scatter\n\n\nclass VispyScatterLayer(VispyBaseLayer):\n \"\"\"Line layer\"\"\"\n\n def __init__(self, layer: \"Scatter\"):\n # Create a compound visual with the following two sub-visuals:\n # Markers: The actual markers of each point\n # Text: Text line for each point\n node = Compound([Markers(), Text()])\n super().__init__(layer, node)\n\n self.layer.events.symbol.connect(self._on_data_change)\n self.layer.events.size.connect(self._on_data_change)\n self.layer.events.edge_width.connect(self._on_data_change)\n self.layer.events.edge_color.connect(self._on_data_change)\n self.layer.events.face_color.connect(self._on_data_change)\n self.layer.events.scaling.connect(self._on_data_change)\n self.layer.text.events.connect(self._on_text_change)\n\n self.reset()\n self._on_data_change()\n\n def _on_data_change(self, event=None):\n \"\"\"Set data\"\"\"\n set_data = self.node._subvisuals[0].set_data\n\n set_data(\n self.layer.data[:, ::-1],\n size=self.layer.size,\n edge_width=self.layer.edge_width,\n symbol=self.layer.symbol,\n edge_color=self.layer.edge_color,\n face_color=self.layer.face_color,\n scaling=self.layer.scaling,\n )\n self._on_text_change(update_node=False)\n self.node.update()\n\n def _on_text_change(self, update_node=True):\n \"\"\"Function to update the text node properties\n\n Parameters\n ----------\n update_node : bool\n If true, update the node after setting the properties\n \"\"\"\n ndisplay = 2\n if self.layer._text.visible is False:\n text_coords = np.zeros((1, ndisplay))\n text = []\n anchor_x = \"center\"\n anchor_y = \"center\"\n else:\n text_coords, anchor_x, anchor_y = self.layer._view_text_coords\n if len(text_coords) == 0:\n text_coords = np.zeros((1, ndisplay))\n text = self.layer._view_text\n text_node = self.node._subvisuals[-1]\n\n update_text(\n text_values=text,\n coords=text_coords,\n anchor=(anchor_x, anchor_y),\n rotation=self.layer._text.rotation,\n color=self.layer._text.color,\n size=self.layer._text.size,\n ndisplay=ndisplay,\n text_node=text_node,\n )\n if update_node:\n self.node.update()\n"
] |
[
[
"numpy.all",
"numpy.vstack",
"numpy.array",
"numpy.empty"
],
[
"numpy.arange",
"numpy.random.randint"
],
[
"numpy.array"
],
[
"numpy.array",
"numpy.sum",
"numpy.linspace"
],
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fandulu/IHDA
|
[
"a859729cbb639a3233f3c8c7fce894bac27b19ee"
] |
[
"eval_metrics.py"
] |
[
"from __future__ import print_function, absolute_import\nimport os\nimport glob\nimport re\nimport sys\nimport os.path as osp\nimport numpy as np\n\nimport random\nfrom time import time\n\n\"\"\"Cross-Modality ReID\"\"\"\n\ndef eval_sbir(distmat, q_pids, g_pids, max_rank = 20):\n num_q, num_g = distmat.shape\n if num_g < max_rank:\n max_rank = num_g\n print(\"Note: number of gallery samples is quite small, got {}\".format(num_g))\n indices = np.argsort(distmat, axis=1)\n matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)\n\n # compute cmc curve for each query\n all_cmc = []\n all_AP = []\n num_valid_q = 0. # number of valid query\n \n # only two cameras\n q_camids = np.ones(num_q).astype(np.int32)\n g_camids = 2* np.ones(num_g).astype(np.int32)\n \n for q_idx in range(num_q):\n # get query pid and camid\n q_pid = q_pids[q_idx]\n q_camid = q_camids[q_idx]\n\n # remove gallery samples that have the same pid and camid with query\n order = indices[q_idx]\n remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)\n keep = np.invert(remove)\n\n # compute cmc curve\n raw_cmc = matches[q_idx][keep] # binary vector, positions with value 1 are correct matches\n if not np.any(raw_cmc):\n # this condition is true when query identity does not appear in gallery\n continue\n\n cmc = raw_cmc.cumsum()\n cmc[cmc > 1] = 1\n\n all_cmc.append(cmc[:max_rank])\n num_valid_q += 1.\n\n # compute average precision\n # reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision\n num_rel = raw_cmc.sum()\n tmp_cmc = raw_cmc.cumsum()\n tmp_cmc = [x / (i+1.) for i, x in enumerate(tmp_cmc)]\n tmp_cmc = np.asarray(tmp_cmc) * raw_cmc\n AP = tmp_cmc.sum() / num_rel\n all_AP.append(AP)\n\n assert num_valid_q > 0, \"Error: all query identities do not appear in gallery\"\n\n all_cmc = np.asarray(all_cmc).astype(np.float32)\n all_cmc = all_cmc.sum(0) / num_valid_q\n mAP = np.mean(all_AP)\n\n return all_cmc, mAP\n \n \n \ndef eval_pku(distmat, q_pids, g_pids, max_rank = 20):\n num_q, num_g = distmat.shape\n if num_g < max_rank:\n max_rank = num_g\n print(\"Note: number of gallery samples is quite small, got {}\".format(num_g))\n indices = np.argsort(distmat, axis=1)\n matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)\n\n # compute cmc curve for each query\n all_cmc = []\n all_AP = []\n num_valid_q = 0. # number of valid query\n \n # only two cameras\n q_camids = np.ones(num_q).astype(np.int32)\n g_camids = 2* np.ones(num_g).astype(np.int32)\n \n for q_idx in range(num_q):\n # get query pid and camid\n q_pid = q_pids[q_idx]\n q_camid = q_camids[q_idx]\n\n # remove gallery samples that have the same pid and camid with query\n order = indices[q_idx]\n remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)\n keep = np.invert(remove)\n\n # compute cmc curve\n raw_cmc = matches[q_idx][keep] # binary vector, positions with value 1 are correct matches\n if not np.any(raw_cmc):\n # this condition is true when query identity does not appear in gallery\n continue\n\n cmc = raw_cmc.cumsum()\n cmc[cmc > 1] = 1\n\n all_cmc.append(cmc[:max_rank])\n num_valid_q += 1.\n\n # compute average precision\n # reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision\n num_rel = raw_cmc.sum()\n tmp_cmc = raw_cmc.cumsum()\n tmp_cmc = [x / (i+1.) 
for i, x in enumerate(tmp_cmc)]\n tmp_cmc = np.asarray(tmp_cmc) * raw_cmc\n AP = tmp_cmc.sum() / num_rel\n all_AP.append(AP)\n\n assert num_valid_q > 0, \"Error: all query identities do not appear in gallery\"\n\n all_cmc = np.asarray(all_cmc).astype(np.float32)\n all_cmc = all_cmc.sum(0) / num_valid_q\n mAP = np.mean(all_AP)\n\n return all_cmc, mAP"
] |
[
[
"numpy.invert",
"numpy.asarray",
"numpy.ones",
"numpy.mean",
"numpy.any",
"numpy.argsort"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SeanMabli/aiinpy
|
[
"bd332fce454c489e236878c9da91bb86ec6dda14",
"bd332fce454c489e236878c9da91bb86ec6dda14",
"827e4f85861436c0332046fa8aa84e24153513d6",
"bd332fce454c489e236878c9da91bb86ec6dda14"
] |
[
"example/src/nn.py",
"example/test-nnderivative.py",
"example/old/mish.py",
"example/src/tanh.py"
] |
[
"import numpy as np\nfrom .binarystep import binarystep\nfrom .gaussian import gaussian\nfrom .identity import identity\nfrom .leakyrelu import leakyrelu\nfrom .mish import mish\nfrom .relu import relu\nfrom .selu import selu\nfrom .sigmoid import sigmoid\nfrom .silu import silu\nfrom .softmax import softmax\nfrom .softplus import softplus\nfrom .stablesoftmax import stablesoftmax\nfrom .tanh import tanh\n\nclass nn:\n def __init__(self, outshape, activation, learningrate, weightsinit=(-1, 1), biasesinit=(0, 0), inshape=None):\n self.weightsinit, self.biasesinit = weightsinit, biasesinit\n self.activation, self.learningrate = activation, learningrate\n self.inshape = inshape\n if inshape is not None:\n self.weights = np.random.uniform(weightsinit[0], weightsinit[1], (np.prod(inshape), np.prod(outshape)))\n self.biases = np.random.uniform(biasesinit[0], biasesinit[1], np.prod(outshape))\n self.outshape = outshape\n \n def __copy__(self):\n return type(self)(self.outshape, self.activation, self.learningrate, self.weightsinit, self.biasesinit, self.inshape)\n\n def __repr__(self):\n return 'nn(inshape=' + str(self.inshape) + ', outshape=' + str(self.outshape) + ', activation=' + str(self.activation.__repr__()) + ', learningrate=' + str(self.learningrate) + ', weightsinit=' + str(self.weightsinit) + ', biasesinit=' + str(self.biasesinit) + ')'\n\n def modelinit(self, inshape):\n if type(inshape) == tuple and len(inshape) == 1:\n inshape = inshape[0]\n self.inshape = inshape\n\n self.weights = np.random.uniform(self.weightsinit[0], self.weightsinit[1], (np.prod(inshape), np.prod(self.outshape)))\n self.biases = np.random.uniform(self.biasesinit[0], self.biasesinit[1], np.prod(self.outshape))\n return self.outshape\n\n def forward(self, input):\n self.input = input.flatten()\n out = self.weights.T @ self.input + self.biases\n self.out = self.activation.forward(out)\n self.derivative = self.activation.backward(out) # now it applys the derivative to the output without the activation function, check if this is right\n return self.out.reshape(self.outshape)\n\n def backward(self, outerror):\n outerror = outerror.flatten()\n outgradient = self.derivative * outerror if np.ndim(self.derivative) == 1 else self.derivative @ outerror\n inputerror = self.weights @ outerror\n self.biases += outgradient * self.learningrate\n self.weights += np.outer(self.input.T, outgradient) * self.learningrate\n return inputerror.reshape(self.inshape)",
"import numpy as np\nimport src as newai\nimport old as oldai\nimport warnings\nwarnings.filterwarnings(\"error\")\n\ninTrainData = np.random.choice(([0, 1]), (2, 100))\noutTrainData = np.zeros((2, 100))\nfor i in range(100):\n outTrainData[:, i] = [1, 0] if sum(inTrainData[:, i]) == 1 else [0, 1]\n\nyl = 0\nxl = 0\n\nfor _ in range(1000):\n try:\n activation = np.random.choice([newai.relu(), newai.leakyrelu(), newai.mish(), newai.selu(), newai.softplus(), newai.stablesoftmax(), newai.binarystep(), newai.gaussian(), newai.identity()], 3)\n # not included: newai.silu() (overflow error), newai.sigmoid(), newai.tanh()\n newmodel = newai.model(inshape=2, outshape=2, model=[\n newai.nn(outshape=16, activation=activation[0], learningrate=0.01),\n newai.nn(outshape=16, activation=activation[1], learningrate=0.01),\n newai.nn(outshape=2, activation=activation[2], learningrate=0.01)\n ])\n\n oldmodel = oldai.model(inshape=2, outshape=2, model=[\n oldai.nn(outshape=16, activation=activation[0], learningrate=0.01),\n oldai.nn(outshape=16, activation=activation[1], learningrate=0.01),\n oldai.nn(outshape=2, activation=activation[2], learningrate=0.01)\n ])\n\n y = np.sum(newmodel.train((inTrainData, outTrainData), 1000))\n x = 0\n for i in oldmodel.train((inTrainData, outTrainData), 1000):\n x += np.sum(abs(i))\n\n yl += abs(y)\n xl += abs(x)\n print(yl, xl)\n \n except:\n pass",
"import numpy as np\nfrom .tanh import tanh\n\nclass mish:\n def forward(self, input):\n return input * tanh.forward(np.log(1 + np.exp(input)))\n\n def backward(self, input):\n return (np.exp(input) * ((4 * np.exp(2 * input)) + np.exp(3 * input) + (4 * (1 + input)) + (np.exp(input) * (6 + (4 * input))))) / np.square(2 + (2 * np.exp(input)) + np.exp(2 * input))",
"import numpy as np\n\nclass tanh:\n def __repr__(self):\n return 'tanh()'\n\n # def forwardone(self, input):\n # return (np.exp(input) - np.exp(-input)) / (np.exp(input) + np.exp(-input))\n\n # def forwardtwo(self, input):\n # return (np.exp(2 * input) - 1) / (np.exp(2 * input) + 1)\n\n # def forwardthree(self, input):\n # a = np.exp(input)\n # return (a - 1 / a) / (a + 1 / a)\n\n def forward(self, input):\n a = np.exp(2 * input)\n return (a - 1) / (a + 1)\n\n # def backwardone(self, input):\n # return (4 * np.exp(2 * input)) / np.square(np.exp(2 * input) + 1)\n\n # def backwardtwo(self, input):\n # return 4 / np.square(np.exp(input) + np.exp(-input))\n\n def backward(self, input):\n a = np.exp(2 * input)\n return (4 * a) / np.square(a + 1)\n \n # def backwardfour(self, input):\n # a = np.exp(input)\n # return 4 / np.square(a ** 2 + 1 / a)\n\n# forward\n# import timeit\n# print(timeit.timeit('(np.exp(2 * np.random.uniform(-1, 1, (100, 100))) - 1) / (np.exp(2 * np.random.uniform(-1, 1, (100, 100))) + 1)', setup='import numpy as np', number=10000))\n# print(timeit.timeit('(np.exp(np.random.uniform(-1, 1, (100, 100))) - np.exp(-np.random.uniform(-1, 1, (100, 100)))) / (np.exp(np.random.uniform(-1, 1, (100, 100))) + np.exp(-np.random.uniform(-1, 1, (100, 100))))', setup='import numpy as np', number=10000))\n# print(timeit.timeit('a = np.exp(np.random.uniform(-1, 1, (100, 100))); (a - 1 / a) / (a + 1 / a)', setup='import numpy as np', number=10000))\n# print(timeit.timeit('a = np.exp(2 * np.random.uniform(-1, 1, (100, 100))); (a - 1) / (a + 1)', setup='import numpy as np', number=10000))\n\n# backward\n# import timeit\n# print(timeit.timeit('(4 * np.exp(2 * np.random.uniform(-1, 1, (100, 100)))) / np.square(np.exp(2 * np.random.uniform(-1, 1, (100, 100))) + 1)', setup='import numpy as np', number=10000)) # 2.7722288\n# print(timeit.timeit('4 / np.square(np.exp(np.random.uniform(-1, 1, (100, 100))) + np.exp(-np.random.uniform(-1, 1, (100, 100))))', setup='import numpy as np', number=10000)) # 2.6440156000000004\n# print(timeit.timeit('a = np.exp(2 * np.random.uniform(-1, 1, (100, 100))); (4 * a) / np.square(a + 1)', setup='import numpy as np', number=10000)) # 1.4674546\n# print(timeit.timeit('a = np.exp(np.random.uniform(-1, 1, (100, 100))); 4 / np.square(a ** 2 + 1 / a)', setup='import numpy as np', number=10000)) # 1.567536"
] |
[
[
"numpy.ndim",
"numpy.outer",
"numpy.prod"
],
[
"numpy.zeros",
"numpy.random.choice"
],
[
"numpy.exp"
],
[
"numpy.square",
"numpy.exp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
spmallick/depthai_blazepose
|
[
"bd181e33e20d90857790529f516d814cc1efcf46"
] |
[
"BlazeposeDepthaiEdge.py"
] |
[
"import numpy as np\nimport cv2\nfrom numpy.core.fromnumeric import trace\nimport mediapipe_utils as mpu\nfrom pathlib import Path\nfrom FPS import FPS, now\nimport depthai as dai\nimport marshal\nimport sys\nfrom string import Template\nfrom math import sin, cos\n\nSCRIPT_DIR = Path(__file__).resolve().parent\nPOSE_DETECTION_MODEL = str(SCRIPT_DIR / \"models/pose_detection_sh4.blob\")\nLANDMARK_MODEL_FULL = str(SCRIPT_DIR / \"models/pose_landmark_full_sh4.blob\")\nLANDMARK_MODEL_HEAVY = str(SCRIPT_DIR / \"models/pose_landmark_heavy_sh4.blob\")\nLANDMARK_MODEL_LITE = str(SCRIPT_DIR / \"models/pose_landmark_lite_sh4.blob\")\nDETECTION_POSTPROCESSING_MODEL = str(SCRIPT_DIR / \"custom_models/DetectionBestCandidate_sh1.blob\")\nDIVIDE_BY_255_MODEL = str(SCRIPT_DIR / \"custom_models/DivideBy255_sh1.blob\")\nTEMPLATE_MANAGER_SCRIPT = str(SCRIPT_DIR / \"template_manager_script.py\")\n\n\ndef to_planar(arr: np.ndarray, shape: tuple) -> np.ndarray:\n return cv2.resize(arr, shape).transpose(2,0,1).flatten()\n\nclass BlazeposeDepthai:\n \"\"\"\n Blazepose body pose detector\n Arguments:\n - input_src: frame source, \n - \"rgb\" or None: OAK* internal color camera,\n - \"rgb_laconic\": same as \"rgb\" but without sending the frames to the host,\n Note that as we are in Edge mode, input sources coming from the host like a image or a video is not supported \n - pd_model: Blazepose detection model blob file (if None, takes the default value POSE_DETECTION_MODEL),\n - pd_score: confidence score to determine whether a detection is reliable (a float between 0 and 1).\n - pp_model: detection postprocessing model blob file (if None, takes the default value DETECTION_POSTPROCESSING_MODEL),,\n - lm_model: Blazepose landmark model blob file\n - None or \"full\": the default blob file LANDMARK_MODEL_FULL,\n - \"lite\": the default blob file LANDMARK_MODEL_LITE,\n - \"831\": the full model from previous version of mediapipe (0.8.3.1) LANDMARK_MODEL_FULL_0831,\n - a path of a blob file. \n - lm_score_thresh : confidence score to determine whether landmarks prediction is reliable (a float between 0 and 1).\n - xyz: boolean, when True get the (x, y, z) coords of the reference point (center of the hips) (if the device supports depth measures).\n - crop : boolean which indicates if square cropping is done or not\n - smoothing: boolean which indicates if smoothing filtering is applied\n - filter_window_size and filter_velocity_scale:\n The filter keeps track (on a window of specified size) of\n value changes over time, which as result gives velocity of how value\n changes over time. With higher velocity it weights new values higher.\n - higher filter_window_size adds to lag and to stability\n - lower filter_velocity_scale adds to lag and to stability\n\n - internal_fps : when using the internal color camera as input source, set its FPS to this value (calling setFps()).\n - internal_frame_height : when using the internal color camera, set the frame height (calling setIspScale()).\n The width is calculated accordingly to height and depends on value of 'crop'\n - stats : boolean, when True, display some statistics when exiting. 
\n - trace: boolean, when True print some debug messages \n - force_detection: boolean, force person detection on every frame (never use landmarks from previous frame to determine ROI) \n \"\"\"\n def __init__(self, input_src=\"rgb\",\n pd_model=None, \n pd_score_thresh=0.5,\n pp_model=None,\n lm_model=None,\n lm_score_thresh=0.7,\n xyz = False,\n crop=False,\n smoothing= True,\n filter_window_size=5,\n filter_velocity_scale=10,\n stats=False, \n internal_fps=None,\n internal_frame_height=1080,\n trace=False,\n force_detection=False):\n\n self.pd_model = pd_model if pd_model else POSE_DETECTION_MODEL\n self.pp_model = pp_model if pd_model else DETECTION_POSTPROCESSING_MODEL\n self.divide_by_255_model = DIVIDE_BY_255_MODEL\n print(f\"Pose detection blob file : {self.pd_model}\")\n self.rect_transf_scale = 1.25\n if lm_model is None or lm_model == \"full\":\n self.lm_model = LANDMARK_MODEL_FULL\n elif lm_model == \"lite\":\n self.lm_model = LANDMARK_MODEL_LITE\n elif lm_model == \"heavy\":\n self.lm_model = LANDMARK_MODEL_HEAVY\n else:\n self.lm_model = lm_model\n print(f\"Landmarks using blob file : {self.lm_model}\")\n\n self.pd_score_thresh = pd_score_thresh\n self.lm_score_thresh = lm_score_thresh\n self.smoothing = smoothing\n self.crop = crop\n self.internal_fps = internal_fps\n self.stats = stats\n self.presence_threshold = 0.5\n self.visibility_threshold = 0.5\n\n self.trace = trace\n self.force_detection = force_detection\n\n self.device = dai.Device()\n self.xyz = False\n \n if input_src == None or input_src == \"rgb\" or input_src == \"rgb_laconic\":\n self.input_type = \"rgb\" # OAK* internal color camera\n self.laconic = input_src == \"rgb_laconic\" # Camera frames are not sent to the host \n if xyz:\n # Check if the device supports stereo\n cameras = self.device.getConnectedCameras()\n if dai.CameraBoardSocket.LEFT in cameras and dai.CameraBoardSocket.RIGHT in cameras:\n self.xyz = True\n else:\n print(\"Warning: depth unavailable on this device, 'xyz' argument is ignored\")\n\n if internal_fps is None: \n if \"full\" in str(self.lm_model):\n self.internal_fps = 18 if self.xyz else 20\n elif \"heavy\" in str(lm_model):\n self.internal_fps = 7 if self.xyz else 8\n else: # \"lite\"\n self.internal_fps = 22 if self.xyz else 26\n else:\n self.internal_fps = internal_fps\n print(f\"Internal camera FPS set to: {self.internal_fps}\")\n\n self.video_fps = self.internal_fps # Used when saving the output in a video file. 
Should be close to the real fps\n \n if self.crop:\n self.frame_size, self.scale_nd = mpu.find_isp_scale_params(internal_frame_height)\n self.img_h = self.img_w = self.frame_size\n self.pad_w = self.pad_h = 0\n self.crop_w = (int(round(1920 * self.scale_nd[0] / self.scale_nd[1])) - self.img_w) // 2\n\n else:\n width, self.scale_nd = mpu.find_isp_scale_params(internal_frame_height * 1920 / 1080, is_height=False)\n self.img_h = int(round(1080 * self.scale_nd[0] / self.scale_nd[1]))\n self.img_w = int(round(1920 * self.scale_nd[0] / self.scale_nd[1]))\n self.pad_h = (self.img_w - self.img_h) // 2\n self.pad_w = 0\n self.frame_size = self.img_w\n self.crop_w = 0\n\n print(f\"Internal camera image size: {self.img_w} x {self.img_h} - pad_h: {self.pad_h}\")\n\n else:\n print(\"Invalid input source:\", input_src)\n sys.exit()\n\n self.nb_kps = 33\n\n if self.smoothing:\n self.filter_landmarks = mpu.LandmarksSmoothingFilter(\n frequency=self.video_fps,\n min_cutoff=0.05,\n beta=80,\n derivate_cutoff=1\n )\n # landmarks_aux corresponds to the 2 landmarks used to compute the ROI in next frame\n self.filter_landmarks_aux = mpu.LandmarksSmoothingFilter(\n frequency=self.video_fps,\n min_cutoff=0.01,\n beta=10,\n derivate_cutoff=1\n )\n self.filter_landmarks_world = mpu.LandmarksSmoothingFilter(\n frequency=self.video_fps,\n min_cutoff=0.1,\n beta=40,\n derivate_cutoff=1,\n disable_value_scaling=True\n )\n if self.xyz:\n self.filter_xyz = mpu.LowPassFilter(alpha=0.25)\n\n # Define and start pipeline\n usb_speed = self.device.getUsbSpeed()\n self.device.startPipeline(self.create_pipeline())\n print(f\"Pipeline started - USB speed: {str(usb_speed).split('.')[-1]}\")\n\n # Define data queues \n if not self.laconic:\n self.q_video = self.device.getOutputQueue(name=\"cam_out\", maxSize=1, blocking=False)\n self.q_manager_out = self.device.getOutputQueue(name=\"manager_out\", maxSize=1, blocking=False)\n # For debugging\n # self.q_pre_pd_manip_out = self.device.getOutputQueue(name=\"pre_pd_manip_out\", maxSize=1, blocking=False)\n # self.q_pre_lm_manip_out = self.device.getOutputQueue(name=\"pre_lm_manip_out\", maxSize=1, blocking=False)\n\n self.fps = FPS()\n\n self.nb_pd_inferences = 0\n self.nb_lm_inferences = 0\n self.nb_lm_inferences_after_landmarks_ROI = 0\n self.nb_frames_no_body = 0\n\n def create_pipeline(self):\n print(\"Creating pipeline...\")\n # Start defining a pipeline\n pipeline = dai.Pipeline()\n pipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_4)\n self.pd_input_length = 224\n self.lm_input_length = 256\n\n # ColorCamera\n print(\"Creating Color Camera...\")\n cam = pipeline.create(dai.node.ColorCamera) \n cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)\n cam.setInterleaved(False)\n cam.setIspScale(self.scale_nd[0], self.scale_nd[1])\n cam.setFps(self.internal_fps)\n cam.setBoardSocket(dai.CameraBoardSocket.RGB)\n\n if self.crop:\n cam.setVideoSize(self.frame_size, self.frame_size)\n cam.setPreviewSize(self.frame_size, self.frame_size)\n else: \n cam.setVideoSize(self.img_w, self.img_h)\n cam.setPreviewSize(self.img_w, self.img_h)\n\n if not self.laconic:\n cam_out = pipeline.create(dai.node.XLinkOut)\n cam_out.setStreamName(\"cam_out\")\n cam_out.input.setQueueSize(1)\n cam_out.input.setBlocking(False)\n cam.video.link(cam_out.input)\n\n\n # Define manager script node\n manager_script = pipeline.create(dai.node.Script)\n manager_script.setScript(self.build_manager_script())\n\n if self.xyz:\n print(\"Creating MonoCameras, Stereo and 
SpatialLocationCalculator nodes...\")\n # For now, RGB needs fixed focus to properly align with depth.\n # This value was used during calibration\n cam.initialControl.setManualFocus(130)\n\n mono_resolution = dai.MonoCameraProperties.SensorResolution.THE_400_P\n left = pipeline.createMonoCamera()\n left.setBoardSocket(dai.CameraBoardSocket.LEFT)\n left.setResolution(mono_resolution)\n left.setFps(self.internal_fps)\n\n right = pipeline.createMonoCamera()\n right.setBoardSocket(dai.CameraBoardSocket.RIGHT)\n right.setResolution(mono_resolution)\n right.setFps(self.internal_fps)\n\n stereo = pipeline.createStereoDepth()\n stereo.setConfidenceThreshold(230)\n # LR-check is required for depth alignment\n stereo.setLeftRightCheck(True)\n stereo.setDepthAlign(dai.CameraBoardSocket.RGB)\n stereo.setSubpixel(False) # subpixel True -> latency\n # MEDIAN_OFF necessary in depthai 2.7.2. \n # Otherwise : [critical] Fatal error. Please report to developers. Log: 'StereoSipp' '533'\n # stereo.setMedianFilter(dai.StereoDepthProperties.MedianFilter.MEDIAN_OFF)\n\n spatial_location_calculator = pipeline.createSpatialLocationCalculator()\n spatial_location_calculator.setWaitForConfigInput(True)\n spatial_location_calculator.inputDepth.setBlocking(False)\n spatial_location_calculator.inputDepth.setQueueSize(1)\n\n left.out.link(stereo.left)\n right.out.link(stereo.right) \n\n stereo.depth.link(spatial_location_calculator.inputDepth)\n\n manager_script.outputs['spatial_location_config'].link(spatial_location_calculator.inputConfig)\n spatial_location_calculator.out.link(manager_script.inputs['spatial_data'])\n\n # Define pose detection pre processing (resize preview to (self.pd_input_length, self.pd_input_length))\n print(\"Creating Pose Detection pre processing image manip...\")\n pre_pd_manip = pipeline.create(dai.node.ImageManip)\n pre_pd_manip.setMaxOutputFrameSize(self.pd_input_length*self.pd_input_length*3)\n pre_pd_manip.setWaitForConfigInput(True)\n pre_pd_manip.inputImage.setQueueSize(1)\n pre_pd_manip.inputImage.setBlocking(False)\n cam.preview.link(pre_pd_manip.inputImage)\n manager_script.outputs['pre_pd_manip_cfg'].link(pre_pd_manip.inputConfig)\n\n # For debugging\n # pre_pd_manip_out = pipeline.createXLinkOut()\n # pre_pd_manip_out.setStreamName(\"pre_pd_manip_out\")\n # pre_pd_manip.out.link(pre_pd_manip_out.input)\n\n # Define pose detection model\n print(\"Creating Pose Detection Neural Network...\")\n pd_nn = pipeline.create(dai.node.NeuralNetwork)\n pd_nn.setBlobPath(self.pd_model)\n # Increase threads for detection\n # pd_nn.setNumInferenceThreads(2)\n pre_pd_manip.out.link(pd_nn.input)\n \n # Define pose detection post processing \"model\"\n print(\"Creating Pose Detection post processing Neural Network...\")\n post_pd_nn = pipeline.create(dai.node.NeuralNetwork)\n post_pd_nn.setBlobPath(self.pp_model)\n pd_nn.out.link(post_pd_nn.input)\n post_pd_nn.out.link(manager_script.inputs['from_post_pd_nn'])\n\n # Define link to send result to host \n manager_out = pipeline.create(dai.node.XLinkOut)\n manager_out.setStreamName(\"manager_out\")\n manager_script.outputs['host'].link(manager_out.input)\n\n # Define landmark pre processing image manip\n print(\"Creating Landmark pre processing image manip...\") \n pre_lm_manip = pipeline.create(dai.node.ImageManip)\n pre_lm_manip.setMaxOutputFrameSize(self.lm_input_length*self.lm_input_length*3)\n pre_lm_manip.setWaitForConfigInput(True)\n pre_lm_manip.inputImage.setQueueSize(1)\n pre_lm_manip.inputImage.setBlocking(False)\n 
cam.preview.link(pre_lm_manip.inputImage)\n\n # For debugging\n # pre_lm_manip_out = pipeline.createXLinkOut()\n # pre_lm_manip_out.setStreamName(\"pre_lm_manip_out\")\n # pre_lm_manip.out.link(pre_lm_manip_out.input)\n \n manager_script.outputs['pre_lm_manip_cfg'].link(pre_lm_manip.inputConfig)\n\n # Define normalization model between ImageManip and landmark model\n # This is a temporary step. Could be removed when support of setFrameType(RGBF16F16F16p) in ImageManip node\n print(\"Creating DiveideBy255 Neural Network...\") \n divide_nn = pipeline.create(dai.node.NeuralNetwork)\n divide_nn.setBlobPath(self.divide_by_255_model)\n pre_lm_manip.out.link(divide_nn.input) \n\n # Define landmark model\n print(\"Creating Landmark Neural Network...\") \n lm_nn = pipeline.create(dai.node.NeuralNetwork)\n lm_nn.setBlobPath(self.lm_model)\n # lm_nn.setNumInferenceThreads(1)\n \n divide_nn.out.link(lm_nn.input) \n lm_nn.out.link(manager_script.inputs['from_lm_nn'])\n\n print(\"Pipeline created.\")\n\n return pipeline \n\n def build_manager_script(self):\n '''\n The code of the scripting node 'manager_script' depends on :\n - the NN model (full, lite, 831),\n - the score threshold,\n - the video frame shape\n So we build this code from the content of the file template_manager_script.py which is a python template\n '''\n # Read the template\n with open(TEMPLATE_MANAGER_SCRIPT, 'r') as file:\n template = Template(file.read())\n \n # Perform the substitution\n code = template.substitute(\n _TRACE = \"node.warn\" if self.trace else \"#\",\n _pd_score_thresh = self.pd_score_thresh,\n _lm_score_thresh = self.lm_score_thresh,\n _force_detection = self.force_detection,\n _pad_h = self.pad_h,\n _img_h = self.img_h,\n _frame_size = self.frame_size,\n _crop_w = self.crop_w,\n _rect_transf_scale = self.rect_transf_scale,\n _IF_XYZ = \"\" if self.xyz else '\"\"\"',\n _buffer_size = 2910 if self.xyz else 2863,\n _visibility_threshold = self.visibility_threshold\n )\n # Remove comments and empty lines\n import re\n code = re.sub(r'\"{3}.*?\"{3}', '', code, flags=re.DOTALL)\n code = re.sub(r'#.*', '', code)\n code = re.sub('\\n\\s*\\n', '\\n', code)\n # For debugging\n if self.trace:\n with open(\"tmp_code.py\", \"w\") as file:\n file.write(code)\n\n return code\n\n def is_present(self, body, lm_id):\n return body.presence[lm_id] > self.presence_threshold\n\n def lm_postprocess(self, body, lms, lms_world):\n # lms : landmarks sent by Manager script node to host (list of 39*5 elements for full body or 31*5 for upper body)\n lm_raw = np.array(lms).reshape(-1,5)\n # Each keypoint have 5 information:\n # - X,Y coordinates are local to the body of\n # interest and range from [0.0, 255.0].\n # - Z coordinate is measured in \"image pixels\" like\n # the X and Y coordinates and represents the\n # distance relative to the plane of the subject's\n # hips, which is the origin of the Z axis. Negative\n # values are between the hips and the camera;\n # positive values are behind the hips. Z coordinate\n # scale is similar with X, Y scales but has different\n # nature as obtained not via human annotation, by\n # fitting synthetic data (GHUM model) to the 2D\n # annotation.\n # - Visibility, after user-applied sigmoid denotes the\n # probability that a keypoint is located within the\n # frame and not occluded by another bigger body\n # part or another object.\n # - Presence, after user-applied sigmoid denotes the\n # probability that a keypoint is located within the\n # frame.\n\n # Normalize x,y,z. 
Scaling in z = scaling in x = 1/self.lm_input_length\n lm_raw[:,:3] /= self.lm_input_length\n # Apply sigmoid on visibility and presence (if used later)\n body.visibility = 1 / (1 + np.exp(-lm_raw[:,3]))\n body.presence = 1 / (1 + np.exp(-lm_raw[:,4]))\n\n # body.norm_landmarks contains the normalized ([0:1]) 3D coordinates of landmarks in the square rotated body bounding box\n body.norm_landmarks = lm_raw[:,:3]\n # Now calculate body.landmarks = the landmarks in the image coordinate system (in pixel) (body.landmarks)\n src = np.array([(0, 0), (1, 0), (1, 1)], dtype=np.float32)\n dst = np.array([ (x, y) for x,y in body.rect_points[1:]], dtype=np.float32) # body.rect_points[0] is left bottom point and points going clockwise!\n mat = cv2.getAffineTransform(src, dst)\n lm_xy = np.expand_dims(body.norm_landmarks[:self.nb_kps,:2], axis=0)\n lm_xy = np.squeeze(cv2.transform(lm_xy, mat)) \n\n # A segment of length 1 in the coordinates system of body bounding box takes body.rect_w_a pixels in the\n # original image. Then we arbitrarily divide by 4 for a more realistic appearance.\n lm_z = body.norm_landmarks[:self.nb_kps,2:3] * body.rect_w_a / 4\n lm_xyz = np.hstack((lm_xy, lm_z))\n\n # World landmarks are predicted in meters rather than in pixels of the image\n # and have origin in the middle of the hips rather than in the corner of the\n # pose image (cropped with given rectangle). Thus only rotation (but not scale\n # and translation) is applied to the landmarks to transform them back to\n # original coordinates.\n body.landmarks_world = np.array(lms_world).reshape(-1,3)\n sin_rot = sin(body.rotation)\n cos_rot = cos(body.rotation)\n rot_m = np.array([[cos_rot, sin_rot], [-sin_rot, cos_rot]])\n body.landmarks_world[:,:2] = np.dot(body.landmarks_world[:,:2], rot_m)\n\n if self.smoothing:\n timestamp = now()\n object_scale = body.rect_w_a\n lm_xyz[:self.nb_kps] = self.filter_landmarks.apply(lm_xyz[:self.nb_kps], timestamp, object_scale)\n lm_xyz[self.nb_kps:] = self.filter_landmarks_aux.apply(lm_xyz[self.nb_kps:], timestamp, object_scale)\n body.landmarks_world = self.filter_landmarks_world.apply(body.landmarks_world, timestamp)\n\n body.landmarks = lm_xyz.astype(np.int)\n # If we added padding to make the image square, we need to remove this padding from landmark coordinates and from rect_points\n if self.pad_h > 0:\n body.landmarks[:,1] -= self.pad_h\n for i in range(len(body.rect_points)):\n body.rect_points[i][1] -= self.pad_h\n # if self.pad_w > 0:\n # body.landmarks[:,0] -= self.pad_w\n # for i in range(len(body.rect_points)):\n # body.rect_points[i][0] -= self.pad_w \n \n \n def next_frame(self):\n\n self.fps.update()\n \n if self.laconic:\n video_frame = np.zeros((self.frame_size, self.frame_size, 3), dtype=np.uint8)\n else:\n in_video = self.q_video.get()\n video_frame = in_video.getCvFrame() \n\n # For debugging\n # pre_pd_manip = self.q_pre_pd_manip_out.tryGet()\n # if pre_pd_manip:\n # pre_pd_manip = pre_pd_manip.getCvFrame()\n # cv2.imshow(\"pre_pd_manip\", pre_pd_manip)\n # pre_lm_manip = self.q_pre_lm_manip_out.tryGet()\n # if pre_lm_manip:\n # pre_lm_manip = pre_lm_manip.getCvFrame()\n # cv2.imshow(\"pre_lm_manip\", pre_lm_manip)\n \n # Get result from device\n res = marshal.loads(self.q_manager_out.get().getData())\n if res[\"type\"] != 0 and res[\"lm_score\"] > self.lm_score_thresh:\n body = mpu.Body()\n body.rect_x_center_a = res[\"rect_center_x\"] * self.frame_size\n body.rect_y_center_a = res[\"rect_center_y\"] * self.frame_size\n body.rect_w_a = body.rect_h_a = 
res[\"rect_size\"] * self.frame_size\n body.rotation = res[\"rotation\"] \n body.rect_points = mpu.rotated_rect_to_points(body.rect_x_center_a, body.rect_y_center_a, body.rect_w_a, body.rect_h_a, body.rotation)\n body.lm_score = res[\"lm_score\"]\n self.lm_postprocess(body, res['lms'], res['lms_world'])\n if self.xyz:\n if res['xyz_ref'] == 0:\n body.xyz_ref = None\n else:\n if res['xyz_ref'] == 1:\n body.xyz_ref = \"mid_hips\"\n else: # res['xyz_ref'] == 2:\n body.xyz_ref = \"mid_shoulders\"\n body.xyz = np.array(res[\"xyz\"])\n if self.smoothing:\n body.xyz = self.filter_xyz.apply(body.xyz)\n body.xyz_zone = np.array(res[\"xyz_zone\"])\n body.xyz_ref_coords_pixel = np.mean(body.xyz_zone.reshape((2,2)), axis=0)\n\n\n else:\n body = None\n if self.smoothing: \n self.filter_landmarks.reset()\n self.filter_landmarks_aux.reset()\n self.filter_landmarks_world.reset()\n if self.xyz: self.filter_xyz.reset()\n\n # Statistics\n if self.stats:\n if res[\"type\"] == 0:\n self.nb_pd_inferences += 1\n self.nb_frames_no_body += 1\n else: \n self.nb_lm_inferences += 1\n if res[\"type\"] == 1:\n self.nb_pd_inferences += 1\n else: # res[\"type\"] == 2\n self.nb_lm_inferences_after_landmarks_ROI += 1\n if res[\"lm_score\"] < self.lm_score_thresh: self.nb_frames_no_body += 1\n\n return video_frame, body\n\n\n def exit(self):\n self.device.close()\n # Print some stats\n if self.stats:\n print(f\"FPS : {self.fps.get_global():.1f} f/s (# frames = {self.fps.nbf})\")\n print(f\"# frames without body : {self.nb_frames_no_body}\")\n print(f\"# pose detection inferences : {self.nb_pd_inferences}\")\n print(f\"# landmark inferences : {self.nb_lm_inferences} - # after pose detection: {self.nb_lm_inferences - self.nb_lm_inferences_after_landmarks_ROI} - # after landmarks ROI prediction: {self.nb_lm_inferences_after_landmarks_ROI}\")\n \n \n \n\n"
] |
[
[
"numpy.hstack",
"numpy.dot",
"numpy.expand_dims",
"numpy.exp",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
paulsok/chaos-analysis
|
[
"4124a349fdf10d1fa0b0e014dc4b4d84374b0b55"
] |
[
"systems/noise_sine.py"
] |
[
"import math\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef noise_sine(length=10000, level=0.2, mu=2.4, discard=1000):\n \"\"\"Simulate the noise-driven sine map described in Freitas et al (2009),\n \"Failure in distinguishing colored noise from chaos using the 'noise\n titration' technique\".\n\n Parameters\n ----------\n length : int\n Length of the time series.\n level : float\n The amplitude of white noise to add to the final signal.\n mu : float\n Constant.\n discard : int\n Number of steps to discard in order to eliminate transients.\n\n Returns\n -------\n x : array\n Array containing the time series.\n \"\"\"\n x = np.zeros(length + discard)\n x[0] = random.random()\n\n for i in range(1, length + discard):\n q = random.random()\n if q <= 0.01:\n y = 1\n else:\n y = 0\n x[i] = mu * math.sin(x[i-1]) + y * (4 * random.random() - 2)\n\n # add white noise\n _x = x + level * np.std(x) * np.random.rand(length + discard)\n\n return _x[discard:]\n\n\nif __name__ == '__main__':\n time_series = noise_sine(length=10000, level=0.2, mu=2.4, discard=1000)\n plt.plot(time_series)\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.plot",
"numpy.std",
"numpy.random.rand",
"matplotlib.pyplot.show",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mangonihao/gpt2-pytorch
|
[
"945cf98a3e7d9378d59866f9bdc977f4bcc96a6e"
] |
[
"models/modules.py"
] |
[
"# -*- coding: utf-8 -*-\n\n# @Author : xmh\n# @Time : 2021/1/22 9:01\n# @File : Module.py\n\n\"\"\"\nfile description::\n\n\"\"\"\nimport torch.nn as nn\nimport torch\nimport math\nimport copy\nimport torch.nn.functional as F\nfrom tqdm import trange\n\n\ndef gelu(x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\n\nclass LayerNorm(nn.Module):\n\n def __init__(self, normalized_shape, epsilon=1e-12):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(normalized_shape))\n self.bias = nn.Parameter(torch.zeros(normalized_shape))\n self.epsilon = epsilon\n\n def forward(self, x):\n x_expected = x.mean(-1, keepdims=True)\n x_var = (x - x_expected).pow(2).mean(-1, keepdims=True)\n x = (x - x_expected) / torch.sqrt(x_var+self.epsilon)\n return self.weight * x + self.bias\n\n\nclass Conv1D(nn.Module):\n '''\n The CONV1D layer can be thought of as a LINEAR layer itself. Essentially, it is casting an initial tensor x\n (having the final dimension of x.size(-1)) being passed to it to have a final dimension of size self.output_dim\n '''\n def __init__(self, input_dim, output_dim):\n super().__init__()\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.weight = torch.empty(input_dim, output_dim)\n nn.init.normal_(self.weight, std=0.02)\n self.weight = nn.Parameter(self.weight)\n self.bias = nn.Parameter(torch.zeros(self.output_dim))\n\n def forward(self, x):\n size_out = x.size()[:-1] + (self.output_dim,)\n x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) # self.input_dim\n x = x.view(*size_out)\n return x\n\n\nclass FeedForwardNetwork(nn.Module):\n def __init__(self, n_embed, hidden_dim, dropout=0.1):\n super().__init__()\n self.c_fc = Conv1D(n_embed, hidden_dim)\n self.c_project = Conv1D(hidden_dim, n_embed)\n self.activation = gelu\n self.dropout_layer = nn.Dropout(dropout) # 测试的时候需要去掉\n\n def forward(self, x):\n return self.c_project(self.activation(self.c_fc(x)))\n\n\nclass Attention(nn.Module):\n\n def __init__(self, n_embed=768, n_ctx=1024, n_head=6, scale=False):\n super().__init__()\n self.n_embed = n_embed\n self.scale = scale\n self.atten_head = n_head\n\n self.register_buffer(\"bias\", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)) # mask\n\n self.c_atten = Conv1D(n_embed, n_embed*3)\n self.c_project = Conv1D(n_embed, n_embed)\n\n assert self.n_embed % self.atten_head == 0, \"n_embed must mod atten_head\"\n\n # def split_heads(self, x, is_key=False):\n # x_new_shape = x.size()[:-1] + (self.atten_head, self.n_embed//self.atten_head)\n # x = x.view(*x_new_shape)\n # if is_key:\n # return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n # else:\n # return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def split_heads(self, x):\n x_new_shape = x.size()[:-1] + (self.atten_head, self.n_embed//self.atten_head)\n x = x.view(*x_new_shape)\n\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n x_new_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n\n return x.view(*x_new_shape)\n\n def mask_attn_weight(self, atten_weight):\n start, end = atten_weight.size(-2), atten_weight.size(-1)\n mask = self.bias[:, :, end-start:end, :end] # mask的意义不是很清楚\n atten_weight = atten_weight * mask - 1e10 * (1-mask)\n\n return atten_weight\n\n def cal_attention(self, query, key, value):\n atten_weight = torch.matmul(query, key.transpose(-2, -1))\n if self.scale:\n # atten_weight 
[batch, head]\n atten_weight = atten_weight / math.sqrt(value.size(-1)) # head_features\n atten_weight = self.mask_attn_weight(atten_weight)\n atten_weight = nn.Softmax(dim=-1)(atten_weight)\n\n return torch.matmul(atten_weight, value) # 计算注意力向量\n\n def forward(self, x, layer_past=None):\n x = self.c_atten(x)\n query, key, value = x.split(self.n_embed, dim=2)\n query, key, value = map(self.split_heads, (query, key, value))\n # query = self.split_heads(query)\n # key = self.split_heads(key)\n # value = self.split_heads(value)\n\n if layer_past is not None: # tensor无法判断真假\n key_past, value_past = layer_past[0], layer_past[1] # 为什么要添加过去的k,v值,而q不用\n key = torch.cat((key_past, key), dim=-2) # (batch, head, seq_length, head_features)\n value = torch.cat((value_past, value), dim=-2)\n # Concatenate a sequence of tensors along a new dimension\n present = torch.stack((key, value))\n out = self.cal_attention(query, key, value)\n out = self.merge_heads(out)\n out = self.c_project(out)\n\n return out, present\n\n\nclass TransformerDecoderBlock(nn.Module):\n def __init__(self, config, scale=True):\n super().__init__()\n self.layer_norm1 = LayerNorm(config.n_embed, config.layer_norm_epsilon)\n self.atten_layer = Attention(config.n_embed, config.n_ctx, config.n_head, scale)\n self.ffn = FeedForwardNetwork(config.n_embed, 4*config.n_embed, config.dropout_rate)\n self.layer_norm2 = LayerNorm(config.n_embed, config.layer_norm_epsilon)\n\n def forward(self, x, layer_past=None):\n a, present = self.atten_layer(self.layer_norm1(x), layer_past=layer_past)\n x = x + a\n x = x + self.ffn(self.layer_norm2(x))\n\n return x, present\n\n\nclass GPT2Model(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.n_layer = config.n_layer\n self.n_embed = config.n_embed\n\n self.wte = nn.Embedding(config.vocab_size, config.n_embed) # 文本嵌入向量\n self.wpe = nn.Embedding(config.n_position, config.n_embed) # 位置编码\n block = TransformerDecoderBlock(config, scale=True)\n self.blocks = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])\n self.layer_norm = LayerNorm(config.n_embed, config.layer_norm_epsilon)\n\n def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):\n\n if past is None:\n past_length = 0\n past = [None] * self.n_layer\n else:\n past_length = past[0][0].size(-2) # 当前生成文本的长度(包括初始文本)\n\n if position_ids is None:\n position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long,\n device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n\n input_ids = input_ids.view(-1, input_ids.size(-1))\n position_ids = position_ids.view(-1, position_ids.size(-1))\n\n input_embeds = self.wte(input_ids)\n position_embeds = self.wpe(position_ids)\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))\n token_type_embeds = self.wte(token_type_ids)\n else:\n token_type_embeds = 0\n\n hidden_ids = input_embeds + position_embeds + token_type_embeds\n\n presents = []\n for block, layer_past in zip(self.blocks, past):\n hidden_ids, present = block(hidden_ids, layer_past) # hidden_ids要跟着改变,翻了与GLMP一样的错误\n presents.append(present)\n\n hidden_ids = self.layer_norm(hidden_ids) # [1, seq_length, n_embed]\n\n return hidden_ids, presents\n\n\nclass GPT2LMHeadModel(nn.Module):\n def __init__(self, embed_weight):\n super().__init__()\n self.share_embed_weight(embed_weight)\n\n def share_embed_weight(self, embed_weight):\n embed_shape = embed_weight.shape\n self.decoder = 
nn.Linear(embed_shape[1], embed_shape[0], bias=False)\n self.decoder.weight = embed_weight\n\n def forward(self, hidden_states):\n return self.decoder(hidden_states)\n\n\nclass GPT2GeneratorModel(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.gpt2 = GPT2Model(config)\n self.decoder = GPT2LMHeadModel(self.gpt2.wte.weight)\n\n def top_k_logits(self, logits, k):\n if k == 0:\n return logits\n values, _ = torch.topk(logits, k)\n min_values = values[:, -1].unsqueeze(1)\n return torch.where(logits < min_values, torch.ones_like(logits, dtype=logits.dtype) * -1e10, logits)\n\n def generate_sequence(self, context=None, start_token=None, generate_length=256, topk_num=40, sample=True):\n self.train(False)\n if start_token is None:\n assert context is not None, \"Specify exactly one of start_token and context!\"\n context = torch.tensor(context, dtype=torch.long).unsqueeze(0)\n else:\n assert context is None, \"Specify exactly one of start_token and\"\n context = torch.full((1,), start_token, dtype=torch.long)\n\n prev, output = context, context\n past = None\n with torch.no_grad():\n for _ in trange(generate_length):\n logits, past = self.forward(prev, past=past)\n logits = logits[:, -1, :]\n logits = self.top_k_logits(logits, topk_num)\n # logits ,_ = torch.topk(logits, topk_num) # 这个会输出一对乱码\n log_soft = F.softmax(logits, dim=-1)\n if sample:\n prev = torch.multinomial(log_soft, num_samples=1)\n else:\n _, prev = torch.topk(log_soft, k=1, dim=-1)\n output = torch.cat((output, prev), dim=1)\n\n return output\n\n def set_tied(self):\n \"\"\" Make sure we are sharing the embeddings\n \"\"\"\n self.decoder.share_embed_weight(self.gpt2.wte.weight)\n\n def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None):\n hidden_states, presents = self.gpt2(input_ids, position_ids, token_type_ids, past)\n lm_logits = self.decoder(hidden_states)\n\n if lm_labels:\n loss_func = nn.CrossEntropyLoss(ignore_index=-1)\n loss = loss_func(lm_logits.view(-1, lm_logits.size(-1)), lm_labels)\n return loss\n\n return lm_logits, presents\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.functional.softmax",
"torch.zeros",
"torch.cat",
"torch.nn.Embedding",
"torch.multinomial",
"torch.no_grad",
"torch.topk",
"torch.pow",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.sqrt",
"torch.tensor",
"torch.ones_like",
"torch.nn.Parameter",
"torch.empty",
"torch.full",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.stack",
"torch.matmul"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZeliangSu/LRCS-Xlearn
|
[
"50ff9c64f36c0d80417aa44aac2db68f392130f0"
] |
[
"src/segmentpy/_taskManager/ActViewer_logic.py"
] |
[
"from PySide2.QtWidgets import QApplication, QWidget, QMessageBox, QListWidget\nfrom PySide2.QtGui import QPixmap, QImage\nfrom PySide2.QtCore import Qt\n\nfrom segmentpy._taskManager.ActViewer_design import Ui_actViewer\nfrom segmentpy._taskManager.nodes_list_logic import node_list_logic\nfrom segmentpy._taskManager.file_dialog import file_dialog\nfrom segmentpy.tf114.util import print_nodes_name, check_N_mkdir\nfrom segmentpy.tf114.analytic import partialRlt_and_diff, visualize_weights\n\nfrom PIL import Image\nimport re\nimport sys\nimport os\nimport numpy as np\nimport subprocess\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nfrom operator import add\n\n# logging\nimport logging\nfrom segmentpy.tf114 import log\nlogger = log.setup_custom_logger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass actViewer_logic(QWidget, Ui_actViewer):\n def __init__(self, *args, **kwargs):\n \"\"\"order: set_ckpt() = set_input() > load_graph() > get_nodes() > load_activations()\"\"\"\n super().__init__()\n\n self.setupUi(self)\n self.actList.setSelectionMode(QListWidget.MultiSelection)\n\n self.ckptButton.clicked.connect(self.ckptFileDialog)\n self.inputButton.clicked.connect(self.inputFileDialog)\n self.load.clicked.connect(self.load_activations)\n self.saveButton.clicked.connect(self.save_selected_activations)\n self.cancelButton.clicked.connect(self.exit)\n self.ckptPathLine.editingFinished.connect(self.set_ckpt)\n self.inputPathLine.editingFinished.connect(self.set_input)\n self.corrector.editingFinished.connect(self.setCorrector)\n self.actList.doubleClicked.connect(self.set_focused_layer)\n self.actSlider.valueChanged.connect(self.display)\n self.weightSlider.valueChanged.connect(self.displayWeight)\n\n # variables\n self.ckpt = None\n self.input = None\n self.layer_name = None\n self.layer = None\n self.correction = None\n\n def log_window(self, title: str, Msg: str):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(Msg)\n msg.setWindowTitle(title)\n msg.exec_()\n\n def ckptFileDialog(self):\n tmp = file_dialog(title='choose .meta file').openFileNameDialog()\n if tmp:\n self.ckptPathLine.setText(tmp)\n self.set_ckpt()\n\n def inputFileDialog(self):\n tmp = file_dialog(title='choose .tif for input', type='.tif').openFileNameDialog()\n if tmp:\n self.inputPathLine.setText(tmp)\n self.set_input()\n\n def setCorrector(self):\n self.correction = float(self.corrector.text())\n self.hyperparams['normalization'] = self.correction\n\n def set_ckpt(self):\n self.ckpt = self.ckptPathLine.text()\n # hit Enter or close file dialog load automatically the model\n\n # prepare\n if self.ckpt:\n _re = re.search('(.+)ckpt/step(\\d+)\\.meta', self.ckpt)\n self.step = _re.group(2)\n self.graph_def_dir = _re.group(1)\n self.paths = {\n 'step': self.step,\n 'working_dir': self.graph_def_dir,\n 'ckpt_dir': self.graph_def_dir + 'ckpt/',\n 'ckpt_path': self.graph_def_dir + 'ckpt/step{}'.format(self.step),\n 'save_pb_dir': self.graph_def_dir + 'pb/',\n 'save_pb_path': self.graph_def_dir + 'pb/step{}.pb'.format(self.step),\n 'data_dir': self.input,\n }\n\n model = re.search('mdl_([A-Za-z]*\\d*)', self.ckpt).group(1)\n\n self.hyperparams = {\n 'model': model,\n 'window_size': int(re.search('ps(\\d+)', self.ckpt).group(1)),\n 'batch_size': int(re.search('bs(\\d+)', self.ckpt).group(1)),\n # 'stride': args.stride,\n 'device_option': 'cpu',\n 'mode': 'classification', # todo:\n 'batch_normalization': False,\n 'feature_map': True if model in 
['LRCS8', 'LRCS9', 'LRCS10', 'Unet3'] else False,\n 'correction': self.correction\n }\n\n # get node and set the listViewWidget\n self.get_nodes()\n\n def set_input(self):\n self.input = self.inputPathLine.text()\n self.paths['data_dir'] = self.input\n\n def get_nodes(self):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n # restore from ckpt the nodes\n tf.reset_default_graph()\n self.actList.clear()\n _ = tf.train.import_meta_graph(\n self.ckpt,\n clear_devices=True,\n )\n\n # get_nodes\n graph = tf.get_default_graph().as_graph_def()\n nodes = print_nodes_name(graph)\n options = []\n for node in nodes:\n tmp = re.search('(^[a-zA-Z]+\\d*\\/).*(leaky|relu|sigmoid|tanh|logits\\/identity|up\\d+\\/Reshape\\_4|concat\\d+\\/concat)$', # concat\\d+\\/concat for uniquely Unet\n node)\n if tmp is not None:\n tmp = tmp.string # .string returns the full searched string, i.e. the node name\n options.append(tmp)\n self.actList.addItems([n for n in options])\n\n def set_focused_layer(self, list_number=None):\n self.layer_name = self.actList.item(list_number.row()).text()\n self.layer = list_number.row()\n self.display(0)\n\n def display(self, nth=0):\n logger.debug(self.layer_name)\n logger.debug(self.layer)\n if not hasattr(self, 'activations'):\n self.get_nodes()\n self.load_activations()\n else:\n act = self.activations[self.layer_name][0]\n weight = self.kernels[self.layer]\n logger.debug('weight matrix shape: {}'.format(weight.shape))\n logger.debug('activations list len: {}'.format(len(self.activations[self.layer_name])))\n self.actSlider.setMaximum(act.shape[-1] - 1) # -1 as starts with 0\n\n # 1D dnn output\n if 'dnn' in self.layer_name:\n ceiling = int(np.ceil(np.sqrt(act.size)))\n tmp = np.zeros((ceiling ** 2), np.float32).ravel()\n tmp[:act.size] = act\n act = tmp.reshape(ceiling, ceiling)\n else:\n logger.debug('act shape: {}'.format(act.shape))\n logger.debug('weight shape: {}'.format(weight.shape))\n act = act[:, :, nth]\n act = (act - np.min(act)) / (np.max(act) - np.min(act)) * 255\n act = np.asarray(Image.fromarray(act).convert('RGB'))\n act = act.copy()\n\n # imshow\n self.q = QImage(act,\n act.shape[1],\n act.shape[0],\n act.shape[1] * 3, QImage.Format_RGB888)\n self.p = QPixmap(self.q)\n self.p.scaled(self.width(), self.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation)\n self.Images.setScaledContents(True)\n self.Images.setPixmap(self.p)\n self.Images.update()\n self.Images.repaint()\n\n # get weight\n weight = weight[:, :, :, nth]\n logger.debug('weightSlide maxi: {}'.format(weight.shape[2]))\n self.weightSlider.setMaximum(weight.shape[2] - 1)\n weight = (weight - np.min(weight)) / (np.max(weight) - np.min(weight)) * 255\n self.weight = weight.copy()\n self.displayWeight(0)\n\n def displayWeight(self, slide=None):\n # get weight\n fig_weight = plt.figure(figsize=(1.2, 1.2))\n fig_weight.clear()\n ax = fig_weight.add_subplot(111)\n img = np.squeeze(self.weight[:, :, slide])\n ax.imshow(img, interpolation='none', aspect='auto')\n for (y, x), z in np.ndenumerate(np.squeeze(img)):\n ax.text(x, y, '%.2f' % z, fontsize=5, ha='center', va='center',)\n ax.axis('off')\n fig_weight.canvas.draw()\n data = np.frombuffer(fig_weight.canvas.tostring_rgb(), dtype=np.uint8)\n logger.debug('img shape: {}'.format(data.shape))\n logger.debug(fig_weight.canvas.get_width_height())\n logger.debug(fig_weight.canvas.get_width_height()[::-1])\n data = data.reshape(fig_weight.canvas.get_width_height()[::-1] + (3,)) # RGB buffer is laid out as (height, width, 3)\n # plt.imshow(data)\n # plt.show()\n logger.debug('img shape: {}'.format(data.shape))\n del fig_weight\n\n\n logger.debug(slide)\n # plot weight\n self.wt = QImage(data,\n data.shape[1],\n data.shape[0],\n data.shape[1] * 3, QImage.Format_RGB888)\n self.pw = QPixmap(self.wt)\n self.pw.scaled(self.width(), self.height(),\n Qt.KeepAspectRatio,\n Qt.SmoothTransformation\n )\n self.weightLabel.setScaledContents(False)\n self.weightLabel.setPixmap(self.pw)\n self.weightLabel.update()\n self.weightLabel.repaint()\n\n def load_activations(self):\n if not self.input:\n self.log_window(title='Error!', Msg='Please indicate an input image')\n\n elif not self.correction:\n self.log_window(title='Error!', Msg='You forgot to set the corrector')\n\n else:\n self.activations = partialRlt_and_diff(paths=self.paths, hyperparams=self.hyperparams,\n conserve_nodes=[self.actList.item(i).text() for i in range(self.actList.count())],\n write_rlt=False)\n logger.debug(self.activations)\n\n # todo: display the weight, the input and the output too\n self.kern_name, self.kernels = visualize_weights(params=self.paths, write_rlt=False)\n logger.debug(self.kern_name)\n\n def save_selected_activations(self):\n if not self.input:\n self.log_window(title='Error!', Msg='Please indicate an input image')\n else:\n save_act_path = file_dialog(title='choose a folder to save the images', type='/').openFolderDialog()\n selected_idx = self.actList.selectionModel().selectedIndexes()\n for idx in selected_idx:\n layer_name = self.actList.item(idx.row()).text()\n rlt = np.squeeze(self.activations[layer_name])\n if rlt.ndim == 3:\n for i in range(rlt.shape[-1]):\n check_N_mkdir(save_act_path+layer_name.replace('/','_'))\n Image.fromarray(rlt[:, :, i]).save(save_act_path+layer_name.replace('/','_')+'/{}.tif'.format(i))\n elif rlt.ndim == 2:\n check_N_mkdir(save_act_path+layer_name.replace('/','_'))\n Image.fromarray(rlt[:, :]).save(save_act_path + layer_name.replace('/','_') + '/act.tif')\n else:\n logger.debug('got an unexpected ndim of the activations: {}'.format(rlt.ndim))\n\n def exit(self):\n self.close()\n\n\ndef test():\n app = QApplication(sys.argv)\n\n # set ui\n ui = actViewer_logic()\n ui.show()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n test()"
] |
[
[
"numpy.sqrt",
"numpy.min",
"numpy.squeeze",
"tensorflow.train.import_meta_graph",
"numpy.max",
"tensorflow.reset_default_graph",
"tensorflow.get_default_graph",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
fmxFranky/decision-transformer
|
[
"475ee21429869731f3f4fbbd4220532de8b38d19",
"475ee21429869731f3f4fbbd4220532de8b38d19",
"475ee21429869731f3f4fbbd4220532de8b38d19"
] |
[
"atari/create_dataset.py",
"gym/decision_transformer/training/act_trainer.py",
"gym/decision_transformer/models/trajectory_gpt2.py"
] |
[
"import argparse\nimport csv\nimport logging\nimport math\nimport pickle\nimport random\nfrom collections import deque\n\nimport blosc\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom fixed_replay_buffer import FixedReplayBuffer\nfrom mingpt.model_atari import GPT, GPTConfig\nfrom mingpt.trainer_atari import Trainer, TrainerConfig\n# make deterministic\nfrom mingpt.utils import sample, set_seed\nfrom torch.nn import functional as F\nfrom torch.utils.data import Dataset\n\n\ndef create_dataset(num_buffers, num_steps, game, data_dir_prefix,\n trajectories_per_buffer):\n # -- load data from memory (make more efficient)\n obss = []\n actions = []\n returns = [0]\n done_idxs = []\n stepwise_returns = []\n\n transitions_per_buffer = np.zeros(50, dtype=int)\n num_trajectories = 0\n while len(obss) < num_steps:\n buffer_num = np.random.choice(np.arange(50 - num_buffers, 50), 1)[0]\n i = transitions_per_buffer[buffer_num]\n print('loading from buffer %d which has %d already loaded' %\n (buffer_num, i))\n frb = FixedReplayBuffer(data_dir=data_dir_prefix + game + '/1/replay_logs',\n replay_suffix=buffer_num,\n observation_shape=(84, 84),\n stack_size=4,\n update_horizon=1,\n gamma=0.99,\n observation_dtype=np.uint8,\n batch_size=32,\n replay_capacity=100000)\n if frb._loaded_buffers:\n done = False\n curr_num_transitions = len(obss)\n trajectories_to_load = trajectories_per_buffer\n while not done:\n states, ac, ret, next_states, next_action, next_reward, terminal, indices = frb.sample_transition_batch(\n batch_size=1, indices=[i])\n states = states.transpose(\n (0, 3, 1, 2))[0] # (1, 84, 84, 4) --> (4, 84, 84)\n obss += [states]\n actions += [ac[0]]\n stepwise_returns += [ret[0]]\n if terminal[0]:\n done_idxs += [len(obss)]\n returns += [0]\n if trajectories_to_load == 0:\n done = True\n else:\n trajectories_to_load -= 1\n returns[-1] += ret[0]\n i += 1\n if i >= 100000:\n obss = obss[:curr_num_transitions]\n actions = actions[:curr_num_transitions]\n stepwise_returns = stepwise_returns[:curr_num_transitions]\n returns[-1] = 0\n i = transitions_per_buffer[buffer_num]\n done = True\n num_trajectories += (trajectories_per_buffer - trajectories_to_load)\n transitions_per_buffer[buffer_num] = i\n print(\n 'this buffer has %d loaded transitions and there are now %d transitions total divided into %d trajectories'\n % (i, len(obss), num_trajectories))\n\n actions = np.array(actions)\n returns = np.array(returns)\n stepwise_returns = np.array(stepwise_returns)\n done_idxs = np.array(done_idxs)\n\n # -- create reward-to-go dataset\n start_index = 0\n rtg = np.zeros_like(stepwise_returns)\n for i in done_idxs:\n i = int(i)\n curr_traj_returns = stepwise_returns[start_index:i + 1] # includes i\n for j in range(i - 1, start_index - 1, -1): # start from i-1\n rtg_j = curr_traj_returns[j - start_index:i + 1 - start_index]\n rtg[j] = sum(rtg_j) # includes i\n start_index = i + 1\n print('max rtg is %d' % max(rtg))\n\n # -- create timestep dataset\n start_index = 0\n timesteps = np.zeros(len(actions) + 1, dtype=int)\n for i in done_idxs:\n i = int(i)\n timesteps[start_index:i + 1] = np.arange(i + 1 - start_index)\n start_index = i + 1\n print('max timestep is %d' % max(timesteps))\n\n return obss, actions, returns, done_idxs, rtg, timesteps\n",
"import numpy as np\nimport torch\nfrom decision_transformer.training.trainer import Trainer\n\n\nclass ActTrainer(Trainer):\n\n def train_step(self):\n states, actions, rewards, dones, _, _, attention_mask = self.get_batch(\n self.batch_size)\n state_target, action_target, reward_target = torch.clone(\n states), torch.clone(actions), torch.clone(rewards)\n\n state_preds, action_preds, reward_preds = self.model.forward(\n states,\n actions,\n rewards,\n attention_mask=attention_mask,\n target_return=returns,\n )\n\n act_dim = action_preds.shape[2]\n action_preds = action_preds.reshape(-1, act_dim)\n action_target = action_target[:, -1].reshape(-1, act_dim)\n\n loss = self.loss_fn(\n state_preds,\n action_preds,\n reward_preds,\n state_target,\n action_target,\n reward_target,\n )\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return loss.detach().cpu().item()\n",
"# coding=utf-8\n# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch OpenAI GPT-2 model.\"\"\"\n\nimport os\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom transformers.activations import ACT2FN\nfrom transformers.file_utils import (ModelOutput, add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings)\nfrom transformers.modeling_outputs import \\\n BaseModelOutputWithPastAndCrossAttentions\nfrom transformers.modeling_utils import (Conv1D, PreTrainedModel,\n SequenceSummary,\n find_pruneable_heads_and_indices,\n prune_conv1d_layer)\nfrom transformers.models.gpt2.configuration_gpt2 import GPT2Config\nfrom transformers.utils import logging\nfrom transformers.utils.model_parallel_utils import (assert_device_map,\n get_device_map)\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"GPT2Config\"\n_TOKENIZER_FOR_DOC = \"GPT2Tokenizer\"\n\nGPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"gpt2\",\n \"gpt2-medium\",\n \"gpt2-large\",\n \"gpt2-xl\",\n \"distilgpt2\",\n # See all GPT-2 models at https://huggingface.co/models?filter=gpt2\n]\n\n\ndef load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model\"\"\"\n try:\n import re\n\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")\n raise\n tf_path = os.path.abspath(gpt2_checkpoint_path)\n logger.info(\"Converting TensorFlow checkpoint from {}\".format(tf_path))\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n logger.info(\"Loading TF weight {} with shape {}\".format(name, shape))\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array.squeeze())\n\n for name, array in zip(names, arrays):\n name = name[6:] # skip \"model/\"\n name = name.split(\"/\")\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+\\d+\", m_name):\n scope_names = re.split(r\"(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"w\" or scope_names[0] == \"g\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"b\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"wpe\" or scope_names[0] == \"wte\":\n pointer = getattr(pointer, scope_names[0])\n pointer = getattr(pointer, \"weight\")\n else:\n pointer = getattr(pointer, scope_names[0])\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n try:\n assert (\n pointer.shape == array.shape\n ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(\"Initialize PyTorch weight {}\".format(name))\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass Attention(nn.Module):\n\n def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):\n super().__init__()\n\n n_state = nx # in Attention: n_state=768 (nx=n_embd)\n # [switch nx => n_state from Block to Attention to keep identical to TF implem]\n assert n_state % config.n_head == 0\n self.register_buffer(\n \"bias\",\n torch.tril(torch.ones((n_ctx, n_ctx),\n dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx))\n self.register_buffer(\"masked_bias\", torch.tensor(-1e4))\n self.n_head = config.n_head\n self.split_size = n_state\n self.scale = scale\n self.is_cross_attention = is_cross_attention\n if self.is_cross_attention:\n self.c_attn = Conv1D(2 * n_state, nx)\n self.q_attn = Conv1D(n_state, nx)\n else:\n self.c_attn = Conv1D(3 * n_state, nx)\n self.c_proj = Conv1D(n_state, nx)\n self.attn_dropout = nn.Dropout(config.attn_pdrop)\n self.resid_dropout = nn.Dropout(config.resid_pdrop)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.n_head, self.split_size // self.n_head, self.pruned_heads)\n index_attn = torch.cat(\n [index, index + self.split_size, index + (2 * self.split_size)])\n\n # Prune conv1d layers\n self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)\n self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)\n\n # Update hyper params\n self.split_size = (self.split_size // self.n_head) * (self.n_head -\n len(heads))\n self.n_head = self.n_head - len(heads)\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def _attn(self,\n q,\n k,\n v,\n attention_mask=None,\n head_mask=None,\n output_attentions=False):\n w = torch.matmul(q, k)\n if self.scale:\n w = w / (float(v.size(-1))**0.5)\n nd, ns = w.size(-2), w.size(-1)\n\n if not self.is_cross_attention:\n # if only \"normal\" attention layer implements causal mask\n mask = self.bias[:, :, ns - nd:ns, :ns]\n w = 
torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))\n\n if attention_mask is not None:\n # Apply the attention mask\n w = w + attention_mask\n\n w = nn.Softmax(dim=-1)(w)\n w = self.attn_dropout(w)\n\n # Mask heads if we want to\n if head_mask is not None:\n w = w * head_mask\n\n outputs = [torch.matmul(w, v)]\n if output_attentions:\n outputs.append(w)\n return outputs\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def forward(\n self,\n hidden_states,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=False,\n output_attentions=False,\n ):\n if encoder_hidden_states is not None:\n assert hasattr(\n self, \"q_attn\"\n ), \"If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`.\"\n query = self.q_attn(hidden_states)\n key, value = self.c_attn(encoder_hidden_states).split(self.split_size,\n dim=2)\n attention_mask = encoder_attention_mask\n else:\n query, key, value = self.c_attn(hidden_states).split(self.split_size,\n dim=2)\n\n query = self.split_heads(query)\n key = self.split_heads(key, k=True)\n value = self.split_heads(value)\n if layer_past is not None:\n past_key, past_value = layer_past[0].transpose(\n -2, -1), layer_past[1] # transpose back cf below\n key = torch.cat((past_key, key), dim=-1)\n value = torch.cat((past_value, value), dim=-2)\n\n if use_cache is True:\n present = torch.stack(\n (key.transpose(-2, -1),\n value)) # transpose to have same shapes for stacking\n else:\n present = (None,)\n\n attn_outputs = self._attn(query, key, value, attention_mask, head_mask,\n output_attentions)\n a = attn_outputs[0]\n\n a = self.merge_heads(a)\n a = self.c_proj(a)\n a = self.resid_dropout(a)\n\n outputs = [a, present] + attn_outputs[1:]\n return outputs # a, present, (attentions)\n\n\nclass MLP(nn.Module):\n\n def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)\n super().__init__()\n nx = config.n_embd\n self.c_fc = Conv1D(n_state, nx)\n self.c_proj = Conv1D(nx, n_state)\n self.act = ACT2FN[config.activation_function]\n self.dropout = nn.Dropout(config.resid_pdrop)\n\n def forward(self, x):\n h = self.act(self.c_fc(x))\n h2 = self.c_proj(h)\n return self.dropout(h2)\n\n\nclass AdapterMLP(nn.Module):\n\n def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)\n super().__init__()\n nx = config.n_embd\n self.c_fc = Conv1D(n_state, nx)\n self.c_proj = Conv1D(nx, n_state)\n self.act = ACT2FN[config.activation_function]\n self.dropout = nn.Dropout(config.resid_pdrop)\n\n def forward(self, x):\n h = self.act(self.c_fc(x))\n h2 = self.c_proj(h)\n return self.dropout(h2)\n\n\nclass Block(nn.Module):\n\n def __init__(self, n_ctx, config, scale=False):\n super().__init__()\n hidden_size = config.n_embd\n inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size\n self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n self.attn = 
Attention(hidden_size, n_ctx, config, scale)\n self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n # self.adapter_ln = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n if config.add_cross_attention:\n self.crossattention = Attention(hidden_size,\n n_ctx,\n config,\n scale,\n is_cross_attention=True)\n self.ln_cross_attn = nn.LayerNorm(hidden_size,\n eps=config.layer_norm_epsilon)\n self.mlp = MLP(inner_dim, config)\n # self.adapter_mlp = AdapterMLP(512, config) # ADAPTER\n\n def forward(\n self,\n hidden_states,\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=False,\n output_attentions=False,\n ):\n attn_outputs = self.attn(\n self.ln_1(hidden_states),\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attn_outputs[0] # output_attn: a, present, (attentions)\n outputs = attn_outputs[1:]\n # residual connection\n hidden_states = attn_output + hidden_states\n\n if encoder_hidden_states is not None:\n # add one self-attention block for cross-attention\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n cross_attn_outputs = self.crossattention(\n self.ln_cross_attn(hidden_states),\n attention_mask=attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n )\n attn_output = cross_attn_outputs[0]\n # residual connection\n hidden_states = hidden_states + attn_output\n outputs = outputs + cross_attn_outputs[\n 2:] # add cross attentions if we output attention weights\n\n feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))\n # residual connection\n hidden_states = hidden_states + feed_forward_hidden_states\n # hidden_states = hidden_states + self.adapter_ln(self.adapter_mlp(hidden_states))\n\n outputs = [hidden_states] + outputs\n return outputs # hidden_states, present, (attentions, cross_attentions)\n\n\nclass GPT2PreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = GPT2Config\n load_tf_weights = load_tf_weights_in_gpt2\n base_model_prefix = \"transformer\"\n\n def __init__(self, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights.\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n # module.weight.data.fill_(.01) # KL: Adapter change\n\n\n@dataclass\nclass GPT2DoubleHeadsModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of models predicting if two sentences are consecutive or not.\n Args:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):\n Language modeling loss.\n mc_loss (:obj:`torch.FloatTensor` of 
shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):\n Multiple choice classification loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):\n Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).\n past_key_values (:obj:`List[torch.FloatTensor]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n List of :obj:`torch.FloatTensor` of length :obj:`config.n_layers`, with each tensor of shape :obj:`(2,\n batch_size, num_heads, sequence_length, embed_size_per_head)`).\n Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see\n :obj:`past_key_values` input) to speed up sequential decoding.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n mc_loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n mc_logits: torch.FloatTensor = None\n past_key_values: Optional[List[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nGPT2_START_DOCSTRING = r\"\"\"\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n Parameters:\n config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nGPT2_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):\n :obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else\n ``past_key_values[0].shape[-2]`` (``sequence_length`` of input past key value states). 
Indices of input\n sequence tokens in the vocabulary.\n If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be\n passed as ``input_ids``.\n Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n `What are input IDs? <../glossary.html#input-ids>`__\n past_key_values (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n :obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which\n have their past given to this model should not be passed as ``input_ids`` as they have already been\n computed.\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see\n :obj:`past_key_values`).\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\nPARALLELIZE_DOCSTRING = r\"\"\"\n Uses a device map to distribute attention modules of the model across several devices. 
If no device map is given,\n it will evenly distribute blocks across all devices.\n Args:\n device_map (:obj:`Dict[int, list]`, optional, defaults to None):\n A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always\n automatically mapped to the first device (for esoteric reasons). That means that the first device should\n have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the\n following number of attention modules:\n - gpt2: 12\n - gpt2-medium: 24\n - gpt2-large: 36\n - gpt2-xl: 48\n Example::\n # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:\n model = GPT2LMHeadModel.from_pretrained('gpt2-xl')\n device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],\n 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],\n 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],\n 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]}\n model.parallelize(device_map)\n\"\"\"\nDEPARALLELIZE_DOCSTRING = r\"\"\"\n Moves the model to cpu from a model parallel state.\n Example::\n # On a 4 GPU machine with gpt2-large:\n model = GPT2LMHeadModel.from_pretrained('gpt2-large')\n device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7],\n 1: [8, 9, 10, 11, 12, 13, 14, 15],\n 2: [16, 17, 18, 19, 20, 21, 22, 23],\n 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]}\n model.parallelize(device_map) # Splits the model across several devices\n model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.\",\n GPT2_START_DOCSTRING,\n)\nclass GPT2Model(GPT2PreTrainedModel):\n\n def __init__(self, config):\n super().__init__(config)\n\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n # self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n self.drop = nn.Dropout(config.embd_pdrop)\n self.h = nn.ModuleList([\n Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)\n ])\n self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n\n self.init_weights()\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n self.use_layers = None\n\n def set_layers(self, num_layers):\n assert 1 <= num_layers <= len(self.h)\n if num_layers is not None:\n num_layers -= 1\n self.use_layers = num_layers\n\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\n def parallelize(self, device_map=None):\n # Check validity of device_map\n self.device_map = (get_device_map(len(self.h),\n range(torch.cuda.device_count()))\n if device_map is None else device_map)\n assert_device_map(self.device_map, len(self.h))\n self.model_parallel = True\n self.first_device = \"cpu\" if \"cpu\" in self.device_map.keys(\n ) else \"cuda:\" + str(min(self.device_map.keys()))\n self.last_device = \"cuda:\" + str(max(self.device_map.keys()))\n self.wte = self.wte.to(self.first_device)\n self.wpe = self.wpe.to(self.first_device)\n # Load onto devices\n for k, v in self.device_map.items():\n for block in v:\n cuda_device = \"cuda:\" + str(k)\n self.h[block] = self.h[block].to(cuda_device)\n # ln_f to last\n self.ln_f = self.ln_f.to(self.last_device)\n\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\n def deparallelize(self):\n self.model_parallel = False\n self.device_map = None\n self.first_device = \"cpu\"\n self.last_device = \"cpu\"\n self.wte = self.wte.to(\"cpu\")\n self.wpe = 
self.wpe.to(\"cpu\")\n for index in range(len(self.h)):\n self.h[index] = self.h[index].to(\"cpu\")\n self.ln_f = self.ln_f.to(\"cpu\")\n torch.cuda.empty_cache()\n\n def get_input_embeddings(self):\n return self.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.wte = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.h[layer].attn.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"gpt2\",\n output_type=BaseModelOutputWithPastAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (output_hidden_states if output_hidden_states\n is not None else self.config.output_hidden_states)\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot specify both input_ids and inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past_key_values is None:\n past_length = 0\n past_key_values = [None] * len(self.h)\n else:\n past_length = past_key_values[0][0].size(-2)\n if position_ids is None:\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n position_ids = torch.arange(past_length,\n input_shape[-1] + past_length,\n dtype=torch.long,\n device=device)\n position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n # Attention mask.\n if attention_mask is not None:\n assert batch_size > 0, \"batch_size has to be defined and > 0\"\n attention_mask = attention_mask.view(batch_size, -1)\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = attention_mask[:, None, None, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as 
removing these entirely.\n attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility\n attention_mask = (1.0 - attention_mask) * -10000.0\n\n # If a 2D ou 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.add_cross_attention and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size(\n )\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_attention_mask = self.invert_attention_mask(\n encoder_attention_mask)\n else:\n encoder_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # head_mask has shape n_layer x batch x n_heads x N x N\n head_mask = self.get_head_mask(head_mask, self.config.n_layer)\n\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n # position_embeds = self.wpe(position_ids)\n hidden_states = inputs_embeds # + position_embeds\n\n if token_type_ids is not None:\n token_type_embeds = self.wte(token_type_ids)\n hidden_states = hidden_states + token_type_embeds\n\n hidden_states = self.drop(hidden_states)\n\n output_shape = input_shape + (hidden_states.size(-1),)\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = (\n ) if output_attentions and self.config.add_cross_attention else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n\n if self.use_layers is not None and i >= self.use_layers:\n break\n\n # Model parallel\n if self.model_parallel:\n torch.cuda.set_device(hidden_states.device)\n # Ensure layer_past is on same device as hidden_states (might not be correct)\n if layer_past is not None:\n layer_past = layer_past.to(hidden_states.device)\n # Ensure that attention_mask is always on the same device as hidden_states\n if attention_mask is not None:\n attention_mask = attention_mask.to(hidden_states.device)\n if isinstance(head_mask, torch.Tensor):\n head_mask = head_mask.to(hidden_states.device)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if getattr(self.config, \"gradient_checkpointing\", False):\n\n def create_custom_forward(module):\n\n def custom_forward(*inputs):\n # checkpointing only works with tuple returns, not with lists\n return tuple(\n output\n for output in module(*inputs, use_cache, output_attentions))\n\n return custom_forward\n\n outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(block),\n hidden_states,\n layer_past,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n hidden_states, present = outputs[:2]\n if use_cache is True:\n presents = presents + (present,)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (outputs[3],)\n\n # Model Parallel: If it's the 
last layer for that device, put things on the next device\n if self.model_parallel:\n for k, v in self.device_map.items():\n if i == v[-1] and \"cuda:\" + str(k) != self.last_device:\n hidden_states = hidden_states.to(\"cuda:\" + str(k + 1))\n\n hidden_states = self.ln_f(hidden_states)\n\n hidden_states = hidden_states.view(*output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v for v in\n [hidden_states, presents, all_hidden_states, all_self_attentions]\n if v is not None)\n\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.zeros",
"numpy.zeros_like"
],
[
"torch.clone"
],
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.ones",
"torch.cuda.set_device",
"torch.cat",
"torch.from_numpy",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"tensorflow.train.load_variable",
"torch.matmul",
"torch.cuda.empty_cache",
"torch.tensor",
"torch.arange",
"tensorflow.train.list_variables",
"torch.cuda.device_count"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
un-knight/models
|
[
"196f7f2eecf7938dfe4ac2372a51cebbd8a7c170",
"196f7f2eecf7938dfe4ac2372a51cebbd8a7c170"
] |
[
"tutorials/image/cifar10/cifar10_input.py",
"tutorials/rnn/ptb/ptb_word_lm.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Routine for decoding the CIFAR-10 binary file format.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n# Process images of this size. Note that this differs from the original CIFAR\n# image size of 32 x 32. If one alters this number, then the entire model\n# architecture will change and any model would need to be retrained.\nIMAGE_SIZE = 24\n\n# Global constants describing the CIFAR-10 data set.\nNUM_CLASSES = 10\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000\n\n\ndef read_cifar10(filename_queue):\n \"\"\"Reads and parses examples from CIFAR10 data files.\n\n Recommendation: if you want N-way read parallelism, call this function\n N times. This will give you N independent Readers reading different\n files & positions within those files, which will give better mixing of\n examples.\n\n Args:\n filename_queue: A queue of strings with the filenames to read from.\n\n Returns:\n An object representing a single example, with the following fields:\n height: number of rows in the result (32)\n width: number of columns in the result (32)\n depth: number of color channels in the result (3)\n key: a scalar string Tensor describing the filename & record number\n for this example.\n label: an int32 Tensor with the label in the range 0..9.\n uint8image: a [height, width, depth] uint8 Tensor with the image data\n \"\"\"\n\n class CIFAR10Record(object):\n pass\n result = CIFAR10Record()\n\n # Dimensions of the images in the CIFAR-10 dataset.\n # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the\n # input format.\n label_bytes = 1 # 2 for CIFAR-100\n result.height = 32\n result.width = 32\n result.depth = 3\n image_bytes = result.height * result.width * result.depth\n # Every record consists of a label followed by the image, with a\n # fixed number of bytes for each.\n record_bytes = label_bytes + image_bytes\n\n # Read a record, getting filenames from the filename_queue. 
No\n # header or footer in the CIFAR-10 format, so we leave header_bytes\n # and footer_bytes at their default of 0.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)\n result.key, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8 that is record_bytes long.\n record_bytes = tf.decode_raw(value, tf.uint8)\n\n # The first bytes represent the label, which we convert from uint8->int32.\n result.label = tf.cast(\n tf.slice(record_bytes, [0], [label_bytes]), tf.int32)\n\n # The remaining bytes after the label represent the image, which we reshape\n # from [depth * height * width] to [depth, height, width].\n depth_major = tf.reshape(\n tf.slice(record_bytes, [label_bytes],\n [image_bytes]),\n [result.depth, result.height, result.width])\n # Convert from [depth, height, width] to [height, width, depth].\n result.uint8image = tf.transpose(depth_major, [1, 2, 0])\n\n return result\n\n\ndef _generate_image_and_label_batch(image, label, min_queue_examples,\n batch_size, shuffle):\n \"\"\"Construct a queued batch of images and labels.\n\n Args:\n image: 3-D Tensor of [height, width, 3] of type.float32.\n label: 1-D Tensor of type.int32\n min_queue_examples: int32, minimum number of samples to retain\n in the queue that provides of batches of examples.\n batch_size: Number of images per batch.\n shuffle: boolean indicating whether to use a shuffling queue.\n\n Returns:\n images: Images. 4D tensor of [batch_size, height, width, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n # Create a queue that shuffles the examples, and then\n # read 'batch_size' images + labels from the example queue.\n num_preprocess_threads = 16\n if shuffle:\n images, label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n else:\n images, label_batch = tf.train.batch(\n [image, label],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n\n # Display the training images in the visualizer.\n tf.contrib.deprecated.image_summary('images', images)\n\n return images, tf.reshape(label_batch, [batch_size])\n\n\ndef distorted_inputs(data_dir, batch_size):\n \"\"\"Construct distorted input for CIFAR training using the Reader ops.\n\n Args:\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in xrange(1, 6)]\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for training the network. 
Note the many random\n # distortions applied to the image.\n\n # Randomly crop a [height, width] section of the image.\n distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\n # Randomly flip the image horizontally.\n distorted_image = tf.image.random_flip_left_right(distorted_image)\n\n # Because these operations are not commutative, consider randomizing\n # the order their operation.\n distorted_image = tf.image.random_brightness(distorted_image,\n max_delta=63)\n distorted_image = tf.image.random_contrast(distorted_image,\n lower=0.2, upper=1.8)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(distorted_image)\n\n # Set the shapes of tensors.\n float_image.set_shape([height, width, 3])\n read_input.label.set_shape([1])\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n min_fraction_of_examples_in_queue)\n print ('Filling queue with %d CIFAR images before starting to train. '\n 'This will take a few minutes.' % min_queue_examples)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=True)\n\n\ndef inputs(eval_data, data_dir, batch_size):\n \"\"\"Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n \"\"\"\n if not eval_data:\n filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)\n for i in xrange(1, 6)]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\n else:\n filenames = [os.path.join(data_dir, 'test_batch.bin')]\n num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n for f in filenames:\n if not tf.gfile.Exists(f):\n raise ValueError('Failed to find file: ' + f)\n\n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer(filenames)\n\n # Read examples from files in the filename queue.\n read_input = read_cifar10(filename_queue)\n reshaped_image = tf.cast(read_input.uint8image, tf.float32)\n\n height = IMAGE_SIZE\n width = IMAGE_SIZE\n\n # Image processing for evaluation.\n # Crop the central [height, width] of the image.\n resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,\n width, height)\n\n # Subtract off the mean and divide by the variance of the pixels.\n float_image = tf.image.per_image_standardization(resized_image)\n\n # Ensure that the random shuffling has good mixing properties.\n min_fraction_of_examples_in_queue = 0.4\n min_queue_examples = int(num_examples_per_epoch *\n min_fraction_of_examples_in_queue)\n\n # Generate a batch of images and labels by building up a queue of examples.\n return _generate_image_and_label_batch(float_image, read_input.label,\n min_queue_examples, batch_size,\n shuffle=False)\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Example / benchmark for building a PTB LSTM model.\n\nTrains the model described in:\n(Zaremba, et. al.) Recurrent Neural Network Regularization\nhttp://arxiv.org/abs/1409.2329\n\nThere are 3 supported model configurations:\n===========================================\n| config | epochs | train | valid | test\n===========================================\n| small | 13 | 37.99 | 121.39 | 115.91\n| medium | 39 | 48.45 | 86.16 | 82.07\n| large | 55 | 37.87 | 82.62 | 78.29\nThe exact results may vary depending on the random initialization.\n\nThe hyperparameters used in the model:\n- init_scale - the initial scale of the weights\n- learning_rate - the initial value of the learning rate\n- max_grad_norm - the maximum permissible norm of the gradient\n- num_layers - the number of LSTM layers\n- num_steps - the number of unrolled steps of LSTM\n- hidden_size - the number of LSTM units\n- max_epoch - the number of epochs trained with the initial learning rate\n- max_max_epoch - the total number of epochs for training\n- keep_prob - the probability of keeping weights in the dropout layer\n- lr_decay - the decay of the learning rate for each epoch after \"max_epoch\"\n- batch_size - the batch size\n\nThe data required for this example is in the data/ dir of the\nPTB dataset from Tomas Mikolov's webpage:\n\n$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz\n$ tar xvf simple-examples.tgz\n\nTo run:\n\n$ python ptb_word_lm.py --data_path=simple-examples/data/\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.models.rnn.ptb import reader\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string(\n \"model\", \"small\",\n \"A type of model. 
Possible options are: small, medium, large.\")\nflags.DEFINE_string(\"data_path\", None,\n \"Where the training/test data is stored.\")\nflags.DEFINE_string(\"save_path\", None,\n \"Model output directory.\")\nflags.DEFINE_bool(\"use_fp16\", False,\n \"Train using 16-bit floats instead of 32bit floats\")\n\nFLAGS = flags.FLAGS\n\n\ndef data_type():\n return tf.float16 if FLAGS.use_fp16 else tf.float32\n\n\nclass PTBInput(object):\n \"\"\"The input data.\"\"\"\n\n def __init__(self, config, data, name=None):\n self.batch_size = batch_size = config.batch_size\n self.num_steps = num_steps = config.num_steps\n self.epoch_size = ((len(data) // batch_size) - 1) // num_steps\n self.input_data, self.targets = reader.ptb_producer(\n data, batch_size, num_steps, name=name)\n\n\nclass PTBModel(object):\n \"\"\"The PTB model.\"\"\"\n\n def __init__(self, is_training, config, input_):\n self._input = input_\n\n batch_size = input_.batch_size\n num_steps = input_.num_steps\n size = config.hidden_size\n vocab_size = config.vocab_size\n\n # Slightly better results can be obtained with forget gate biases\n # initialized to 1 but the hyperparameters of the model would need to be\n # different than reported in the paper.\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(\n size, forget_bias=0.0, state_is_tuple=True)\n if is_training and config.keep_prob < 1:\n lstm_cell = tf.contrib.rnn.DropoutWrapper(\n lstm_cell, output_keep_prob=config.keep_prob)\n cell = tf.contrib.rnn.MultiRNNCell(\n [lstm_cell] * config.num_layers, state_is_tuple=True)\n\n self._initial_state = cell.zero_state(batch_size, data_type())\n\n with tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\n \"embedding\", [vocab_size, size], dtype=data_type())\n inputs = tf.nn.embedding_lookup(embedding, input_.input_data)\n\n if is_training and config.keep_prob < 1:\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n\n # Simplified version of tensorflow.models.rnn.rnn.py's rnn().\n # This builds an unrolled LSTM for tutorial purposes only.\n # In general, use the rnn() or state_saving_rnn() from rnn.py.\n #\n # The alternative version of the code below is:\n #\n # inputs = tf.unstack(inputs, num=num_steps, axis=1)\n # outputs, state = tf.nn.rnn(cell, inputs,\n # initial_state=self._initial_state)\n outputs = []\n state = self._initial_state\n with tf.variable_scope(\"RNN\"):\n for time_step in range(num_steps):\n if time_step > 0: tf.get_variable_scope().reuse_variables()\n (cell_output, state) = cell(inputs[:, time_step, :], state)\n outputs.append(cell_output)\n\n output = tf.reshape(tf.concat_v2(outputs, 1), [-1, size])\n softmax_w = tf.get_variable(\n \"softmax_w\", [size, vocab_size], dtype=data_type())\n softmax_b = tf.get_variable(\"softmax_b\", [vocab_size], dtype=data_type())\n logits = tf.matmul(output, softmax_w) + softmax_b\n loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n [logits],\n [tf.reshape(input_.targets, [-1])],\n [tf.ones([batch_size * num_steps], dtype=data_type())])\n self._cost = cost = tf.reduce_sum(loss) / batch_size\n self._final_state = state\n\n if not is_training:\n return\n\n self._lr = tf.Variable(0.0, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),\n config.max_grad_norm)\n optimizer = tf.train.GradientDescentOptimizer(self._lr)\n self._train_op = optimizer.apply_gradients(\n zip(grads, tvars),\n global_step=tf.contrib.framework.get_or_create_global_step())\n\n self._new_lr = tf.placeholder(\n tf.float32, shape=[], 
name=\"new_learning_rate\")\n self._lr_update = tf.assign(self._lr, self._new_lr)\n\n def assign_lr(self, session, lr_value):\n session.run(self._lr_update, feed_dict={self._new_lr: lr_value})\n\n @property\n def input(self):\n return self._input\n\n @property\n def initial_state(self):\n return self._initial_state\n\n @property\n def cost(self):\n return self._cost\n\n @property\n def final_state(self):\n return self._final_state\n\n @property\n def lr(self):\n return self._lr\n\n @property\n def train_op(self):\n return self._train_op\n\n\nclass SmallConfig(object):\n \"\"\"Small config.\"\"\"\n init_scale = 0.1\n learning_rate = 1.0\n max_grad_norm = 5\n num_layers = 2\n num_steps = 20\n hidden_size = 200\n max_epoch = 4\n max_max_epoch = 13\n keep_prob = 1.0\n lr_decay = 0.5\n batch_size = 20\n vocab_size = 10000\n\n\nclass MediumConfig(object):\n \"\"\"Medium config.\"\"\"\n init_scale = 0.05\n learning_rate = 1.0\n max_grad_norm = 5\n num_layers = 2\n num_steps = 35\n hidden_size = 650\n max_epoch = 6\n max_max_epoch = 39\n keep_prob = 0.5\n lr_decay = 0.8\n batch_size = 20\n vocab_size = 10000\n\n\nclass LargeConfig(object):\n \"\"\"Large config.\"\"\"\n init_scale = 0.04\n learning_rate = 1.0\n max_grad_norm = 10\n num_layers = 2\n num_steps = 35\n hidden_size = 1500\n max_epoch = 14\n max_max_epoch = 55\n keep_prob = 0.35\n lr_decay = 1 / 1.15\n batch_size = 20\n vocab_size = 10000\n\n\nclass TestConfig(object):\n \"\"\"Tiny config, for testing.\"\"\"\n init_scale = 0.1\n learning_rate = 1.0\n max_grad_norm = 1\n num_layers = 1\n num_steps = 2\n hidden_size = 2\n max_epoch = 1\n max_max_epoch = 1\n keep_prob = 1.0\n lr_decay = 0.5\n batch_size = 20\n vocab_size = 10000\n\n\ndef run_epoch(session, model, eval_op=None, verbose=False):\n \"\"\"Runs the model on the given data.\"\"\"\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = session.run(model.initial_state)\n\n fetches = {\n \"cost\": model.cost,\n \"final_state\": model.final_state,\n }\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n\n for step in range(model.input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n iters += model.input.num_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),\n iters * model.input.batch_size / (time.time() - start_time)))\n\n return np.exp(costs / iters)\n\n\ndef get_config():\n if FLAGS.model == \"small\":\n return SmallConfig()\n elif FLAGS.model == \"medium\":\n return MediumConfig()\n elif FLAGS.model == \"large\":\n return LargeConfig()\n elif FLAGS.model == \"test\":\n return TestConfig()\n else:\n raise ValueError(\"Invalid model: %s\", FLAGS.model)\n\n\ndef main(_):\n if not FLAGS.data_path:\n raise ValueError(\"Must set --data_path to PTB data directory\")\n\n raw_data = reader.ptb_raw_data(FLAGS.data_path)\n train_data, valid_data, test_data, _ = raw_data\n\n config = get_config()\n eval_config = get_config()\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n\n with tf.Graph().as_default():\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n\n with tf.name_scope(\"Train\"):\n train_input = PTBInput(config=config, data=train_data, name=\"TrainInput\")\n with 
tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n m = PTBModel(is_training=True, config=config, input_=train_input)\n tf.contrib.deprecated.scalar_summary(\"Training Loss\", m.cost)\n tf.contrib.deprecated.scalar_summary(\"Learning Rate\", m.lr)\n\n with tf.name_scope(\"Valid\"):\n valid_input = PTBInput(config=config, data=valid_data, name=\"ValidInput\")\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mvalid = PTBModel(is_training=False, config=config, input_=valid_input)\n tf.contrib.deprecated.scalar_summary(\"Validation Loss\", mvalid.cost)\n\n with tf.name_scope(\"Test\"):\n test_input = PTBInput(config=eval_config, data=test_data, name=\"TestInput\")\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mtest = PTBModel(is_training=False, config=eval_config,\n input_=test_input)\n\n sv = tf.train.Supervisor(logdir=FLAGS.save_path)\n with sv.managed_session() as session:\n for i in range(config.max_max_epoch):\n lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)\n m.assign_lr(session, config.learning_rate * lr_decay)\n\n print(\"Epoch: %d Learning rate: %.3f\" % (i + 1, session.run(m.lr)))\n train_perplexity = run_epoch(session, m, eval_op=m.train_op,\n verbose=True)\n print(\"Epoch: %d Train Perplexity: %.3f\" % (i + 1, train_perplexity))\n valid_perplexity = run_epoch(session, mvalid)\n print(\"Epoch: %d Valid Perplexity: %.3f\" % (i + 1, valid_perplexity))\n\n test_perplexity = run_epoch(session, mtest)\n print(\"Test Perplexity: %.3f\" % test_perplexity)\n\n if FLAGS.save_path:\n print(\"Saving model to %s.\" % FLAGS.save_path)\n sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n"
] |
[
[
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.image.random_brightness",
"tensorflow.transpose",
"tensorflow.image.random_flip_left_right",
"tensorflow.image.random_contrast",
"tensorflow.contrib.deprecated.image_summary",
"tensorflow.slice",
"tensorflow.gfile.Exists",
"tensorflow.decode_raw",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.FixedLengthRecordReader",
"tensorflow.random_crop",
"tensorflow.train.string_input_producer",
"tensorflow.image.per_image_standardization",
"tensorflow.train.batch",
"tensorflow.train.shuffle_batch"
],
[
"tensorflow.device",
"tensorflow.reduce_sum",
"numpy.exp",
"tensorflow.Graph",
"tensorflow.Variable",
"tensorflow.random_uniform_initializer",
"tensorflow.gradients",
"tensorflow.contrib.rnn.MultiRNNCell",
"tensorflow.name_scope",
"tensorflow.trainable_variables",
"tensorflow.app.run",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.contrib.deprecated.scalar_summary",
"tensorflow.placeholder",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.concat_v2",
"tensorflow.nn.embedding_lookup",
"tensorflow.models.rnn.ptb.reader.ptb_raw_data",
"tensorflow.models.rnn.ptb.reader.ptb_producer",
"tensorflow.contrib.rnn.DropoutWrapper",
"tensorflow.contrib.rnn.BasicLSTMCell",
"tensorflow.assign",
"tensorflow.reshape",
"tensorflow.train.Supervisor",
"tensorflow.variable_scope",
"tensorflow.contrib.framework.get_or_create_global_step",
"tensorflow.get_variable_scope"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
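A quick illustration of the perplexity bookkeeping in the `run_epoch` function of the PTB row above: it accumulates per-batch cross-entropy into `costs`, counts unrolled steps in `iters`, and reports `np.exp(costs / iters)`. A minimal sketch of that arithmetic with made-up loss values (illustrative only, not part of the archived file):

import numpy as np

# hypothetical per-batch costs, mirroring `costs += cost` and
# `iters += model.input.num_steps` in run_epoch
num_steps = 20
batch_costs = [5.2 * num_steps, 4.9 * num_steps, 4.7 * num_steps]
costs = float(np.sum(batch_costs))
iters = num_steps * len(batch_costs)

# perplexity = exp(average per-word cross-entropy), as in run_epoch
perplexity = np.exp(costs / iters)
print("perplexity: %.3f" % perplexity)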
Anevar/mne-python
|
[
"15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb",
"15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb",
"15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb"
] |
[
"mne/simulation/evoked.py",
"mne/fiff/meas_info.py",
"examples/inverse/plot_gamma_map_inverse.py"
] |
[
"# Authors: Alexandre Gramfort <[email protected]>\n# Daniel Strohmeier <[email protected]>\n# Martin Luessi <[email protected]>\n#\n# License: BSD (3-clause)\nimport copy\n\nimport numpy as np\nfrom scipy import signal\n\nfrom ..fiff.pick import pick_channels_cov\nfrom ..utils import check_random_state\nfrom ..forward import apply_forward\n\n\ndef generate_evoked(fwd, stc, evoked, cov, snr=3, tmin=None, tmax=None,\n iir_filter=None, random_state=None):\n \"\"\"Generate noisy evoked data\n\n Parameters\n ----------\n fwd : dict\n a forward solution\n stc : SourceEstimate object\n The source time courses\n evoked : Evoked object\n An instance of evoked used as template\n cov : Covariance object\n The noise covariance\n snr : float\n signal to noise ratio in dB. It corresponds to\n 10 * log10( var(signal) / var(noise) )\n tmin : float | None\n start of time interval to estimate SNR. If None first time point\n is used.\n tmax : float\n start of time interval to estimate SNR. If None last time point\n is used.\n iir_filter : None | array\n IIR filter coefficients (denominator) e.g. [1, -1, 0.2]\n random_state : None | int | np.random.RandomState\n To specify the random generator state.\n\n Returns\n -------\n evoked : Evoked object\n The simulated evoked data\n \"\"\"\n evoked = apply_forward(fwd, stc, evoked)\n noise = generate_noise_evoked(evoked, cov, iir_filter, random_state)\n evoked_noise = add_noise_evoked(evoked, noise, snr, tmin=tmin, tmax=tmax)\n return evoked_noise\n\n\ndef generate_noise_evoked(evoked, noise_cov, iir_filter=None,\n random_state=None):\n \"\"\"Creates noise as a multivariate Gaussian\n\n The spatial covariance of the noise is given from the cov matrix.\n\n Parameters\n ----------\n evoked : evoked object\n an instance of evoked used as template\n cov : Covariance object\n The noise covariance\n iir_filter : None | array\n IIR filter coefficients (denominator)\n random_state : None | int | np.random.RandomState\n To specify the random generator state.\n\n Returns\n -------\n noise : evoked object\n an instance of evoked\n \"\"\"\n noise = copy.deepcopy(evoked)\n noise_cov = pick_channels_cov(noise_cov, include=noise.info['ch_names'])\n rng = check_random_state(random_state)\n n_channels = np.zeros(noise.info['nchan'])\n n_samples = evoked.data.shape[1]\n noise.data = rng.multivariate_normal(n_channels, noise_cov.data,\n n_samples).T\n if iir_filter is not None:\n noise.data = signal.lfilter([1], iir_filter, noise.data, axis=-1)\n return noise\n\n\ndef add_noise_evoked(evoked, noise, snr, tmin=None, tmax=None):\n \"\"\"Adds noise to evoked object with specified SNR.\n\n SNR is computed in the interval from tmin to tmax.\n\n Parameters\n ----------\n evoked : Evoked object\n An instance of evoked with signal\n noise : Evoked object\n An instance of evoked with noise\n snr : float\n signal to noise ratio in dB. It corresponds to\n 10 * log10( var(signal) / var(noise) )\n tmin : float\n start time before event\n tmax : float\n end time after event\n\n Returns\n -------\n evoked_noise : Evoked object\n An instance of evoked corrupted by noise\n \"\"\"\n evoked = copy.deepcopy(evoked)\n times = evoked.times\n if tmin is None:\n tmin = np.min(times)\n if tmax is None:\n tmax = np.max(times)\n tmask = (times >= tmin) & (times <= tmax)\n tmp = np.mean((evoked.data[:, tmask] ** 2).ravel()) / \\\n np.mean((noise.data ** 2).ravel())\n tmp = 10 * np.log10(tmp)\n noise.data = 10 ** ((tmp - float(snr)) / 20) * noise.data\n evoked.data += noise.data\n return evoked\n",
"# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom warnings import warn\nfrom copy import deepcopy\nimport os.path as op\nimport numpy as np\nfrom scipy import linalg\nfrom ..externals.six import BytesIO\nfrom datetime import datetime as dt\n\nfrom .open import fiff_open\nfrom .tree import dir_tree_find, copy_tree\nfrom .constants import FIFF\nfrom .tag import read_tag\nfrom .proj import read_proj, write_proj\nfrom .ctf import read_ctf_comp, write_ctf_comp\nfrom .channels import read_bad_channels\nfrom .write import (start_file, end_file, start_block, end_block,\n write_string, write_dig_point, write_float, write_int,\n write_coord_trans, write_ch_info, write_name_list,\n write_julian)\nfrom ..utils import logger, verbose\n\n\ndef _summarize_str(st):\n \"\"\"Aux function\"\"\"\n return st[:56][::-1].split(',', 1)[-1][::-1] + ', ...'\n\n\nclass Info(dict):\n \"\"\" Info class to nicely represent info dicts\n \"\"\"\n\n def __repr__(self):\n \"\"\"Summarize info instead of printing all\"\"\"\n strs = ['<Info | %s non-empty fields']\n non_empty = 0\n for k, v in self.items():\n if k in ['bads', 'ch_names']:\n entr = (', '.join(b for ii, b in enumerate(v) if ii < 10)\n if v else '0 items')\n if len(entr) >= 56:\n # get rid of of half printed ch names\n entr = _summarize_str(entr)\n elif k == 'filename' and v:\n path, fname = op.split(v)\n entr = path[:10] + '.../' + fname\n elif k == 'projs' and v:\n entr = ', '.join(p['desc'] + ': o%s' %\n {0: 'ff', 1: 'n'}[p['active']] for p in v)\n if len(entr) >= 56:\n entr = _summarize_str(entr)\n elif k == 'meas_date' and np.iterable(v):\n # first entire in meas_date is meaningful\n entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S')\n else:\n this_len = (len(v) if hasattr(v, '__len__') else\n ('%s' % v if v is not None else None))\n entr = (('%d items' % this_len) if isinstance(this_len, int)\n else ('%s' % this_len if this_len else ''))\n if entr:\n non_empty += 1\n entr = ' | ' + entr\n strs.append('%s : %s%s' % (k, str(type(v))[7:-2], entr))\n strs_non_empty = sorted(s for s in strs if '|' in s)\n strs_empty = sorted(s for s in strs if '|' not in s)\n st = '\\n '.join(strs_non_empty + strs_empty)\n st += '\\n>'\n st %= non_empty\n return st\n\n def _anonymize(self):\n if self.get('subject_info') is not None:\n del self['subject_info']\n\n\ndef read_fiducials(fname):\n \"\"\"Read fiducials from a fiff file\n\n Returns\n -------\n pts : list of dicts\n List of digitizer points (each point in a dict).\n coord_frame : int\n The coordinate frame of the points (one of\n mne.fiff.FIFF.FIFFV_COORD_...)\n \"\"\"\n fid, tree, _ = fiff_open(fname)\n with fid:\n isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)\n isotrak = isotrak[0]\n pts = []\n coord_frame = FIFF.FIFFV_COORD_UNKNOWN\n for k in range(isotrak['nent']):\n kind = isotrak['directory'][k].kind\n pos = isotrak['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n tag = read_tag(fid, pos)\n pts.append(tag.data)\n elif kind == FIFF.FIFF_MNE_COORD_FRAME:\n tag = read_tag(fid, pos)\n coord_frame = tag.data[0]\n\n if coord_frame == FIFF.FIFFV_COORD_UNKNOWN:\n err = (\"No coordinate frame was found in the file %r, it is probably \"\n \"not a valid fiducials file.\" % fname)\n raise ValueError(err)\n\n # coord_frame is not stored in the tag\n for pt in pts:\n pt['coord_frame'] = coord_frame\n\n return pts, coord_frame\n\n\ndef write_fiducials(fname, pts, coord_frame=0):\n \"\"\"Write fiducials to a fiff file\n\n 
Parameters\n ----------\n fname : str\n Destination file name.\n pts : iterator of dict\n Iterator through digitizer points. Each point is a dictionary with\n the keys 'kind', 'ident' and 'r'.\n coord_frame : int\n The coordinate frame of the points (one of\n mne.fiff.FIFF.FIFFV_COORD_...)\n \"\"\"\n pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts))\n bad_frames = pts_frames - set((coord_frame,))\n if len(bad_frames) > 0:\n err = (\"Points have coord_frame entries that are incompatible with \"\n \"coord_frame=%i: %s.\" % (coord_frame, str(tuple(bad_frames))))\n raise ValueError(err)\n\n fid = start_file(fname)\n start_block(fid, FIFF.FIFFB_ISOTRAK)\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)\n for pt in pts:\n write_dig_point(fid, pt)\n\n end_block(fid, FIFF.FIFFB_ISOTRAK)\n end_file(fid)\n\n\n@verbose\ndef read_info(fname, verbose=None):\n \"\"\"Read measurement info from a file\n\n Parameters\n ----------\n fname : str\n File name.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of mne.fiff.meas_info.Info\n Info on dataset.\n \"\"\"\n f, tree, _ = fiff_open(fname)\n with f as fid:\n info = read_meas_info(fid, tree)[0]\n return info\n\n\n@verbose\ndef read_meas_info(fid, tree, verbose=None):\n \"\"\"Read the measurement info\n\n Parameters\n ----------\n fid : file\n Open file descriptor.\n tree : tree\n FIF tree structure.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n info : instance of mne.fiff.meas_info.Info\n Info on dataset.\n meas : dict\n Node in tree that contains the info.\n \"\"\"\n # Find the desired blocks\n meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)\n if len(meas) == 0:\n raise ValueError('Could not find measurement data')\n if len(meas) > 1:\n raise ValueError('Cannot read more that 1 measurement data')\n meas = meas[0]\n\n meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)\n if len(meas_info) == 0:\n raise ValueError('Could not find measurement info')\n if len(meas_info) > 1:\n raise ValueError('Cannot read more that 1 measurement info')\n meas_info = meas_info[0]\n\n # Read measurement info\n dev_head_t = None\n ctf_head_t = None\n meas_date = None\n highpass = None\n lowpass = None\n nchan = None\n sfreq = None\n chs = []\n experimenter = None\n description = None\n proj_id = None\n proj_name = None\n line_freq = None\n p = 0\n for k in range(meas_info['nent']):\n kind = meas_info['directory'][k].kind\n pos = meas_info['directory'][k].pos\n if kind == FIFF.FIFF_NCHAN:\n tag = read_tag(fid, pos)\n nchan = int(tag.data)\n elif kind == FIFF.FIFF_SFREQ:\n tag = read_tag(fid, pos)\n sfreq = float(tag.data)\n elif kind == FIFF.FIFF_CH_INFO:\n tag = read_tag(fid, pos)\n chs.append(tag.data)\n p += 1\n elif kind == FIFF.FIFF_LOWPASS:\n tag = read_tag(fid, pos)\n lowpass = float(tag.data)\n elif kind == FIFF.FIFF_HIGHPASS:\n tag = read_tag(fid, pos)\n highpass = float(tag.data)\n elif kind == FIFF.FIFF_MEAS_DATE:\n tag = read_tag(fid, pos)\n meas_date = tag.data\n elif kind == FIFF.FIFF_COORD_TRANS:\n tag = read_tag(fid, pos)\n cand = tag.data\n if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n dev_head_t = cand\n elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n ctf_head_t = cand\n elif kind == FIFF.FIFF_EXPERIMENTER:\n tag = read_tag(fid, pos)\n experimenter = tag.data\n elif kind == 
FIFF.FIFF_DESCRIPTION:\n tag = read_tag(fid, pos)\n description = tag.data\n elif kind == FIFF.FIFF_PROJ_ID:\n tag = read_tag(fid, pos)\n proj_id = tag.data\n elif kind == FIFF.FIFF_PROJ_NAME:\n tag = read_tag(fid, pos)\n proj_name = tag.data\n elif kind == FIFF.FIFF_LINE_FREQ:\n tag = read_tag(fid, pos)\n line_freq = float(tag.data)\n\n # Check that we have everything we need\n if nchan is None:\n raise ValueError('Number of channels in not defined')\n\n if sfreq is None:\n raise ValueError('Sampling frequency is not defined')\n\n if len(chs) == 0:\n raise ValueError('Channel information not defined')\n\n if len(chs) != nchan:\n raise ValueError('Incorrect number of channel definitions found')\n\n if dev_head_t is None or ctf_head_t is None:\n hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)\n if len(hpi_result) == 1:\n hpi_result = hpi_result[0]\n for k in range(hpi_result['nent']):\n kind = hpi_result['directory'][k].kind\n pos = hpi_result['directory'][k].pos\n if kind == FIFF.FIFF_COORD_TRANS:\n tag = read_tag(fid, pos)\n cand = tag.data\n if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n dev_head_t = cand\n elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \\\n cand['to'] == FIFF.FIFFV_COORD_HEAD:\n ctf_head_t = cand\n\n # Locate the Polhemus data\n isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)\n dig = None\n if len(isotrak) == 0:\n logger.info('Isotrak not found')\n elif len(isotrak) > 1:\n warn('Multiple Isotrak found')\n else:\n isotrak = isotrak[0]\n dig = []\n for k in range(isotrak['nent']):\n kind = isotrak['directory'][k].kind\n pos = isotrak['directory'][k].pos\n if kind == FIFF.FIFF_DIG_POINT:\n tag = read_tag(fid, pos)\n dig.append(tag.data)\n dig[-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD\n\n # Locate the acquisition information\n acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)\n acq_pars = None\n acq_stim = None\n if len(acqpars) == 1:\n acqpars = acqpars[0]\n for k in range(acqpars['nent']):\n kind = acqpars['directory'][k].kind\n pos = acqpars['directory'][k].pos\n if kind == FIFF.FIFF_DACQ_PARS:\n tag = read_tag(fid, pos)\n acq_pars = tag.data\n elif kind == FIFF.FIFF_DACQ_STIM:\n tag = read_tag(fid, pos)\n acq_stim = tag.data\n\n # Load the SSP data\n projs = read_proj(fid, meas_info)\n\n # Load the CTF compensation data\n comps = read_ctf_comp(fid, meas_info, chs)\n\n # Load the bad channel list\n bads = read_bad_channels(fid, meas_info)\n\n #\n # Put the data together\n #\n if tree['id'] is not None:\n info = Info(file_id=tree['id'])\n else:\n info = Info(file_id=None)\n\n subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)\n if len(subject_info) == 1:\n subject_info = subject_info[0]\n si = dict()\n for k in range(subject_info['nent']):\n kind = subject_info['directory'][k].kind\n pos = subject_info['directory'][k].pos\n if kind == FIFF.FIFF_SUBJ_ID:\n tag = read_tag(fid, pos)\n si['id'] = int(tag.data)\n elif kind == FIFF.FIFF_SUBJ_HIS_ID:\n tag = read_tag(fid, pos)\n si['his_id'] = str(tag.data)\n elif kind == FIFF.FIFF_SUBJ_LAST_NAME:\n tag = read_tag(fid, pos)\n si['last_name'] = str(tag.data)\n elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:\n tag = read_tag(fid, pos)\n si['first_name'] = str(tag.data)\n elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:\n tag = read_tag(fid, pos)\n si['birthday'] = tag.data\n elif kind == FIFF.FIFF_SUBJ_SEX:\n tag = read_tag(fid, pos)\n si['sex'] = int(tag.data)\n elif kind == FIFF.FIFF_SUBJ_HAND:\n tag = read_tag(fid, pos)\n si['hand'] = int(tag.data)\n else:\n 
si = None\n info['subject_info'] = si\n\n # Load extra information blocks\n read_extra_meas_info(fid, tree, info)\n\n # Make the most appropriate selection for the measurement id\n if meas_info['parent_id'] is None:\n if meas_info['id'] is None:\n if meas['id'] is None:\n if meas['parent_id'] is None:\n info['meas_id'] = info['file_id']\n else:\n info['meas_id'] = meas['parent_id']\n else:\n info['meas_id'] = meas['id']\n else:\n info['meas_id'] = meas_info['id']\n else:\n info['meas_id'] = meas_info['parent_id']\n\n info['experimenter'] = experimenter\n info['description'] = description\n info['proj_id'] = proj_id\n info['proj_name'] = proj_name\n\n if meas_date is None:\n info['meas_date'] = [info['meas_id']['secs'], info['meas_id']['usecs']]\n else:\n info['meas_date'] = meas_date\n\n info['nchan'] = nchan\n info['sfreq'] = sfreq\n info['highpass'] = highpass if highpass is not None else 0\n info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0\n info['line_freq'] = line_freq\n\n # Add the channel information and make a list of channel names\n # for convenience\n info['chs'] = chs\n info['ch_names'] = [ch['ch_name'] for ch in chs]\n\n #\n # Add the coordinate transformations\n #\n info['dev_head_t'] = dev_head_t\n info['ctf_head_t'] = ctf_head_t\n if dev_head_t is not None and ctf_head_t is not None:\n head_ctf_trans = linalg.inv(ctf_head_t['trans'])\n dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])\n info['dev_ctf_t'] = {'from': FIFF.FIFFV_COORD_DEVICE,\n 'to': FIFF.FIFFV_MNE_COORD_CTF_HEAD,\n 'trans': dev_ctf_trans}\n else:\n info['dev_ctf_t'] = None\n\n # All kinds of auxliary stuff\n info['dig'] = dig\n info['bads'] = bads\n info['projs'] = projs\n info['comps'] = comps\n info['acq_pars'] = acq_pars\n info['acq_stim'] = acq_stim\n\n return info, meas\n\n\ndef read_extra_meas_info(fid, tree, info):\n \"\"\"Read extra blocks from fid\"\"\"\n # current method saves them into a BytesIO file instance for simplicity\n # this and its partner, write_extra_meas_info, could be made more\n # comprehensive (i.e.., actually parse and read the data instead of\n # just storing it for later)\n blocks = [FIFF.FIFFB_EVENTS, FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS,\n FIFF.FIFFB_PROCESSING_HISTORY]\n info['orig_blocks'] = blocks\n\n fid_str = BytesIO()\n fid_str = start_file(fid_str)\n start_block(fid_str, FIFF.FIFFB_MEAS_INFO)\n for block in blocks:\n nodes = dir_tree_find(tree, block)\n copy_tree(fid, tree['id'], nodes, fid_str)\n info['orig_fid_str'] = fid_str\n\n\ndef write_extra_meas_info(fid, info):\n \"\"\"Write otherwise left out blocks of data\"\"\"\n # uses BytesIO fake file to read the appropriate blocks\n if 'orig_blocks' in info and info['orig_blocks'] is not None:\n # Blocks from the original\n blocks = info['orig_blocks']\n fid_str, tree, _ = fiff_open(info['orig_fid_str'])\n for block in blocks:\n nodes = dir_tree_find(tree, block)\n copy_tree(fid_str, tree['id'], nodes, fid)\n\n\ndef write_meas_info(fid, info, data_type=None, reset_range=True):\n \"\"\"Write measurement info into a file id (from a fif file)\n\n Parameters\n ----------\n fid : file\n Open file descriptor\n info : instance of mne.fiff.meas_info.Info\n The measurement info structure\n data_type : int\n The data_type in case it is necessary. 
Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n\n Note\n ----\n Tags are written in a particular order for compatibility with maxfilter\n \"\"\"\n\n # Measurement info\n start_block(fid, FIFF.FIFFB_MEAS_INFO)\n\n # Extra measurement info\n write_extra_meas_info(fid, info)\n\n # Polhemus data\n if info['dig'] is not None:\n start_block(fid, FIFF.FIFFB_ISOTRAK)\n for d in info['dig']:\n write_dig_point(fid, d)\n\n end_block(fid, FIFF.FIFFB_ISOTRAK)\n\n # megacq parameters\n if info['acq_pars'] is not None or info['acq_stim'] is not None:\n start_block(fid, FIFF.FIFFB_DACQ_PARS)\n if info['acq_pars'] is not None:\n write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])\n\n if info['acq_stim'] is not None:\n write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])\n\n end_block(fid, FIFF.FIFFB_DACQ_PARS)\n\n # Coordinate transformations if the HPI result block was not there\n if info['dev_head_t'] is not None:\n write_coord_trans(fid, info['dev_head_t'])\n\n if info['ctf_head_t'] is not None:\n write_coord_trans(fid, info['ctf_head_t'])\n\n # Projectors\n write_proj(fid, info['projs'])\n\n # CTF compensation info\n write_ctf_comp(fid, info['comps'])\n\n # Bad channels\n if len(info['bads']) > 0:\n start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])\n end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)\n\n # General\n if info.get('experimenter') is not None:\n write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])\n if info.get('description') is not None:\n write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])\n if info.get('proj_id') is not None:\n write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])\n if info.get('proj_name') is not None:\n write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])\n if info.get('meas_date') is not None:\n write_int(fid, FIFF.FIFF_MEAS_DATE, info['meas_date'])\n write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])\n write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])\n write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])\n write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])\n if info.get('line_freq') is not None:\n write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])\n if data_type is not None:\n write_int(fid, FIFF.FIFF_DATA_PACK, data_type)\n\n # Channel information\n for k, c in enumerate(info['chs']):\n # Scan numbers may have been messed up\n c = deepcopy(c)\n c['scanno'] = k + 1\n # for float/double, the \"range\" param is unnecessary\n if reset_range is True:\n c['range'] = 1.0\n write_ch_info(fid, c)\n\n # Subject information\n if info.get('subject_info') is not None:\n start_block(fid, FIFF.FIFFB_SUBJECT)\n si = info['subject_info']\n if si.get('id') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])\n if si.get('his_id') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])\n if si.get('last_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])\n if si.get('first_name') is not None:\n write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])\n if si.get('birthday') is not None:\n write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])\n if si.get('sex') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])\n if si.get('hand') is not None:\n write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])\n end_block(fid, FIFF.FIFFB_SUBJECT)\n\n end_block(fid, FIFF.FIFFB_MEAS_INFO)\n\n\ndef 
write_info(fname, info, data_type=None, reset_range=True):\n \"\"\"Write measurement info in fif file.\n\n Parameters\n ----------\n fname : str\n The name of the file. Should end by -info.fif.\n info : instance of mne.fiff.meas_info.Info\n The measurement info structure\n data_type : int\n The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),\n 5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for\n raw data.\n reset_range : bool\n If True, info['chs'][k]['range'] will be set to unity.\n \"\"\"\n fid = start_file(fname)\n start_block(fid, FIFF.FIFFB_MEAS)\n write_meas_info(fid, info, data_type, reset_range)\n end_block(fid, FIFF.FIFFB_MEAS)\n end_file(fid)\n",
"\"\"\"\n===============================================================================\nCompute a sparse inverse solution using the Gamma-Map empirical Bayesian method\n===============================================================================\n\nSee Wipf et al. \"A unified Bayesian framework for MEG/EEG source imaging.\"\nNeuroImage, vol. 44, no. 3, pp. 947?66, Mar. 2009.\n\"\"\"\n# Author: Martin Luessi <[email protected]>\n#\n# License: BSD (3-clause)\n\nprint(__doc__)\n\nimport numpy as np\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.inverse_sparse import gamma_map\nfrom mne.viz import plot_sparse_source_estimates\n\ndata_path = sample.data_path()\nsubjects_dir = data_path + '/subjects'\nfwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'\nevoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'\ncov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'\n\n# Read the evoked response and crop it\nsetno = 'Left visual'\nevoked = mne.fiff.read_evoked(evoked_fname, setno=setno, baseline=(None, 0))\nevoked.crop(tmin=-50e-3, tmax=300e-3)\n\n# Read the forward solution\nforward = mne.read_forward_solution(fwd_fname, surf_ori=True,\n force_fixed=False)\n\n# Read noise covariance matrix and regularize it\ncov = mne.read_cov(cov_fname)\ncov = mne.cov.regularize(cov, evoked.info)\n\n# Run the Gamma-MAP method\nalpha = 0.5\nstc, residual = gamma_map(evoked, forward, cov, alpha, xyz_same_gamma=True,\n return_residual=True)\n\n# View in 2D and 3D (\"glass\" brain like 3D plot)\n\n# Show the sources as spheres scaled by their strength\nscale_factors = np.max(np.abs(stc.data), axis=1)\nscale_factors = 0.5 * (1 + scale_factors / np.max(scale_factors))\n\nplot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),\n modes=['sphere'], opacity=0.1, scale_factors=(scale_factors, None),\n fig_name=\"Gamma-MAP\")\n\n# Show the evoked response and the residual for gradiometers\nylim = dict(grad=[-120, 120])\nevoked = mne.fiff.pick_types_evoked(evoked, meg='grad', exclude='bads')\nevoked.plot(titles=dict(grad='Evoked Response Gradiometers'), ylim=ylim,\n proj=True)\n\nresidual = mne.fiff.pick_types_evoked(residual, meg='grad', exclude='bads')\nresidual.plot(titles=dict(grad='Residuals Gradiometers'), ylim=ylim,\n proj=True)\n"
] |
[
[
"numpy.min",
"numpy.max",
"numpy.log10",
"scipy.signal.lfilter",
"numpy.zeros"
],
[
"scipy.linalg.inv",
"numpy.dot",
"numpy.iterable"
],
[
"numpy.max",
"numpy.abs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
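The `add_noise_evoked` helper in the mne-python row above estimates the current SNR in dB from a power ratio and then rescales the noise amplitude by `10 ** ((tmp - snr) / 20)`. A minimal self-contained NumPy sketch of that scaling rule (synthetic signal and noise, not part of the archived files):

import numpy as np

rng = np.random.default_rng(0)
signal = np.sin(np.linspace(0, 4 * np.pi, 1000))
noise = rng.standard_normal(1000)

snr_target = 3.0  # requested SNR in dB, as in generate_evoked(..., snr=3)
snr_current = 10 * np.log10(np.mean(signal ** 2) / np.mean(noise ** 2))

# the dB gap is divided by 20 (not 10) because the factor scales amplitude, not power
noise *= 10 ** ((snr_current - snr_target) / 20)
noisy = signal + noise
print("achieved SNR: %.2f dB"
      % (10 * np.log10(np.mean(signal ** 2) / np.mean(noise ** 2))))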
ferhah/pytorch-maml
|
[
"8fbf8b200c1c73aeb98787e4df43036955dca323"
] |
[
"maml/metalearners/maml.py"
] |
[
"import torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom collections import OrderedDict\nfrom maml.utils import update_parameters, tensors_to_device, compute_accuracy\n\n__all__ = ['ModelAgnosticMetaLearning', 'MAML', 'FOMAML']\n\n\nclass ModelAgnosticMetaLearning(object):\n \"\"\"Meta-learner class for Model-Agnostic Meta-Learning [1].\n\n Parameters\n ----------\n model : `torchmeta.modules.MetaModule` instance\n The model.\n\n optimizer : `torch.optim.Optimizer` instance, optional\n The optimizer for the outer-loop optimization procedure. This argument\n is optional for evaluation.\n\n step_size : float (default: 0.1)\n The step size of the gradient descent update for fast adaptation\n (inner-loop update).\n\n first_order : bool (default: False)\n If `True`, then the first-order approximation of MAML is used.\n\n learn_step_size : bool (default: False)\n If `True`, then the step size is a learnable (meta-trained) additional\n argument [2].\n\n per_param_step_size : bool (default: False)\n If `True`, then the step size parameter is different for each parameter\n of the model. Has no impact unless `learn_step_size=True`.\n\n num_adaptation_steps : int (default: 1)\n The number of gradient descent updates on the loss function (over the\n training dataset) to be used for the fast adaptation on a new task.\n\n scheduler : object in `torch.optim.lr_scheduler`, optional\n Scheduler for the outer-loop optimization [3].\n\n loss_function : callable (default: `torch.nn.functional.cross_entropy`)\n The loss function for both the inner and outer-loop optimization.\n Usually `torch.nn.functional.cross_entropy` for a classification\n problem, of `torch.nn.functional.mse_loss` for a regression problem.\n\n device : `torch.device` instance, optional\n The device on which the model is defined.\n\n References\n ----------\n .. [1] Finn C., Abbeel P., and Levine, S. (2017). Model-Agnostic Meta-Learning\n for Fast Adaptation of Deep Networks. International Conference on\n Machine Learning (ICML) (https://arxiv.org/abs/1703.03400)\n\n .. [2] Li Z., Zhou F., Chen F., Li H. (2017). Meta-SGD: Learning to Learn\n Quickly for Few-Shot Learning. (https://arxiv.org/abs/1707.09835)\n\n .. [3] Antoniou A., Edwards H., Storkey A. (2018). 
How to train your MAML.\n International Conference on Learning Representations (ICLR).\n (https://arxiv.org/abs/1810.09502)\n \"\"\"\n def __init__(self, model, optimizer=None, step_size=0.1, first_order=False,\n learn_step_size=False, per_param_step_size=False,\n num_adaptation_steps=1, scheduler=None,\n loss_function=F.cross_entropy, device=None):\n self.model = model.to(device=device)\n self.optimizer = optimizer\n self.step_size = step_size\n self.first_order = first_order\n self.num_adaptation_steps = num_adaptation_steps\n self.scheduler = scheduler\n self.loss_function = loss_function\n self.device = device\n\n if per_param_step_size:\n self.step_size = OrderedDict((name, torch.tensor(step_size,\n dtype=param.dtype, device=self.device,\n requires_grad=learn_step_size)) for (name, param)\n in model.meta_named_parameters())\n else:\n self.step_size = torch.tensor(step_size, dtype=torch.float32,\n device=self.device, requires_grad=learn_step_size)\n\n if (self.optimizer is not None) and learn_step_size:\n self.optimizer.add_param_group({'params': self.step_size.values()\n if per_param_step_size else [self.step_size]})\n if scheduler is not None:\n for group in self.optimizer.param_groups:\n group.setdefault('initial_lr', group['lr'])\n self.scheduler.base_lrs([group['initial_lr']\n for group in self.optimizer.param_groups])\n\n def get_outer_loss(self, batch):\n if 'test' not in batch:\n raise RuntimeError('The batch does not contain any test dataset.')\n\n _, test_targets = batch['test']\n num_tasks = test_targets.size(0)\n is_classification_task = (not test_targets.dtype.is_floating_point)\n results = {\n 'num_tasks': num_tasks,\n 'inner_losses': np.zeros((self.num_adaptation_steps,\n num_tasks), dtype=np.float32),\n 'outer_losses': np.zeros((num_tasks,), dtype=np.float32),\n 'mean_outer_loss': 0.\n }\n if is_classification_task:\n results.update({\n 'accuracies_before': np.zeros((num_tasks,), dtype=np.float32),\n 'accuracies_after': np.zeros((num_tasks,), dtype=np.float32)\n })\n\n mean_outer_loss = torch.tensor(0., device=self.device)\n for task_id, (train_inputs, train_targets, test_inputs, test_targets) \\\n in enumerate(zip(*batch['train'], *batch['test'])):\n params, adaptation_results = self.adapt(train_inputs, train_targets,\n is_classification_task=is_classification_task,\n num_adaptation_steps=self.num_adaptation_steps,\n step_size=self.step_size, first_order=self.first_order)\n\n results['inner_losses'][:, task_id] = adaptation_results['inner_losses']\n if is_classification_task:\n results['accuracies_before'][task_id] = adaptation_results['accuracy_before']\n\n with torch.set_grad_enabled(self.model.training):\n test_logits = self.model(test_inputs, params=params)\n outer_loss = self.loss_function(test_logits, test_targets)\n results['outer_losses'][task_id] = outer_loss.item()\n mean_outer_loss += outer_loss\n\n if is_classification_task:\n results['accuracies_after'][task_id] = compute_accuracy(\n test_logits, test_targets)\n\n mean_outer_loss.div_(num_tasks)\n results['mean_outer_loss'] = mean_outer_loss.item()\n\n return mean_outer_loss, results\n\n def adapt(self, inputs, targets, is_classification_task=None,\n num_adaptation_steps=1, step_size=0.1, first_order=False):\n if is_classification_task is None:\n is_classification_task = (not targets.dtype.is_floating_point)\n params = None\n\n results = {'inner_losses': np.zeros(\n (num_adaptation_steps,), dtype=np.float32)}\n\n for step in range(num_adaptation_steps):\n logits = self.model(inputs, params=params)\n 
inner_loss = self.loss_function(logits, targets)\n results['inner_losses'][step] = inner_loss.item()\n\n if (step == 0) and is_classification_task:\n results['accuracy_before'] = compute_accuracy(logits, targets)\n\n self.model.zero_grad()\n params = update_parameters(self.model, inner_loss,\n step_size=step_size, params=params,\n first_order=(not self.model.training) or first_order)\n\n return params, results\n\n def train(self, dataloader, max_batches=500, verbose=True, **kwargs):\n with tqdm(total=max_batches, disable=not verbose, **kwargs) as pbar:\n for results in self.train_iter(dataloader, max_batches=max_batches):\n pbar.update(1)\n postfix = {'loss': '{0:.4f}'.format(results['mean_outer_loss'])}\n if 'accuracies_after' in results:\n postfix['accuracy'] = '{0:.4f}'.format(\n np.mean(results['accuracies_after']))\n pbar.set_postfix(**postfix)\n\n def train_iter(self, dataloader, max_batches=500):\n if self.optimizer is None:\n raise RuntimeError('Trying to call `train_iter`, while the '\n 'optimizer is `None`. In order to train `{0}`, you must '\n 'specify a Pytorch optimizer as the argument of `{0}` '\n '(eg. `{0}(model, optimizer=torch.optim.SGD(model.'\n 'parameters(), lr=0.01), ...).'.format(__class__.__name__))\n num_batches = 0\n self.model.train()\n while num_batches < max_batches:\n for batch in dataloader:\n if num_batches >= max_batches:\n break\n\n if self.scheduler is not None:\n self.scheduler.step(epoch=num_batches)\n\n self.optimizer.zero_grad()\n\n batch = tensors_to_device(batch, device=self.device)\n outer_loss, results = self.get_outer_loss(batch)\n yield results\n\n outer_loss.backward()\n self.optimizer.step()\n\n num_batches += 1\n\n def evaluate(self, dataloader, max_batches=500, verbose=True, **kwargs):\n mean_outer_loss, mean_accuracy, count = 0., 0., 0\n with tqdm(total=max_batches, disable=not verbose, **kwargs) as pbar:\n for results in self.evaluate_iter(dataloader, max_batches=max_batches):\n pbar.update(1)\n count += 1\n mean_outer_loss += (results['mean_outer_loss']\n - mean_outer_loss) / count\n postfix = {'loss': '{0:.4f}'.format(mean_outer_loss)}\n if 'accuracies_after' in results:\n mean_accuracy += (np.mean(results['accuracies_after'])\n - mean_accuracy) / count\n postfix['accuracy'] = '{0:.4f}'.format(mean_accuracy)\n pbar.set_postfix(**postfix)\n\n mean_results = {'mean_outer_loss': mean_outer_loss}\n if 'accuracies_after' in results:\n mean_results['accuracies_after'] = mean_accuracy\n\n return mean_results\n\n def evaluate_iter(self, dataloader, max_batches=500):\n num_batches = 0\n self.model.eval()\n while num_batches < max_batches:\n for batch in dataloader:\n if num_batches >= max_batches:\n break\n\n batch = tensors_to_device(batch, device=self.device)\n _, results = self.get_outer_loss(batch)\n yield results\n\n num_batches += 1\n\nMAML = ModelAgnosticMetaLearning\n\nclass FOMAML(ModelAgnosticMetaLearning):\n def __init__(self, model, optimizer=None, step_size=0.1,\n learn_step_size=False, per_param_step_size=False,\n num_adaptation_steps=1, scheduler=None,\n loss_function=F.cross_entropy, device=None):\n super(FOMAML, self).__init__(model, optimizer=optimizer, first_order=True,\n step_size=step_size, learn_step_size=learn_step_size,\n per_param_step_size=per_param_step_size,\n num_adaptation_steps=num_adaptation_steps, scheduler=scheduler,\n loss_function=loss_function, device=device)\n"
] |
[
[
"torch.set_grad_enabled",
"numpy.zeros",
"numpy.mean",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
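The `adapt` method in the pytorch-maml row above performs the MAML inner loop: a few gradient steps on the support loss, keeping the computation graph when second-order gradients are needed. A minimal PyTorch sketch of one such step on a single stand-in parameter (illustrative only; `update_parameters` in the archived repo handles whole `MetaModule`s):

import torch

w = torch.randn(3, requires_grad=True)    # stand-in for one meta-parameter
x, y = torch.randn(8, 3), torch.randn(8)  # a tiny support set
inner_loss = ((x @ w - y) ** 2).mean()

step_size, first_order = 0.1, False
# create_graph=True keeps the graph so the outer (query) loss can
# differentiate through this update, as in full second-order MAML
(grad,) = torch.autograd.grad(inner_loss, w, create_graph=not first_order)
w_adapted = w - step_size * grad          # fast weights evaluated on the query set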
Basvanstein/MIP-EGO
|
[
"e1ed0b0ea020850c72c4de5efd5dda0a99de571f",
"e1ed0b0ea020850c72c4de5efd5dda0a99de571f",
"e1ed0b0ea020850c72c4de5efd5dda0a99de571f"
] |
[
"mipego/SearchSpace.py",
"benchmark/bbobbenchmarks.py",
"example/example_CMA_ES.py"
] |
[
"from pdb import set_trace\n\nimport json\nfrom copy import copy, deepcopy\n\nimport numpy as np\nfrom numpy.random import randint, rand\n\nfrom abc import abstractmethod\nfrom pyDOE import lhs\nfrom scipy.special import logit\n\n# TODO: rename `sampling` --> `sample`\n# TODO: add conditional parameters\n\nTRANS = {\n 'log': np.log,\n 'log10': np.log10,\n 'logit': logit,\n 'bilog': lambda x: np.sign(x) * np.log(1 + np.abs(x))\n}\n\nINV_TRANS = {\n 'log': np.exp,\n 'log10': lambda x: np.power(10, x),\n 'logit': lambda x: 1 / (1 + np.exp(-x)),\n 'bilog': lambda x: np.sign(x) * (np.exp(np.abs(x)) - 1) \n}\n\nclass SearchSpace(object):\n def __init__(self, bounds, var_name, name, random_seed=None):\n \"\"\"Search Space Base Class\n\n Parameters\n ----------\n bounds : (list of) list,\n lower and upper bound for continuous/ordinal parameter type\n categorical values for nominal parameter type.\n The dimension of the space is determined by the length of the \n nested list\n var_name : (list of) str,\n variable name per dimension. If only a string is given for multiple \n dimensions, variable names are created by appending counting numbers\n to the input string. \n name : str,\n search space name. It is typically used as the grouping variable\n when converting the Solution object to dictionary, allowing for \n vector-valued search parameters. See 'to_dict' method below.\n\n Attributes\n ----------\n dim : int,\n dimensinality of the search space\n bounds : a list of lists,\n each sub-list stores the lower and upper bound for continuous/ordinal variable\n and categorical values for nominal variable\n levels : a list of lists,\n each sub-list stores the categorical levels for every nominal variable. It takes\n `None` value when there is no nomimal variable\n precision : a list of double,\n the numerical precison (granularity) of continuous parameters, which usually \n very practical in real-world applications\n var_name : a list of str,\n variable names per dimension \n var_type : a list of str, \n variable type per dimension, 'C': continuous, 'N': nominal, 'O': ordinal\n C_mask : a bool array,\n the mask array for continuous variables\n O_mask : a bool array,\n the mask array for integer variables\n N_mask : a bool array,\n the mask array for discrete variables\n id_C : an int array,\n the index array for continuous variables\n id_O : an int array,\n the index array for integer variables\n id_N : an int array,\n the index array for discrete variables\n \"\"\"\n if hasattr(bounds[0], '__iter__') and not isinstance(bounds[0], str):\n self.bounds = [tuple(b) for b in bounds]\n else:\n self.bounds = [tuple(bounds)]\n \n self.dim = len(self.bounds)\n self.name = name\n self.random_seed = random_seed\n self.var_type = None \n self.levels = None\n self.precision = {}\n self.scale = {}\n\n if var_name is not None:\n if isinstance(var_name, str):\n if self.dim > 1:\n var_name = [var_name + '_' + str(_) for _ in range(self.dim)]\n else:\n var_name = [var_name]\n assert len(var_name) == self.dim\n self.var_name = var_name\n\n @property\n def random_seed(self):\n return self._random_seed\n \n @random_seed.setter\n def random_seed(self, seed):\n if seed:\n self._random_seed = int(seed)\n np.random.seed(self._random_seed)\n\n @abstractmethod\n def sampling(self, N=1):\n \"\"\"The output is a list of shape (N, self.dim)\n \"\"\"\n pass\n \n def _set_index(self):\n self.C_mask = np.asarray(self.var_type) == 'C' # Continuous\n self.O_mask = np.asarray(self.var_type) == 'O' # Ordinal\n self.N_mask = 
np.asarray(self.var_type) == 'N' # Nominal \n \n self.id_C = np.nonzero(self.C_mask)[0]\n self.id_O = np.nonzero(self.O_mask)[0]\n self.id_N = np.nonzero(self.N_mask)[0]\n\n def _set_levels(self):\n \"\"\"Set categorical levels for all nominal variables\n \"\"\"\n if hasattr(self, 'id_N') and len(self.id_N) > 0:\n self.levels = {i : self.bounds[i] for i in self.id_N}\n self._n_levels = {i : len(self.bounds[i]) for i in self.id_N}\n else:\n self.levels, self._n_levels = None, None \n\n def to_linear_scale(self, X):\n X = deepcopy(X)\n if not hasattr(X[0], '__iter__'):\n for k, v in self.scale.items():\n X[k] = INV_TRANS[v](X[k])\n else:\n for k, v in self.scale.items():\n for i in range(len(X)):\n X[i][k] = INV_TRANS[v](X[i][k])\n return X\n\n def round(self, X):\n \"\"\"Round the real-valued components of `X` to the \n corresponding numerical precision, if given\n \"\"\"\n # NOTE: make sure the rounding is applied in the original linear scale\n X = self.to_linear_scale(X)\n\n if self.precision is not None:\n X = deepcopy(X)\n if not hasattr(X[0], '__iter__'):\n for k, v in self.precision.items():\n X[k] = np.round(X[k], v)\n else:\n for k, v in self.precision.items():\n for i in range(len(X)):\n X[i][k] = np.round(X[i][k], v)\n return X\n\n @classmethod\n def from_dict(cls, param, space_name=True):\n \"\"\"Create a search space object from input dictionary\n\n Parameters\n ----------\n param : dict\n A dictionary that describes the search space\n space_name : bool, optional\n Whether a (multi-dimensional) subspace should be named. If this named space \n is a subspace of a whole search space, for a solution sampled from the whole space, its \n components pertaining to this subspace will be grouped together under the key \n `space_name`, when this solution is converted to a dictionary/json\n (see `SearchSpace.to_dict`).\n\n Returns\n -------\n SearchSpace\n \"\"\"\n assert isinstance(param, dict)\n\n # construct the search space\n for i, (k, v) in enumerate(param.items()):\n bounds = v['range']\n if not hasattr(bounds[0], '__iter__') or isinstance(bounds[0], str):\n bounds = [bounds]\n\n N = v['N'] if 'N' in v else int(1)\n bounds *= N\n name = k if space_name else None\n\n # IMPORTANT: name argument is necessary for the variable grouping\n if v['type'] in ['r', 'real']: # real-valued parameter\n precision = v['precision'] if 'precision' in v else None \n scale = v['scale'] if 'scale' in v else None \n space_ = ContinuousSpace(\n bounds, var_name=k, name=name, \n precision=precision, scale=scale\n )\n elif v['type'] in ['i', 'int', 'integer']: # integer-valued parameter\n space_ = OrdinalSpace(bounds, var_name=k, name=name)\n elif v['type'] in ['c', 'cat', 'bool']: # category-valued parameter\n space_ = NominalSpace(bounds, var_name=k, name=name) \n \n if i == 0:\n space = space_\n else:\n space += space_\n return space\n\n @classmethod\n def from_json(cls, file):\n \"\"\"Create a search space from a json file\n\n Parameters\n ----------\n file : str\n Path to the input json file\n\n Returns\n -------\n SearchSpace\n a `SearchSpace` object converted from the json file\n \"\"\"\n with open(file, 'r') as f:\n return cls.from_dict(json.load(f))\n\n def __len__(self):\n return self.dim\n\n def __iter__(self):\n pass\n\n def __add__(self, space):\n \"\"\"Direct Sum of two `SearchSpace`s\n \"\"\"\n assert isinstance(space, SearchSpace)\n return ProductSpace(self, space)\n\n def __radd__(self, space):\n return self.__add__(space)\n\n def __mul__(self, N):\n \"\"\"Replicate a `SearchSpace` N times\n 
\"\"\"\n N = int(N)\n s = deepcopy(self)\n s.dim = int(self.dim * N)\n s.var_type *= N\n s.bounds *= N\n s.var_name = ['{}_{}'.format(v, k) for k in range(N) for v in self.var_name]\n return s\n\n def __rmul__(self, N):\n return self.__mul__(N)\n \n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n _ = 'Search Space of %d variables: \\n'%self.dim\n for i in range(self.dim):\n _ += ' `%s`'%self.var_name[i]\n _ += ' - categories: ' if self.var_type[i] == 'N' else ' bounds: ' \n _ += str(self.bounds[i])\n if i in self.precision:\n _ += ' - precision: %d'%self.precision[i]\n if i in self.scale:\n _ += ' - scale: %s'%self.scale[i]\n _ += '\\n'\n return _\n \n\nclass ContinuousSpace(SearchSpace):\n \"\"\"Continuous (real-valued) Search Space\n \"\"\"\n def __init__(\n self, \n bounds, \n var_name='r', \n name=None, \n precision=None,\n scale=None\n ):\n super(ContinuousSpace, self).__init__(bounds, var_name, name)\n self.var_type = ['C'] * self.dim\n self._set_index()\n \n # set up the precision for each dimension\n if hasattr(precision, '__iter__'):\n assert len(precision) == self.dim\n self.precision = {\n i : precision[i] for i in range(self.dim) if precision[i] is not None\n }\n elif precision is not None:\n self.precision = {i : precision for i in range(self.dim)}\n\n # set up the scale for each dimension\n if scale is not None:\n if isinstance(scale, str):\n scale = [scale] * self.dim\n elif hasattr(scale, '__iter__'):\n assert len(scale) == self.dim\n\n self.scale = {\n i : scale[i] for i in range(self.dim) if scale[i] is not None\n }\n\n for i, s in self.scale.items():\n lower, upper = self.bounds[i]\n self.bounds[i] = (TRANS[s](lower), TRANS[s](upper))\n\n self._bounds = np.atleast_2d(self.bounds).T\n assert all(self._bounds[0, :] < self._bounds[1, :])\n\n def __mul__(self, N):\n s = super(ContinuousSpace, self).__mul__(N)\n s._bounds = np.tile(s._bounds, (1, N))\n s._set_index()\n\n s.precision = {}\n for i in range(N):\n s.precision.update(\n {(k + self.dim * i) : v for k, v in self.precision.items()}\n )\n\n s.scale = {}\n for i in range(N):\n s.scale.update(\n {(k + self.dim * i) : v for k, v in self.scale.items()}\n )\n return s\n \n def sampling(self, N=1, method='uniform'):\n lb, ub = self._bounds\n if method == 'uniform': # uniform random samples\n X = ((ub - lb) * rand(N, self.dim) + lb)\n elif method == 'LHS': # Latin hypercube sampling\n if N == 1:\n X = ((ub - lb) * rand(N, self.dim) + lb)\n else:\n X = ((ub - lb) * lhs(self.dim, samples=N, criterion='cm') + lb)\n return X.tolist()\n\n\nclass NominalSpace(SearchSpace):\n \"\"\"Nominal (discrete) Search Space\n \"\"\"\n def __init__(self, levels, var_name='d', name=None):\n levels = self._get_unique_levels(levels)\n super(NominalSpace, self).__init__(levels, var_name, name)\n self.var_type = ['N'] * self.dim\n self._levels = [np.array(b) for b in self.bounds]\n self._set_index()\n self._set_levels()\n \n def _get_unique_levels(self, levels):\n index = list(hasattr(l, '__iter__') and not isinstance(l, str) for l in levels)\n if any(index):\n return [\n list(set(levels[k] if i else [levels[k]])) \\\n for k, i in enumerate(index)\n ]\n else: \n return [list(set(levels))]\n\n def __mul__(self, N):\n s = super(NominalSpace, self).__mul__(N)\n s._set_index()\n s._set_levels()\n return s\n \n def sampling(self, N=1, method='uniform'):\n # NOTE: `LHS` sampling does not apply here since nominal variable is not ordered\n res = np.empty((N, self.dim), dtype=object)\n for i in range(self.dim):\n idx = randint(0, 
self._n_levels[i], N)\n res[:, i] = [self.levels[i][_] for _ in idx]\n\n return res.tolist()\n\n\nclass OrdinalSpace(SearchSpace):\n \"\"\"Ordinal (integer) Search Space\n \"\"\"\n def __init__(self, bounds, var_name='i', name=None):\n super(OrdinalSpace, self).__init__(bounds, var_name, name)\n self.var_type = ['O'] * self.dim\n self._lb, self._ub = zip(*self.bounds) # for sampling\n assert all(np.array(self._lb) < np.array(self._ub))\n self._set_index()\n\n def __mul__(self, N):\n s = super(OrdinalSpace, self).__mul__(N)\n s._lb, s._ub = s._lb * N, s._ub * N\n s._set_index()\n return s\n \n def sampling(self, N=1, method='uniform'):\n # TODO: adding LHS sampling here\n res = np.zeros((N, self.dim), dtype=int)\n for i in range(self.dim):\n res[:, i] = list(map(int, randint(self._lb[i], self._ub[i], N)))\n return res.tolist()\n\n\nclass ProductSpace(SearchSpace):\n \"\"\"Cartesian product of the search spaces\n \"\"\"\n def __init__(self, spaceL, spaceR):\n # setup the space names\n nameL = spaceL.name if isinstance(spaceL, ProductSpace) else [spaceL.name] \n nameR = spaceR.name if isinstance(spaceR, ProductSpace) else [spaceR.name]\n self.name = nameL + nameR\n self.dim = spaceL.dim + spaceR.dim\n\n # TODO: check coincides of variable names\n self.var_name = spaceL.var_name + spaceR.var_name \n self.bounds = spaceL.bounds + spaceR.bounds\n self.var_type = spaceL.var_type + spaceR.var_type\n\n self._subspaceL = deepcopy(spaceL)\n self._subspaceR = deepcopy(spaceR)\n self._set_index()\n self._set_levels()\n\n self.precision = copy(spaceL.precision)\n self.precision.update({(k + spaceL.dim) : v for k, v in spaceR.precision.items()})\n\n self.scale = copy(spaceL.scale) \n self.scale.update({(k + spaceL.dim) : v for k, v in spaceR.scale.items()})\n \n def sampling(self, N=1, method='uniform'):\n a = self._subspaceL.sampling(N, method)\n b = self._subspaceR.sampling(N, method)\n return [a[i] + b[i] for i in range(N)]\n \n def to_dict(self, solution):\n \"\"\"Save a Solution instance to a dictionary \n \n The result is grouped by sub-spaces, which is meant for vector-valued \n parameters for the configuration \n\n Parameters\n ----------\n solution : .base.Solution\n A solution object\n\n Returns\n -------\n dict\n \"\"\"\n id1 = list(range(self._subspaceL.dim))\n id2 = list(range(self._subspaceL.dim, self.dim))\n L = solution[id1] if len(solution.shape) == 1 else solution[:, id1]\n R = solution[id2] if len(solution.shape) == 1 else solution[:, id2]\n return {**self._subspaceL.to_dict(L), **self._subspaceR.to_dict(R)}\n\n def __mul__(self, space):\n raise ValueError('Unsupported operation')\n\n def __rmul__(self, space):\n raise ValueError('Unsupported operation')\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"BBOB noiseless testbed.\n\nThe optimisation test functions are represented as classes `F1` to\n`F24` (and `F101` to `F130`).\n\nThis module implements the class :py:class:`BBOBFunction` and\nsub-classes:\n\n* :py:class:`BBOBNfreeFunction` which have all the methods common to the\n classes :py:class:`F1` to :py:class:`F24`\n* :py:class:`BBOBGaussFunction`, :py:class:`BBOBCauchyFunction`,\n :py:class:`BBOBUniformFunction` which have methods in classes from\n :py:class:`F101` to :py:class:`F130`\n\nModule attributes:\n\n* :py:data:`dictbbob` is a dictionary such that dictbbob[2] contains\n the test function class F2 and f2 = dictbbob[2]() returns\n the instance 0 of the test function that can be\n called as f2([1,2,3]).\n* :py:data:`nfreeIDs` == range(1,25) indices for the noiseless functions that can be\n found in dictbbob\n* :py:data:`noisyIDs` == range(101, 131) indices for the noisy functions that can be\n found in dictbbob. We have nfreeIDs + noisyIDs == sorted(dictbbob.keys())\n* :py:data:`nfreeinfos` function infos\n\nExamples:\n\n>>> import bbobbenchmarks as bn\n>>> for s in bn.nfreeinfos:\n... print s\n1: Noise-free Sphere function\n2: Separable ellipsoid with monotone transformation\n<BLANKLINE>\n Parameter: condition number (default 1e6)\n<BLANKLINE>\n<BLANKLINE>\n3: Rastrigin with monotone transformation separable \"condition\" 10\n4: skew Rastrigin-Bueche, condition 10, skew-\"condition\" 100\n5: Linear slope\n6: Attractive sector function\n7: Step-ellipsoid, condition 100, noise-free\n8: Rosenbrock noise-free\n9: Rosenbrock, rotated\n10: Ellipsoid with monotone transformation, condition 1e6\n11: Discus (tablet) with monotone transformation, condition 1e6\n12: Bent cigar with asymmetric space distortion, condition 1e6\n13: Sharp ridge\n14: Sum of different powers, between x^2 and x^6, noise-free\n15: Rastrigin with asymmetric non-linear distortion, \"condition\" 10\n16: Weierstrass, condition 100\n17: Schaffers F7 with asymmetric non-linear transformation, condition 10\n18: Schaffers F7 with asymmetric non-linear transformation, condition 1000\n19: F8F2 sum of Griewank-Rosenbrock 2-D blocks, noise-free\n20: Schwefel with tridiagonal variable transformation\n21: Gallagher with 101 Gaussian peaks, condition up to 1000, one global rotation, noise-free\n22: Gallagher with 21 Gaussian peaks, condition up to 1000, one global rotation\n23: Katsuura function\n24: Lunacek bi-Rastrigin, condition 100\n<BLANKLINE>\n in PPSN 2008, Rastrigin part rotated and scaled\n<BLANKLINE>\n<BLANKLINE>\n>>> f3 = bn.F3(13) # instantiate instance 13 of function f3\n>>> f3([0, 1, 2]) # short-cut for f3.evaluate([0, 1, 2])\n59.87335291\n>>> print bn.instantiate(5)[1] # returns function instance and optimal f-value\n51.53\n>>> print bn.nfreeIDs # list noise-free functions\n[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]\n>>> for i in bn.nfreeIDs: # evaluate all noiseless functions once\n... 
print bn.instantiate(i)[0]([0., 0., 0., 0.]),\n-77.27454592 6180022.82173 92.9877507529 92.9877507529 140.510117618 70877.9554128 -72.5505202195 33355.7924722 -339.94 4374717.49343 15631566.3487 4715481.0865 550.599783901 -17.2991756229 27.3633128519 -227.827833529 -24.3305918781 131.420159348 40.7103737427 6160.81782924 376.746889545 107.830426761 220.482266557 106.094767386\n\n\"\"\"\n\n# TODO: define interface for this module.\n# TODO: funId is expected to be a number since it is used as rseed.\n\nimport warnings\nfrom pdb import set_trace\nimport numpy as np\nfrom math import floor as floor\nfrom numpy import dot, linspace, diag, tile, zeros, sign, resize\nfrom numpy.random import standard_normal as _randn # TODO: may bring confusion\nfrom numpy.random import random as _rand # TODO: may bring confusion\n\n\"\"\"\n% VAL = BENCHMARKS(X, FUNCID)\n% VAL = BENCHMARKS(X, STRFUNC)\n% Input:\n% X -- solution column vector or matrix of column vectors\n% FUNCID -- number of function to be executed with X as input,\n% by default 8.\n% STRFUNC -- function as string to be executed with X as input\n% Output: function value(s) of solution(s)\n% Examples:\n% F = BENCHMARKS([1 2 3]', 17);\n% F = BENCHMARKS([1 2 3]', 'f1');\n%\n% NBS = BENCHMARKS()\n% NBS = BENCHMARKS('FunctionIndices')\n% Output:\n% NBS -- array of valid benchmark function numbers,\n% presumably 1:24\n%\n% FHS = BENCHMARKS('handles')\n% Output:\n% FHS -- cell array of function handles\n% Examples:\n% FHS = BENCHMARKS('handles');\n% f = FHS{1}(x); % evaluates x on the sphere function f1\n% f = feval(FHS{1}, x); % ditto\n%\n% see also: functions FGENERIC, BENCHMARKINFOS, BENCHMARKSNOISY\n\n% Authors (copyright 2009): Nikolaus Hansen, Raymond Ros, Steffen Finck\n% Version = 'Revision: $Revision: 1115 $'\n% Last Modified: $Date: 2009-02-09 19:22:42 +0100 (Mon, 09 Feb 2009) $\n\n% INTERFACE OF BENCHMARK FUNCTIONS\n% FHS = BENCHMARKS('handles');\n% FUNC = FHS{1};\n%\n% [FVALUE, FTRUE] = FUNC(X)\n% [FVALUE, FTRUE] = FUNC(X, [], IINSTANCE)\n% Input: X -- matrix of column vectors\n% IINSTANCE -- instance number of the function, sets function\n% instance (XOPT, FOPT, rotation matrices,...)\n% up until a new number is set, or the function is\n% cleared. 
Default is zero.\n% Output: row vectors with function value for each input column\n% FVALUE -- function value\n% FTRUE -- noise-less, deterministic function value\n% [FOPT STRFUNCTION] = FUNC('any_even_empty_string', ...)\n% Output:\n% FOPT -- function value at optimum\n% STRFUNCTION -- not yet implemented: function description string, ID before first whitespace\n% [FOPT STRFUNCTION] = FUNC('any_even_empty_string', DIM, NTRIAL)\n% Sets rotation matrices and xopt depending on NTRIAL (by changing the random seed).\n% Output:\n% FOPT -- function value at optimum\n% STRFUNCTION -- not yet implemented: function description string, ID before first whitespace\n% [FOPT, XOPT] = FUNC('xopt', DIM)\n% Output:\n% FOPT -- function value at optimum XOPT\n% XOPT -- optimal solution vector in DIM-D\n% [FOPT, MATRIX] = FUNC('linearTF', DIM) % might vanish in future\n% Output:\n% FOPT -- function value at optimum XOPT\n% MATRIX -- used transformation matrix\n\n\"\"\"\n\n### FUNCTION DEFINITION ###\n\ndef compute_xopt(rseed, dim):\n \"\"\"Generate a random vector used as optimum argument.\n\n Rounded by four digits, but never to zero.\n\n \"\"\"\n xopt = 8 * np.floor(1e4 * unif(dim, rseed)) / 1e4 - 4\n idx = (xopt == 0)\n xopt[idx] = -1e-5\n return xopt\n\ndef compute_rotation(seed, dim):\n \"\"\"Returns an orthogonal basis.\"\"\"\n\n B = np.reshape(gauss(dim * dim, seed), (dim, dim))\n for i in range(dim):\n for j in range(0, i):\n B[i] = B[i] - dot(B[i], B[j]) * B[j]\n B[i] = B[i] / (np.sum(B[i]**2) ** .5)\n return B\n\ndef monotoneTFosc(f):\n \"\"\"Maps [-inf,inf] to [-inf,inf] with different constants\n for positive and negative part.\n\n \"\"\"\n if np.isscalar(f):\n if f > 0.:\n f = np.log(f) / 0.1\n f = np.exp(f + 0.49 * (np.sin(f) + np.sin(0.79 * f))) ** 0.1\n elif f < 0.:\n f = np.log(-f) / 0.1\n f = -np.exp(f + 0.49 * (np.sin(0.55 * f) + np.sin(0.31 * f))) ** 0.1\n return f\n else:\n f = np.asarray(f)\n g = f.copy()\n idx = (f > 0)\n g[idx] = np.log(f[idx]) / 0.1\n g[idx] = np.exp(g[idx] + 0.49 * (np.sin(g[idx]) + np.sin(0.79 * g[idx])))**0.1\n idx = (f < 0)\n g[idx] = np.log(-f[idx]) / 0.1\n g[idx] = -np.exp(g[idx] + 0.49 * (np.sin(0.55 * g[idx]) + np.sin(0.31 * g[idx])))**0.1\n return g\n\ndef defaultboundaryhandling(x, fac):\n \"\"\"Returns a float penalty for being outside of boundaries [-5, 5]\"\"\"\n xoutside = np.maximum(0., np.abs(x) - 5) * sign(x)\n fpen = fac * np.sum(xoutside**2, -1) # penalty\n return fpen\n\ndef gauss(N, seed):\n \"\"\"Samples N standard normally distributed numbers\n being the same for a given seed\n\n \"\"\"\n r = unif(2 * N, seed)\n g = np.sqrt(-2 * np.log(r[:N])) * np.cos(2 * np.pi * r[N:2*N])\n if np.any(g == 0.):\n g[g == 0] = 1e-99\n return g\n\ndef unif(N, inseed):\n \"\"\"Generates N uniform numbers with starting seed.\"\"\"\n\n # initialization\n inseed = np.abs(inseed)\n if inseed < 1.:\n inseed = 1.\n\n rgrand = 32 * [0.]\n aktseed = inseed\n for i in range(39, -1, -1):\n tmp = floor(aktseed / 127773.)\n aktseed = 16807. * (aktseed - tmp * 127773.) - 2836. * tmp\n if aktseed < 0:\n aktseed = aktseed + 2147483647.\n if i < 32:\n rgrand[i] = aktseed\n aktrand = rgrand[0]\n\n # sample numbers\n r = int(N) * [0.]\n for i in range(int(N)):\n tmp = floor(aktseed / 127773.)\n aktseed = 16807. * (aktseed - tmp * 127773.) - 2836. 
* tmp\n if aktseed < 0:\n aktseed = aktseed + 2147483647.\n tmp = int(floor(aktrand / 67108865.))\n aktrand = rgrand[tmp]\n rgrand[tmp] = aktseed\n r[i] = aktrand / 2.147483647e9\n r = np.asarray(r)\n if (r == 0).any():\n warnings.warn('zero sampled(?), set to 1e-99')\n r[r == 0] = 1e-99\n return r\n\n# for testing and comparing to other implementations,\n# myrand and myrandn are used only for sampling the noise\n# Rename to myrand and myrandn to rand and randn and\n# comment lines 24 and 25.\n\n_randomnseed = 30. # warning this is a global variable...\ndef _myrandn(size):\n \"\"\"Normal random distribution sampling.\n\n For testing and comparing purpose.\n\n \"\"\"\n\n global _randomnseed\n _randomnseed = _randomnseed + 1.\n if _randomnseed > 1e9:\n _randomnseed = 1.\n res = np.reshape(gauss(np.prod(size), _randomnseed), size)\n return res\n\n_randomseed = 30. # warning this is a global variable...\ndef _myrand(size):\n \"\"\"Uniform random distribution sampling.\n\n For testing and comparing purpose.\n\n \"\"\"\n\n global _randomseed\n _randomseed = _randomseed + 1\n if _randomseed > 1e9:\n _randomseed = 1\n res = np.reshape(unif(np.prod(size), _randomseed), size)\n return res\n\ndef fGauss(ftrue, beta):\n \"\"\"Returns Gaussian model noisy value.\"\"\"\n # expects ftrue to be a np.array\n popsi = np.shape(ftrue)\n fval = ftrue * np.exp(beta * _randn(popsi)) # with gauss noise\n tol = 1e-8\n fval = fval + 1.01 * tol\n idx = ftrue < tol\n try:\n fval[idx] = ftrue[idx]\n except IndexError: # fval is a scalar\n if idx:\n fval = ftrue\n return fval\n\ndef fUniform(ftrue, alpha, beta):\n \"\"\"Returns uniform model noisy value.\"\"\"\n # expects ftrue to be a np.array\n popsi = np.shape(ftrue)\n fval = (_rand(popsi) ** beta * ftrue *\n np.maximum(1., (1e9 / (ftrue + 1e-99)) ** (alpha * _rand(popsi))))\n tol = 1e-8\n fval = fval + 1.01 * tol\n idx = ftrue < tol\n try:\n fval[idx] = ftrue[idx]\n except IndexError: # fval is a scalar\n if idx:\n fval = ftrue\n return fval\n\ndef fCauchy(ftrue, alpha, p):\n \"\"\"Returns Cauchy model noisy value\n\n Cauchy with median 1e3*alpha and with p=0.2, zero otherwise\n\n P(Cauchy > 1,10,100,1000) = 0.25, 0.032, 0.0032, 0.00032\n\n \"\"\"\n # expects ftrue to be a np.array\n popsi = np.shape(ftrue)\n fval = ftrue + alpha * np.maximum(0., 1e3 + (_rand(popsi) < p) *\n _randn(popsi) / (np.abs(_randn(popsi)) + 1e-199))\n tol = 1e-8\n fval = fval + 1.01 * tol\n idx = ftrue < tol\n try:\n fval[idx] = ftrue[idx]\n except IndexError: # fval is a scalar\n if idx:\n fval = ftrue\n return fval\n\n### CLASS DEFINITION ###\n\nclass AbstractTestFunction(object):\n \"\"\"Abstract class for test functions.\n\n Defines methods to be implemented in test functions which are to be\n provided to method setfun of class Logger.\n In particular, (a) the attribute fopt and (b) the method _evalfull.\n\n The _evalfull method returns two values, the possibly noisy value and\n the noise-free value. 
The latter is only meant to be for recording purpose.\n\n \"\"\"\n def __call__(self, x): # makes the instances callable\n \"\"\"Returns the objective function value of argument x.\n\n Example:\n\n >>> import bbobbenchmarks as bn\n >>> f3 = bn.F3(13) # instantiate function 3 on instance 13\n >>> f3([0, 1, 2]) # call f3, same as f3.evaluate([0, 1, 2])\n 59.87335291\n\n \"\"\"\n return self.evaluate(x)\n\n def evaluate(self, x):\n \"\"\"Returns the objective function value (in case noisy).\n\n \"\"\"\n return self._evalfull(x)[0]\n # TODO: is it better to leave evaluate out and check for hasattr('evaluate') in ExpLogger?\n\n def _evalfull(self, x):\n \"\"\"return noisy and noise-free value, the latter for recording purpose. \"\"\"\n raise NotImplementedError\n\n def getfopt(self):\n \"\"\"Returns the best function value of this instance of the function.\"\"\"\n # TODO: getfopt error:\n # import bbobbenchmarks as bb\n # bb.instantiate(1)[0].getfopt()\n # AttributeError: F1 instance has no attribute '_fopt'\n\n if not hasattr(self, 'iinstance'):\n raise Exception('This function class has not been instantiated yet.')\n return self._fopt\n\n def setfopt(self, fopt):\n try:\n self._fopt = float(fopt)\n except ValueError:\n raise Exception('Optimal function value must be cast-able to a float.')\n\n fopt = property(getfopt, setfopt)\n\nclass BBOBFunction(AbstractTestFunction):\n \"\"\"Abstract class of BBOB test functions.\n\n Implements some base functions that are used by the test functions\n of BBOB such as initialisations of class attributes.\n\n \"\"\"\n def __init__(self, iinstance=0, zerox=False, zerof=False, param=None, **kwargs):\n \"\"\"Common initialisation.\n\n Keyword arguments:\n iinstance -- instance of the function (int)\n zerox -- sets xopt to [0, ..., 0]\n zerof -- sets fopt to 0\n param -- parameter of the function (if applicable)\n kwargs -- additional attributes\n\n \"\"\"\n # Either self.rrseed or self.funId have to be defined for BBOBFunctions\n # TODO: enforce\n try:\n rrseed = self.rrseed\n except AttributeError:\n rrseed = self.funId\n\n try:\n self.rseed = rrseed + 1e4 * iinstance\n except TypeError:\n # rrseed AND iinstance have to be float\n warnings.warn('self.rseed could not be set, reset to 1 instead.')\n self.rseed = 1\n\n self.zerox = zerox\n if zerof:\n self.fopt = 0.\n else:\n self.fopt = min(1000, max(-1000, (np.round(100 * 100 * gauss(1, self.rseed)[0] / gauss(1, self.rseed + 1)[0]) / 100)))\n self.iinstance = iinstance\n self.dim = None\n self.lastshape = None\n self.param = param\n for i, v in kwargs.items():\n setattr(self, i, v)\n self._xopt = None\n\n def shape_(self, x):\n # this part is common to all evaluate function\n # it is assumed x are row vectors\n curshape = np.shape(x)\n dim = np.shape(x)[-1]\n return curshape, dim\n\n def getiinstance(self):\n \"\"\"Designates the instance of the function class.\n\n An instance in this case means a given target function value, a\n given optimal argument x, and given transformations for the\n function. It needs to have a string representation. 
Preferably\n it should be a number or a string.\n\n \"\"\"\n return self._iinstance\n\n def setiinstance(self, iinstance):\n self._iinstance = iinstance\n\n iinstance = property(getiinstance, setiinstance)\n\n def shortstr(self):\n \"\"\"Gives a short string self representation (shorter than str(self)).\"\"\"\n\n res = 'F%s' % str(self.funId)\n if hasattr(self, 'param'):\n res += '_p%s' % str(self.param) # NH param -> self.param\n return res\n\n def __eq__(self, obj):\n return (self.funId == obj.funId\n and (not hasattr(self, 'param') or self.param == obj.param))\n # TODO: make this test on other attributes than param?\n\n# def dimensionality(self, dim):\n# \"\"\"Return the availability of dimensionality dim.\"\"\"\n# return True\n\n # GETTERS\n# def getfopt(self):\n# \"\"\"Optimal Function Value.\"\"\"\n# return self._fopt\n\n# fopt = property(getfopt)\n\n def _setxopt(self, xopt):\n \"\"\"Return the argument of the optimum of the function.\"\"\"\n self._xopt = xopt\n\n def _getxopt(self):\n \"\"\"Return the argument of the optimum of the function.\"\"\"\n if self._xopt is None:\n warnings.warn('You need to evaluate object to set dimension first.')\n return self._xopt\n\n xopt = property(_getxopt, _setxopt)\n\n# def getrange(self):\n# \"\"\"Return the domain of the function.\"\"\"\n# #TODO: could depend on the dimension\n# # TODO: return exception NotImplemented yet\n# pass\n\n# range = property(getrange)\n\n# def getparam(self):\n# \"\"\"Optional parameter value.\"\"\"\n# return self._param\n\n# param = property(getparam)\n\n# def getitrial(self):\n# \"\"\"Instance id number.\"\"\"\n# return self._itrial\n\n# itrial = property(getitrial)\n\n# def getlinearTf(self):\n# return self._linearTf\n\n# linearTf = property(getlinearTf)\n\n# def getrotation(self):\n# return self._rotation\n\n# rotation = property(getrotation)\n\n\n\nclass BBOBNfreeFunction(BBOBFunction):\n \"\"\"Class of the noise-free functions of BBOB.\"\"\"\n\n def noise(self, ftrue):\n \"\"\"Returns the noise-free function values.\"\"\"\n\n return ftrue.copy()\n\nclass BBOBGaussFunction(BBOBFunction):\n \"\"\"Class of the Gauss noise functions of BBOB.\n\n Attribute gaussbeta needs to be defined by inheriting classes.\n\n \"\"\"\n\n # gaussbeta = None\n\n def noise(self, ftrue):\n \"\"\"Returns the noisy function values.\"\"\"\n\n return fGauss(ftrue, self.gaussbeta)\n\n def boundaryhandling(self, x):\n return defaultboundaryhandling(x, 100.)\n\nclass BBOBUniformFunction(BBOBFunction, object):\n \"\"\"Class of the uniform noise functions of BBOB.\n\n Attributes unifalphafac and unifbeta need to be defined by inheriting\n classes.\n\n \"\"\"\n # unifalphafac = None\n # unifbeta = None\n\n def noise(self, ftrue):\n \"\"\"Returns the noisy function values.\"\"\"\n\n return fUniform(ftrue, self.unifalphafac * (0.49 + 1. 
/ self.dim), self.unifbeta)\n\n def boundaryhandling(self, x):\n return defaultboundaryhandling(x, 100.)\n\nclass BBOBCauchyFunction(BBOBFunction):\n \"\"\"Class of the Cauchy noise functions of BBOB.\n\n Attributes cauchyalpha and cauchyp need to be defined by inheriting\n classes.\n\n \"\"\"\n # cauchyalpha = None\n # cauchyp = None\n\n def noise(self, ftrue):\n \"\"\"Returns the noisy function values.\"\"\"\n\n return fCauchy(ftrue, self.cauchyalpha, self.cauchyp)\n\n def boundaryhandling(self, x):\n return defaultboundaryhandling(x, 100.)\n\nclass _FSphere(BBOBFunction):\n \"\"\"Abstract Sphere function.\n\n Method boundaryhandling needs to be defined.\n\n \"\"\"\n rrseed = 1\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n fadd = fadd + self.boundaryhandling(x)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n\n # COMPUTATION core\n ftrue = np.sum(x**2, -1)\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F1(_FSphere, BBOBNfreeFunction):\n \"\"\"Noise-free Sphere function\"\"\"\n funId = 1\n def boundaryhandling(self, x):\n return 0.\n\nclass F101(_FSphere, BBOBGaussFunction):\n \"\"\"Sphere with moderate Gauss noise\"\"\"\n funId = 101\n gaussbeta = 0.01\n\nclass F102(_FSphere, BBOBUniformFunction):\n \"\"\"Sphere with moderate uniform noise\"\"\"\n funId = 102\n unifalphafac = 0.01\n unifbeta = 0.01\n\nclass F103(_FSphere, BBOBCauchyFunction):\n \"\"\"Sphere with moderate Cauchy noise\"\"\"\n funId = 103\n cauchyalpha = 0.01\n cauchyp = 0.05\n\nclass F107(_FSphere, BBOBGaussFunction):\n \"\"\"Sphere with Gauss noise\"\"\"\n funId = 107\n gaussbeta = 1.\n\nclass F108(_FSphere, BBOBUniformFunction):\n \"\"\"Sphere with uniform noise\"\"\"\n funId = 108\n unifalphafac = 1.\n unifbeta = 1.\n\nclass F109(_FSphere, BBOBCauchyFunction):\n \"\"\"Sphere with Cauchy noise\"\"\"\n funId = 109\n cauchyalpha = 1.\n cauchyp = 0.2\n\nclass F2(BBOBNfreeFunction):\n \"\"\"Separable ellipsoid with monotone transformation\n\n Parameter: condition number (default 1e6)\n\n \"\"\"\n\n funId = 2\n paramValues = (1e0, 1e6)\n condition = 1e6\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n if hasattr(self, 'param') and self.param: # not self.param is None\n tmp = self.param\n else:\n tmp = self.condition\n self.scales = tmp ** linspace(0, 1, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n\n # 
COMPUTATION core\n ftrue = dot(monotoneTFosc(x)**2, self.scales)\n fval = self.noise(ftrue) # without noise\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F3(BBOBNfreeFunction):\n \"\"\"Rastrigin with monotone transformation separable \"condition\" 10\"\"\"\n\n funId = 3\n condition = 10.\n beta = 0.2\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialisation\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.scales = (self.condition ** .5) ** linspace(0, 1, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n self.arrscales = resize(self.scales, curshape)\n self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt\n x = monotoneTFosc(x)\n idx = (x > 0)\n x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))\n x = self.arrscales * x\n\n # COMPUTATION core\n ftrue = 10 * (self.dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)\n fval = self.noise(ftrue) # without noise\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F4(BBOBNfreeFunction):\n \"\"\"skew Rastrigin-Bueche, condition 10, skew-\"condition\" 100\"\"\"\n\n funId = 4\n condition = 10.\n alpha = 100.\n maxindex = np.inf # 1:2:min(DIM,maxindex) are the skew variables\n rrseed = 3\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.xopt[:min(dim, self.maxindex):2] = abs(self.xopt[:min(dim, self.maxindex):2])\n self.scales = (self.condition ** .5) ** linspace(0, 1, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n self.arrscales = resize(self.scales, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n xoutside = np.maximum(0., np.abs(x) - 5) * sign(x)\n fpen = 1e2 * np.sum(xoutside**2, -1) # penalty\n fadd = fadd + fpen # self.fadd becomes an array\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # shift optimum to zero\n x = monotoneTFosc(x)\n try:\n tmpx = x[:, :min(self.dim, self.maxindex):2] # tmpx is a reference to a part of x\n except IndexError:\n tmpx = x[:min(self.dim, self.maxindex):2] # tmpx is a reference to a part of x\n tmpx[tmpx > 0] = self.alpha ** .5 * tmpx[tmpx > 0] # this modifies x\n x = self.arrscales * x # scale while assuming that Xopt == 0\n\n # COMPUTATION core\n ftrue = 10 * (self.dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F5(BBOBNfreeFunction):\n \"\"\"Linear slope\"\"\"\n\n funId = 5\n alpha = 100.\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim) # TODO: 
what happens here?\n else:\n self.xopt = 5 * sign(compute_xopt(self.rseed, dim))\n self.scales = -sign(self.xopt) * (self.alpha ** .5) ** linspace(0, 1, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n fadd = fadd + 5 * np.sum(np.abs(self.scales))\n\n # BOUNDARY HANDLING\n # move \"too\" good coordinates back into domain\n x = np.array(x) # convert x and make a copy of x.\n # The following may modify x directly.\n idx_out_of_bounds = (x * self.arrxopt) > 25 # 25 == 5 * 5\n x[idx_out_of_bounds] = sign(x[idx_out_of_bounds]) * 5\n\n # TRANSFORMATION IN SEARCH SPACE\n\n # COMPUTATION core\n ftrue = dot(x, self.scales)\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F6(BBOBNfreeFunction):\n \"\"\"Attractive sector function\"\"\"\n\n funId = 6\n condition = 10.\n alpha = 100.\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n self.scales = (self.condition ** .5) ** linspace(0, 1, dim)\n self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))\n # decouple scaling from function definition\n self.linearTF = dot(self.linearTF, self.rotation)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.linearTF) # TODO: check\n\n # COMPUTATION core\n idx = (x * self.arrxopt) > 0\n x[idx] = self.alpha * x[idx]\n ftrue = monotoneTFosc(np.sum(x**2, -1)) ** .9\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass _FStepEllipsoid(BBOBFunction):\n \"\"\"Abstract Step-ellipsoid, condition 100\n\n Method boundaryhandling needs to be defined.\n\n \"\"\"\n rrseed = 7\n condition = 100.\n alpha = 10.\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n self.scales = self.condition ** linspace(0, 1, dim)\n self.linearTF = dot(compute_rotation(self.rseed, dim),\n diag(((self.condition / 10.)**.5) ** linspace(0, 1, dim)))\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n fadd = fadd + self.boundaryhandling(x)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= 
arrxopt!\n x = dot(x, self.linearTF)\n try:\n x1 = x[:, 0]\n except IndexError:\n x1 = x[0]\n idx = np.abs(x) > .5\n x[idx] = np.round(x[idx])\n x[~idx] = np.round(self.alpha * x[~idx]) / self.alpha\n x = dot(x, self.rotation)\n\n # COMPUTATION core\n ftrue = .1 * np.maximum(1e-4 * np.abs(x1), dot(x ** 2, self.scales))\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F7(_FStepEllipsoid, BBOBNfreeFunction):\n \"\"\"Step-ellipsoid, condition 100, noise-free\"\"\"\n funId = 7\n def boundaryhandling(self, x):\n return defaultboundaryhandling(x, 1.)\n\nclass F113(_FStepEllipsoid, BBOBGaussFunction):\n \"\"\"Step-ellipsoid with gauss noise, condition 100\"\"\"\n funId = 113\n gaussbeta = 1.\n\nclass F114(_FStepEllipsoid, BBOBUniformFunction):\n \"\"\"Step-ellipsoid with uniform noise, condition 100\"\"\"\n funId = 114\n unifalphafac = 1.\n unifbeta = 1.\n\nclass F115(_FStepEllipsoid, BBOBCauchyFunction):\n \"\"\"Step-ellipsoid with Cauchy noise, condition 100\"\"\"\n funId = 115\n cauchyalpha = 1.\n cauchyp = 0.2\n\nclass _FRosenbrock(BBOBFunction):\n \"\"\"Abstract Rosenbrock, non-rotated\n\n Method boundaryhandling needs to be defined.\n\n \"\"\"\n rrseed = 8\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = .75 * compute_xopt(self.rseed, dim) # different from all others\n self.scales = max(1, dim ** .5 / 8.)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n fadd = fadd + self.boundaryhandling(x)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= self.arrxopt!\n x = self.scales * x\n x = x + 1 # shift zero to factual optimum 1\n\n # COMPUTATION core\n try:\n ftrue = (1e2 * np.sum((x[:, :-1] ** 2 - x[:, 1:]) ** 2, -1) +\n np.sum((x[:, :-1] - 1.) ** 2, -1))\n except IndexError:\n ftrue = (1e2 * np.sum((x[:-1] ** 2 - x[1:]) ** 2) +\n np.sum((x[:-1] - 1.) 
** 2))\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F8(_FRosenbrock, BBOBNfreeFunction):\n \"\"\"Rosenbrock noise-free\"\"\"\n funId = 8\n def boundaryhandling(self, x):\n return 0.\n\nclass F104(_FRosenbrock, BBOBGaussFunction):\n \"\"\"Rosenbrock non-rotated with moderate Gauss noise\"\"\"\n funId = 104\n gaussbeta = 0.01\n\nclass F105(_FRosenbrock, BBOBUniformFunction):\n \"\"\"Rosenbrock non-rotated with moderate uniform noise\"\"\"\n funId = 105\n unifalphafac = 0.01\n unifbeta = 0.01\n\nclass F106(_FRosenbrock, BBOBCauchyFunction):\n \"\"\"Rosenbrock non-rotated with moderate Cauchy noise\"\"\"\n funId = 106\n cauchyalpha = 0.01\n cauchyp = 0.05\n\nclass F110(_FRosenbrock, BBOBGaussFunction):\n \"\"\"Rosenbrock non-rotated with Gauss noise\"\"\"\n funId = 110\n gaussbeta = 1.\n\nclass F111(_FRosenbrock, BBOBUniformFunction):\n \"\"\"Rosenbrock non-rotated with uniform noise\"\"\"\n funId = 111\n unifalphafac = 1.\n unifbeta = 1.\n\nclass F112(_FRosenbrock, BBOBCauchyFunction):\n \"\"\"Rosenbrock non-rotated with Cauchy noise\"\"\"\n funId = 112\n cauchyalpha = 1.\n cauchyp = 0.2\n\nclass F9(BBOBNfreeFunction):\n \"\"\"Rosenbrock, rotated\"\"\"\n funId = 9\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n scale = max(1, dim ** .5 / 8.) # nota: different from scales in F8\n self.linearTF = scale * compute_rotation(self.rseed, dim)\n self.xopt = np.hstack(dot(.5 * np.ones((1, dim)), self.linearTF.T)) / scale ** 2\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n\n # TRANSFORMATION IN SEARCH SPACE\n x = dot(x, self.linearTF) + 0.5 # different from F8\n\n # COMPUTATION core\n try:\n ftrue = (1e2 * np.sum((x[:, :-1] ** 2 - x[:, 1:]) ** 2, -1) +\n np.sum((x[:, :-1] - 1.) ** 2, -1))\n except IndexError:\n ftrue = (1e2 * np.sum((x[:-1] ** 2 - x[1:]) ** 2) +\n np.sum((x[:-1] - 1.) 
** 2))\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass _FEllipsoid(BBOBFunction):\n \"\"\"Abstract Ellipsoid with monotone transformation.\n\n Method boundaryhandling needs to be defined.\n\n \"\"\"\n rrseed = 10\n condition = 1e6\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n self.scales = self.condition ** linspace(0, 1, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n fadd = fadd + self.boundaryhandling(x)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.rotation)\n x = monotoneTFosc(x)\n\n # COMPUTATION core\n ftrue = dot(x ** 2, self.scales)\n try:\n ftrue = np.hstack(ftrue)\n except TypeError: # argument 2 to map() must support iteration\n pass\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F10(_FEllipsoid, BBOBNfreeFunction):\n \"\"\"Ellipsoid with monotone transformation, condition 1e6\"\"\"\n funId = 10\n condition = 1e6\n def boundaryhandling(self, x):\n return 0.\n\nclass F116(_FEllipsoid, BBOBGaussFunction):\n \"\"\"Ellipsoid with Gauss noise, monotone x-transformation, condition 1e4\"\"\"\n funId = 116\n condition = 1e4\n gaussbeta = 1.\n\nclass F117(_FEllipsoid, BBOBUniformFunction):\n \"\"\"Ellipsoid with uniform noise, monotone x-transformation, condition 1e4\"\"\"\n funId = 117\n condition = 1e4\n unifalphafac = 1.\n unifbeta = 1.\n\nclass F118(_FEllipsoid, BBOBCauchyFunction):\n \"\"\"Ellipsoid with Cauchy noise, monotone x-transformation, condition 1e4\"\"\"\n funId = 118\n condition = 1e4\n cauchyalpha = 1.\n cauchyp = 0.2\n\nclass F11(BBOBNfreeFunction):\n \"\"\"Discus (tablet) with monotone transformation, condition 1e6\"\"\"\n funId = 11\n condition = 1e6\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.rotation)\n x = monotoneTFosc(x)\n\n # COMPUTATION core\n try:\n ftrue = np.sum(x**2, -1) + (self.condition - 1.) * x[:, 0] ** 2\n except IndexError:\n ftrue = np.sum(x**2) + (self.condition - 1.) 
* x[0] ** 2\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F12(BBOBNfreeFunction):\n \"\"\"Bent cigar with asymmetric space distortion, condition 1e6\"\"\"\n funId = 12\n condition = 1e6\n beta = .5\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed + 1e6, dim) # different from others\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.rotation) # no scaling here, because it would go to the arrExpo\n idx = x > 0\n x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))\n x = dot(x, self.rotation)\n\n # COMPUTATION core\n try:\n ftrue = self.condition * np.sum(x**2, -1) + (1 - self.condition) * x[:, 0]**2\n except IndexError:\n ftrue = self.condition * np.sum(x**2) + (1 - self.condition) * x[0]**2\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F13(BBOBNfreeFunction):\n \"\"\"Sharp ridge\"\"\"\n funId = 13\n condition = 10.\n alpha = 100. # slope\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n self.scales = (self.condition ** .5) ** linspace(0, 1, dim)\n self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))\n self.linearTF = dot(self.linearTF, self.rotation)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.linearTF)\n\n # COMPUTATION core\n try:\n ftrue = x[:, 0] ** 2 + self.alpha * np.sqrt(np.sum(x[:, 1:] ** 2, -1))\n except IndexError:\n ftrue = x[0] ** 2 + self.alpha * np.sqrt(np.sum(x[1:] ** 2, -1))\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass _FDiffPow(BBOBFunction):\n \"\"\"Abstract Sum of different powers, between x^2 and x^6.\n\n Method boundaryhandling needs to be defined.\n\n \"\"\"\n alpha = 4.\n rrseed = 14\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = 
resize(self.xopt, curshape)\n self.arrexpo = resize(2. + self.alpha * linspace(0, 1, dim), curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n fadd = fadd + self.boundaryhandling(x)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.rotation)\n\n # COMPUTATION core\n ftrue = np.sqrt(np.sum(np.abs(x) ** self.arrexpo, -1))\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F14(_FDiffPow, BBOBNfreeFunction):\n \"\"\"Sum of different powers, between x^2 and x^6, noise-free\"\"\"\n funId = 14\n def boundaryhandling(self, x):\n return 0.\n\nclass F119(_FDiffPow, BBOBGaussFunction):\n \"\"\"Sum of different powers with Gauss noise, between x^2 and x^6\"\"\"\n funId = 119\n gaussbeta = 1.\n\nclass F120(_FDiffPow, BBOBUniformFunction):\n \"\"\"Sum of different powers with uniform noise, between x^2 and x^6\"\"\"\n funId = 120\n unifalphafac = 1.\n unifbeta = 1.\n\nclass F121(_FDiffPow, BBOBCauchyFunction):\n \"\"\"Sum of different powers with seldom Cauchy noise, between x^2 and x^6\"\"\"\n funId = 121\n cauchyalpha = 1.\n cauchyp = 0.2\n\nclass F15(BBOBNfreeFunction):\n \"\"\"Rastrigin with asymmetric non-linear distortion, \"condition\" 10\"\"\"\n funId = 15\n condition = 10.\n beta = 0.2\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n self.scales = (self.condition ** .5) ** linspace(0, 1, dim)\n self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))\n # decouple scaling from function definition\n self.linearTF = dot(self.linearTF, self.rotation)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.rotation) # no scaling here, because it would go to the arrexpo\n x = monotoneTFosc(x)\n idx = x > 0.\n x[idx] = x[idx] ** (1. + self.arrexpo[idx] * np.sqrt(x[idx])) # smooth in zero\n x = dot(x, self.linearTF)\n\n # COMPUTATION core\n ftrue = 10. * (dim - np.sum(np.cos(2 * np.pi * x), -1)) + np.sum(x ** 2, -1)\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F16(BBOBNfreeFunction):\n \"\"\"Weierstrass, condition 100\"\"\"\n funId = 16\n condition = 100.\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n self.scales = (1. 
/ self.condition ** .5) ** linspace(0, 1, dim) # CAVE?\n self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))\n # decouple scaling from function definition\n self.linearTF = dot(self.linearTF, self.rotation)\n K = np.arange(0, 12)\n self.aK = np.reshape(0.5 ** K, (1, 12))\n self.bK = np.reshape(3. ** K, (1, 12))\n self.f0 = np.sum(self.aK * np.cos(2 * np.pi * self.bK * 0.5)) # optimal value\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)\n fpen = (10. / dim) * np.sum(xoutside ** 2, -1)\n fadd = fadd + fpen\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.rotation)\n x = monotoneTFosc(x)\n x = dot(x, self.linearTF)\n\n # COMPUTATION core\n if len(curshape) < 2: # popsize is one\n ftrue = np.sum(dot(self.aK, np.cos(dot(self.bK.T, 2 * np.pi * (np.reshape(x, (1, len(x))) + 0.5)))))\n else:\n ftrue = np.zeros(curshape[0]) # curshape[0] is popsize\n for k, i in enumerate(x):\n # TODO: simplify next line\n ftrue[k] = np.sum(dot(self.aK, np.cos(dot(self.bK.T, 2 * np.pi * (np.reshape(i, (1, len(i))) + 0.5)))))\n ftrue = 10. * (ftrue / dim - self.f0) ** 3\n try:\n ftrue = np.hstack(ftrue)\n except TypeError:\n pass\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass _FSchaffersF7(BBOBFunction):\n \"\"\"Abstract Schaffers F7 with asymmetric non-linear transformation, condition 10\n\n Class attribute condition and method boundaryhandling need to be defined.\n\n \"\"\"\n rrseed = 17\n condition = None\n beta = 0.5\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n self.scales = (self.condition ** .5) ** linspace(0, 1 , dim)\n self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n fadd = fadd + self.boundaryhandling(x)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.rotation)\n idx = x > 0\n x[idx] = x[idx] ** (1 + self.arrexpo[idx] * np.sqrt(x[idx]))\n x = dot(x, self.linearTF)\n\n # COMPUTATION core\n try:\n s = x[:, :-1] ** 2 + x[:, 1:] ** 2\n except IndexError:\n s = x[:-1] ** 2 + x[1:] ** 2\n ftrue = np.mean(s ** .25 * (np.sin(50 * s ** .1) ** 2 + 1), -1) ** 2\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F17(_FSchaffersF7, BBOBNfreeFunction):\n \"\"\"Schaffers F7 with asymmetric non-linear transformation, condition 10\"\"\"\n funId = 17\n condition = 10.\n 
def boundaryhandling(self, x):\n return defaultboundaryhandling(x, 10.)\n\nclass F18(_FSchaffersF7, BBOBNfreeFunction):\n \"\"\"Schaffers F7 with asymmetric non-linear transformation, condition 1000\"\"\"\n funId = 18\n condition = 1000.\n def boundaryhandling(self, x):\n return defaultboundaryhandling(x, 10.)\n\nclass F122(_FSchaffersF7, BBOBGaussFunction):\n \"\"\"Schaffers F7 with Gauss noise, with asymmetric non-linear transformation, condition 10\"\"\"\n funId = 122\n condition = 10.\n gaussbeta = 1.\n\nclass F123(_FSchaffersF7, BBOBUniformFunction):\n \"\"\"Schaffers F7 with uniform noise, asymmetric non-linear transformation, condition 10\"\"\"\n funId = 123\n condition = 10.\n unifalphafac = 1.\n unifbeta = 1.\n\nclass F124(_FSchaffersF7, BBOBCauchyFunction): # TODO: check boundary handling\n \"\"\"Schaffers F7 with seldom Cauchy noise, asymmetric non-linear transformation, condition 10\"\"\"\n funId = 124\n condition = 10.\n cauchyalpha = 1.\n cauchyp = 0.2\n\nclass _F8F2(BBOBFunction):\n \"\"\"Abstract F8F2 sum of Griewank-Rosenbrock 2-D blocks\n\n Class attribute facftrue and method boundaryhandling need to be defined.\n\n \"\"\"\n facftrue = None\n rrseed = 19\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n scale = max(1, dim ** .5 / 8.)\n self.linearTF = scale * compute_rotation(self.rseed, dim)\n # if self.zerox:\n # self.xopt = zeros(dim) # does not work here\n # else:\n # TODO: clean this line\n self.xopt = np.hstack(dot(self.linearTF, 0.5 * np.ones((dim, 1)) / scale ** 2))\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n fadd = fadd + self.boundaryhandling(x)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = dot(x, self.linearTF) + 0.5 # cannot be replaced with x -= arrxopt!\n\n # COMPUTATION core\n try:\n f2 = 100. * (x[:, :-1] ** 2 - x[:, 1:]) ** 2 + (1. - x[:, :-1]) ** 2\n except IndexError:\n f2 = 100. * (x[:-1] ** 2 - x[1:]) ** 2 + (1. - x[:-1]) ** 2\n ftrue = self.facftrue + self.facftrue * np.sum(f2 / 4000. 
- np.cos(f2), -1) / (dim - 1.)\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F19(_F8F2, BBOBNfreeFunction):\n \"\"\"F8F2 sum of Griewank-Rosenbrock 2-D blocks, noise-free\"\"\"\n funId = 19\n facftrue = 10.\n def boundaryhandling(self, x):\n return 0.\n\nclass F125(_F8F2, BBOBGaussFunction):\n \"\"\"F8F2 sum of Griewank-Rosenbrock 2-D blocks with Gauss noise\"\"\"\n funId = 125\n facftrue = 1.\n gaussbeta = 1.\n\nclass F126(_F8F2, BBOBUniformFunction):\n \"\"\"F8F2 sum of Griewank-Rosenbrock 2-D blocks with uniform noise\"\"\"\n funId = 126\n facftrue = 1.\n unifalphafac = 1.\n unifbeta = 1.\n\nclass F127(_F8F2, BBOBCauchyFunction):\n \"\"\"F8F2 sum of Griewank-Rosenbrock 2-D blocks with seldom Cauchy noise\"\"\"\n funId = 127\n facftrue = 1.\n cauchyalpha = 1.\n cauchyp = 0.2\n\nclass F20(BBOBNfreeFunction):\n \"\"\"Schwefel with tridiagonal variable transformation\"\"\"\n funId = 20\n condition = 10.\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = 0.5 * sign(unif(dim, self.rseed) - 0.5) * 4.2096874633\n self.scales = (self.condition ** .5) ** np.linspace(0, 1, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(2 * np.abs(self.xopt), curshape)\n self.arrscales = resize(self.scales, curshape)\n self.arrsigns = resize(sign(self.xopt), curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = 2 * self.arrsigns * x # makes the below boundary handling effective for coordinates\n try:\n x[:, 1:] = x[:, 1:] + .25 * (x[:, :-1] - self.arrxopt[:, :-1])\n except IndexError:\n x[1:] = x[1:] + .25 * (x[:-1] - self.arrxopt[:-1])\n x = 100. * (self.arrscales * (x - self.arrxopt) + self.arrxopt)\n\n # BOUNDARY HANDLING\n xoutside = np.maximum(0., np.abs(x) - 500.) 
* sign(x) # in [-500, 500]\n fpen = 0.01 * np.sum(xoutside ** 2, -1)\n fadd = fadd + fpen\n\n # COMPUTATION core\n ftrue = 0.01 * ((418.9828872724339) - np.mean(x * np.sin(np.sqrt(np.abs(x))), -1))\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass _FGallagher(BBOBFunction):\n \"\"\"Abstract Gallagher with nhighpeaks Gaussian peaks, condition up to 1000, one global rotation\n\n Attribute fac2, nhighpeaks, highpeakcond and method boundary\n handling need to be defined.\n\n \"\"\"\n rrseed = 21\n maxcondition = 1000.\n fitvalues = (1.1, 9.1)\n fac2 = None # added: factor for xopt not too close to boundaries, used by F22\n nhighpeaks = None\n highpeakcond = None\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n self.rotation = compute_rotation(self.rseed, dim)\n arrcondition = self.maxcondition ** linspace(0, 1, self.nhighpeaks - 1)\n idx = np.argsort(unif(self.nhighpeaks - 1, self.rseed)) # random permutation\n arrcondition = np.insert(arrcondition[idx], 0, self.highpeakcond)\n self.arrscales = []\n for i, e in enumerate(arrcondition):\n s = e ** linspace(-.5, .5, dim)\n idx = np.argsort(unif(dim, self.rseed + 1e3 * i)) # permutation instead of rotation\n self.arrscales.append(s[idx]) # this is inverse Cov\n self.arrscales = np.vstack(self.arrscales)\n # compute peak values, 10 is global optimum\n self.peakvalues = np.insert(linspace(self.fitvalues[0], self.fitvalues[1], self.nhighpeaks - 1), 0, 10.)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.xlocal = dot(self.fac2 * np.reshape(10. * unif(dim * self.nhighpeaks, self.rseed) - 5., (self.nhighpeaks, dim)),\n self.rotation)\n if self.zerox:\n self.xlocal[0, :] = zeros(dim)\n else:\n # global optimum not too close to boundary\n self.xlocal[0, :] = 0.8 * self.xlocal[0, :]\n self.xopt = dot(self.xlocal[0, :], self.rotation.T)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n fadd = fadd + self.boundaryhandling(x)\n\n # TRANSFORMATION IN SEARCH SPACE\n x = dot(x, self.rotation)\n\n # COMPUTATION core\n fac = -0.5 / dim\n # f = NaN(nhighpeaks, popsi)\n # TODO: optimize\n if len(curshape) < 2: # popsize is 1 in this case\n f = np.zeros(self.nhighpeaks)\n xx = tile(x, (self.nhighpeaks, 1)) - self.xlocal\n f[:] = self.peakvalues * np.exp(fac * np.sum(self.arrscales * xx ** 2, 1))\n elif curshape[0] < .5 * self.nhighpeaks:\n f = np.zeros((curshape[0], self.nhighpeaks))\n for k, e in enumerate(x):\n xx = tile(e, (self.nhighpeaks, 1)) - self.xlocal\n f[k, :] = self.peakvalues * np.exp(fac * np.sum(self.arrscales * xx ** 2, 1))\n else:\n f = np.zeros((curshape[0], self.nhighpeaks))\n for i in range(self.nhighpeaks):\n xx = (x - tile(self.xlocal[i, :], (curshape[0], 1)))\n f[:, i] = self.peakvalues[i] * np.exp(fac * (dot(xx ** 2, self.arrscales[i, :])))\n ftrue = monotoneTFosc(10 - np.max(f, -1)) ** 2\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F21(_FGallagher, BBOBNfreeFunction):\n \"\"\"Gallagher with 101 Gaussian peaks, condition up to 1000, one global rotation, noise-free\"\"\"\n funId = 21\n nhighpeaks = 101\n fac2 = 1.\n highpeakcond = 1000. 
** .5\n def boundaryhandling(self, x):\n return defaultboundaryhandling(x, 1.)\n\nclass F22(_FGallagher, BBOBNfreeFunction):\n \"\"\"Gallagher with 21 Gaussian peaks, condition up to 1000, one global rotation\"\"\"\n funId = 22\n rrseed = 22\n nhighpeaks = 21\n fac2 = 0.98\n highpeakcond = 1000.\n def boundaryhandling(self, x):\n return defaultboundaryhandling(x, 1.)\n\nclass F128(_FGallagher, BBOBGaussFunction): # TODO: check boundary handling\n \"\"\"Gallagher with 101 Gaussian peaks with Gauss noise, condition up to 1000, one global rotation\"\"\"\n funId = 128\n nhighpeaks = 101\n fac2 = 1.\n highpeakcond = 1000. ** .5\n gaussbeta = 1.\n\nclass F129(_FGallagher, BBOBUniformFunction):\n \"\"\"Gallagher with 101 Gaussian peaks with uniform noise, condition up to 1000, one global rotation\"\"\"\n funId = 129\n nhighpeaks = 101\n fac2 = 1.\n highpeakcond = 1000. ** .5\n unifalphafac = 1.\n unifbeta = 1.\n\nclass F130(_FGallagher, BBOBCauchyFunction):\n \"\"\"Gallagher with 101 Gaussian peaks with seldom Cauchy noise, condition up to 1000, one global rotation\"\"\"\n funId = 130\n nhighpeaks = 101\n fac2 = 1.\n highpeakcond = 1000. ** .5\n cauchyalpha = 1.\n cauchyp = 0.2\n\nclass F23(BBOBNfreeFunction):\n \"\"\"Katsuura function\"\"\"\n funId = 23\n condition = 100.\n arr2k = np.reshape(2. ** (np.arange(1, 33)), (1, 32)) # bug-fix for 32-bit (NH): 2 -> 2. (relevance is minor)\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n self.scales = (self.condition ** .5) ** linspace(0, 1, dim)\n self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))\n # decouple scaling from function definition\n self.linearTF = dot(self.linearTF, self.rotation)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)\n fpen = np.sum(xoutside ** 2, -1)\n fadd = fadd + fpen\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n x = dot(x, self.linearTF)\n\n # COMPUTATION core\n if len(curshape) < 2: # popsize is 1 in this case\n arr = dot(np.reshape(x, (dim, 1)), self.arr2k) # dim times d array\n ftrue = (-10. / dim ** 2. +\n 10. / dim ** 2. *\n np.prod(1 + np.arange(1, dim + 1) * np.dot(np.abs(arr - np.round(arr)), self.arr2k.T ** -1.).T) ** (10. / dim ** 1.2))\n else:\n ftrue = zeros(curshape[0])\n for k, e in enumerate(x):\n arr = dot(np.reshape(e, (dim, 1)), self.arr2k) # dim times d array\n ftrue[k] = (-10. / dim ** 2. +\n 10. / dim ** 2. *\n np.prod(1 + np.arange(1, dim + 1) * np.dot(np.abs(arr - np.round(arr)), self.arr2k.T ** -1.).T) ** (10. 
/ dim ** 1.2))\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\nclass F24(BBOBNfreeFunction):\n \"\"\"Lunacek bi-Rastrigin, condition 100\n\n in PPSN 2008, Rastrigin part rotated and scaled\n\n \"\"\"\n funId = 24\n condition = 100.\n _mu1 = 2.5\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = .5 * self._mu1 * sign(gauss(dim, self.rseed))\n self.rotation = compute_rotation(self.rseed + 1e6, dim)\n self.scales = (self.condition ** .5) ** linspace(0, 1, dim)\n self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))\n # decouple scaling from function definition\n self.linearTF = dot(self.linearTF, self.rotation)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n # self.arrxopt = resize(self.xopt, curshape)\n self.arrscales = resize(2. * sign(self.xopt), curshape) # makes up for xopt\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY HANDLING\n xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)\n fpen = 1e4 * np.sum(xoutside ** 2, -1)\n fadd = fadd + fpen\n\n # TRANSFORMATION IN SEARCH SPACE\n x = self.arrscales * x\n\n # COMPUTATION core\n s = 1 - .5 / ((dim + 20)**0.5 - 4.1) # tested up to DIM = 160 p in [0.25,0.33]\n d = 1 # shift [1,3], smaller is more difficult\n mu2 = -((self._mu1 ** 2 - d) / s) ** .5\n ftrue = np.minimum(np.sum((x - self._mu1) ** 2, -1),\n d * dim + s * np.sum((x - mu2) ** 2, -1))\n ftrue = ftrue + 10 * (dim - np.sum(np.cos(2 * np.pi * dot(x - self._mu1, self.linearTF)), -1))\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\n# dictbbob = {'sphere': F1, 'ellipsoid': F2, 'Rastrigin': F3}\nnfreefunclasses = (F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14,\n F15, F16, F17, F18, F19, F20, F21, F22, F23, F24) # hard coded\nnoisyfunclasses = (F101, F102, F103, F104, F105, F106, F107, F108, F109, F110,\n F111, F112, F113, F114, F115, F116, F117, F118, F119, F120,\n F121, F122, F123, F124, F125, F126, F127, F128, F129, F130)\ndictbbobnfree = dict((i.funId, i) for i in nfreefunclasses)\nnfreeIDs = sorted(dictbbobnfree.keys()) # was: \"nfreenames\"\nnfreeinfos = [str(i) + ': ' + dictbbobnfree[i].__doc__ for i in nfreeIDs]\n\ndictbbobnoisy = dict((i.funId, i) for i in noisyfunclasses)\nnoisyIDs = sorted(dictbbobnoisy.keys()) # was noisynames\n\nfunclasses = list(nfreefunclasses) + list(noisyfunclasses)\ndictbbob = dict((i.funId, i) for i in funclasses)\n\n# TODO: pb xopt f9, 21, 22\nclass _FTemplate(BBOBNfreeFunction):\n \"\"\"Template based on F1\"\"\"\n\n funId = 421337\n\n def initwithsize(self, curshape, dim):\n # DIM-dependent initialization\n if self.dim != dim:\n if self.zerox:\n self.xopt = zeros(dim)\n else:\n self.xopt = compute_xopt(self.rseed, dim)\n\n # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices\n if self.lastshape != curshape:\n self.dim = dim\n self.lastshape = curshape\n self.arrxopt = resize(self.xopt, curshape)\n\n self.linearTf = None\n self.rotation = None\n\n def _evalfull(self, x):\n fadd = self.fopt\n curshape, dim = self.shape_(x)\n # it is assumed x are row vectors\n\n if self.lastshape != curshape:\n self.initwithsize(curshape, dim)\n\n # BOUNDARY 
HANDLING\n\n # TRANSFORMATION IN SEARCH SPACE\n x = x - self.arrxopt # cannot be replaced with x -= arrxopt!\n\n # COMPUTATION core\n ftrue = np.sum(x**2, 1)\n fval = self.noise(ftrue)\n\n # FINALIZE\n ftrue += fadd\n fval += fadd\n return fval, ftrue\n\ndef instantiate(ifun, iinstance=0, param=None, **kwargs):\n \"\"\"Returns test function ifun, by default instance 0,\n and its optimal f-value.\"\"\"\n res = dictbbob[ifun](iinstance = iinstance, param = param, **kwargs) # calling BBOBFunction.__init__(iinstance, param,...)\n return res, res.fopt\n\ndef get_param(ifun):\n \"\"\"Returns the parameter values of the function ifun.\"\"\"\n try:\n return dictbbob[ifun].paramValues\n except AttributeError:\n return (None,)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod() # run all doctests in this module\n",
"import sys\nimport numpy as np\n\nsys.path.insert(0, '../')\nfrom BayesOpt.optimizer import OnePlusOne_CMA, OnePlusOne_Cholesky_CMA\n\ndef obj_fun(x):\n return np.sum(x ** 2)\n\nopt = OnePlusOne_Cholesky_CMA(\n 2, obj_fun, lb=-5, ub=5, sigma0=0.2, ftarget=1e-8, verbose=True\n)\nopt.run()\nprint(opt.stop_dict)"
] |
[
[
"numpy.abs",
"numpy.random.seed",
"numpy.power",
"numpy.asarray",
"numpy.nonzero",
"numpy.tile",
"numpy.sign",
"numpy.atleast_2d",
"numpy.round",
"numpy.random.rand",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.random.randint"
],
[
"numpy.diag",
"numpy.dot",
"numpy.resize",
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"numpy.round",
"numpy.max",
"numpy.any",
"numpy.hstack",
"numpy.reshape",
"numpy.arange",
"numpy.sin",
"numpy.insert",
"numpy.zeros",
"numpy.log",
"numpy.array",
"numpy.sum",
"numpy.random.random",
"numpy.abs",
"numpy.random.standard_normal",
"numpy.cos",
"numpy.tile",
"numpy.ones",
"numpy.sign",
"numpy.shape",
"numpy.isscalar",
"numpy.prod",
"numpy.vstack"
],
[
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yxw027/LEOGPS
|
[
"8392f92567cd9a8ba1fb16ae071242d2cef90252"
] |
[
"codes/gpsxtr.py"
] |
[
"#!/usr/bin/env python3\n'''\n###############################################################################\n###############################################################################\n## ##\n## _ ___ ___ ___ ___ ___ ##\n## | | | __ / \\ / __| _ | __| ##\n## | |__| __ ( ) | (_ | _|__ \\ ##\n## |____|___ \\___/ \\___|_| \\___/ ##\n## v 0.2 (Alpha) ##\n## ##\n## FILE DESCRIPTION: ##\n## ##\n## Extraction of position information of GPS satellites across time. ##\n## Derivation of velocity of GPS satellites using first order derivative. ##\n## ##\n## INPUTS: ##\n## ##\n## Final Product SP3 file of GPS ephemeris, and CLK_30s file of GPS. ##\n## (Auto-download from IGS CDDIS if online, and if file is not present). ##\n## ##\n## OUTPUT: ##\n## ##\n## GPS dictionary of the following nested key-value pairs: ##\n## gpsdata = {epoch1:{1: {px:123, py:123, pz:123, ##\n## vx:123, vy:123, vz:123, ##\n## clkb:123, clkd:123}, ##\n## 2: {px:123, py:123, pz:123, ##\n## vx:123, vy:123, vz:123, ##\n## clkb:123, clkd:123}, ... ##\n## ... ... ... ... ... ... ##\n## 32:{px:123, py:123, pz:123, ##\n## vx:123, vy:123, vz:123, ##\n## clkb:123, clkd:123}} ... ##\n## epoch2:{1: {px:123, py:123, pz:123, ##\n## ... ... ... ... ... ...}}} ##\n## ##\n## REMARKS: Use only SP3 orbit format for GPS only (no multi-GNSS support) ##\n## ##\n## AUTHOR MODIFIED: 26-11-2019, by Samuel Y.W. Low ##\n## ##\n###############################################################################\n###############################################################################\n'''\n\nimport os\nimport shutil\nimport datetime\nimport warnings\nimport subprocess\nimport numpy as np\nimport urllib.request\n\n# IMPORT LOCAL LIBRARIES\nfrom codes import pubplt\n\n\n''' Now, this is the main routine that parses ephemeris and clock data '''\n\ndef gpsxtr(inps, tstart, tstop, tstep):\n \n warnings.simplefilter('ignore', np.RankWarning) # Ignore polyfit warnings\n \n ###########################################################################\n ###########################################################################\n \n ''' First, we initialise GPS ephemeris and clock download data. '''\n \n igsurl = 'ftp://cddis.nasa.gov/gnss/products/' # IGS CDDIS URL\n \n # We will download them into our directories, below, later on.\n cwd = inps['cwd'] # Get current main working directory\n iwd = cwd + '\\\\input\\\\' # Get directory for ephemeris / clock files\n os.chdir(cwd) # Ensure the user is running in the main directory.\n \n # Then, we must retrieve all number of days of GPS and CLK data needed \n days = (tstop.date() - tstart.date()).days + 1 # Number of days\n filelist = [] # Stores all the required GPS / CLK ephemeris files\n\t\n\t# This is the command line call to use gzip from codes folder:\n gzip_call = '\\\\utils\\\\gzip\\\\gzip.exe -d '\n \n ###########################################################################\n ###########################################################################\n \n ''' Now, we download GPS ephemeris and clock biases from IGS. '''\n \n # Now, check for desired clock files. 
If non-existent, download them.\n for d in range(-1,days+1):\n \n wd, wwww = gpsweekday( tstart + datetime.timedelta( days = d ) )\n name = 'igs' + wwww + wd # File name, without file extension\n clkurl = igsurl + wwww + '/' + name + '.clk_30s.Z' # URL of clock file\n filelist.append(name) # Add this into the list of files for parsing\n \n # Check for CLK file, and download + unzip it if needed.\n if d in range(0,days) and os.path.exists(iwd+name+'.clk_30s') != True:\n \n print('CLK file for ' + name + ' not found! Downloading now...')\n urllib.request.urlretrieve(clkurl, name + '.clk_30s.Z')\n print('Completed downloading the clock file! Now unzipping...')\n subprocess.call(cwd + gzip_call + name + '.clk_30s.Z')\n print('Files unzipped, moving them into the inputs folder.')\n shutil.move(cwd + '\\\\' + name + '.clk_30s', iwd+name + '.clk_30s')\n print('Unzipping completed! \\n') \n \n else:\n if d in range(0,days):\n print('CLK file for '+name+' found! Proceeding to process! \\n')\n \n ###########################################################################\n ###########################################################################\n \n ''' In this segment, we extract GPS clock information from IGS. '''\n \n # Now, we initialise the clock dictionary holding biases and drifts.\n \n gpsclks = {} # Initialise the dictionary that holds clock information.\n first_flag = True # Flag to mark the first reading of clock time\n \n for file in filelist[1:-1]:\n \n ''' Start reading each downloaded GPS clock (30s) file from IGS '''\n \n tc = datetime.timedelta(seconds=30) # Step size of clock file.\n f_clk = open(iwd + file + '.clk_30s', 'r') # Open up the SP3 file\n \n # Read the clock file.\n for line in f_clk:\n \n # Now, let's save those clock biases and drifts.\n if 'AS G' in line:\n \n line = line.split()\n \n yyyy, mm, dd = int(line[2]), int(line[3]), int(line[4])\n hh, mn, ss = int(line[5]), int(line[6]), int(float(line[7]))\n \n timenow = datetime.datetime(yyyy, mm, dd, hh, mn, ss)\n \n # Is the current clock readings in the user-defined time axis?\n if timenow <= tstop and timenow >= (tstart - tc):\n \n # Record the timing of the first epoch.\n if first_flag == True:\n first_time = timenow\n first_flag = False\n \n # Then, record the clock biases for each SV.\n SV = int(line[1][1:])\n clkbias = float(line[9])\n \n # Check if this SV has been recorded before.\n if SV not in gpsclks:\n gpsclks[SV] = {}\n \n # Check if this epoch has been recorded before.\n if timenow not in gpsclks[SV]:\n gpsclks[SV][timenow] = clkbias\n \n # Record the final time too. \n if timenow + tc > tstop and first_flag == False:\n final_time = timenow\n \n # Close the current CLK file, open the next one if any.\n f_clk.close()\n \n ###########################################################################\n ###########################################################################\n \n ''' In this segment, we interpolate missing clock biases. '''\n \n # Now let's reconstruct the time axis across clock biases.\n # We create two time axes, one using datetime objects, and the other using\n # seconds in integers. 
This may seem duplicated, but we need the integer\n # array in order to do interpolation, whereas the datetime array is used\n # for keeping track of which missing values are found in the time axis.\n # It is not optimal code, but preference is given to the readability of it.\n \n goodsats = [] # We initialise an array to hold satellites with clock info.\n clktime_d = first_time # Initialise the first time reading (datetime)\n clkaxis_d = [] # Axis for time (datetime) across clock bias values.\n clktime_s = 0 # Initialise the first time reading (seconds)\n clkaxis_s = [] # Axis for time (seconds) across clock bias values.\n \n while clktime_d <= final_time:\n clkaxis_d.append(clktime_d) # Time axis appending of datetime objects.\n clkaxis_s.append(clktime_s) # Time axis appending of integer objects.\n clktime_d = clktime_d + tc # Add time-delta of 30 seconds.\n clktime_s = clktime_s + 30 # Add integer 30 seconds.\n \n # Let's first report which GPS satellite clock biases are completely gone.\n for SV in range(1,33):\n if SV not in gpsclks:\n print('GPS SV ' + str(SV) + ' clock biases completely missing.')\n print('SV ' + str(SV) + ' will not be used in the solver. \\n')\n \n # We begin parsing through the GPS clocks dictionary to check for missing\n # clock values, and to interpolate them if possible.\n for SV in gpsclks:\n if SV not in goodsats:\n goodsats.append(SV)\n \n # Read the clock bias values for each SV as an array.\n clkbiases = list(gpsclks[SV].values()) # Clock bias values\n clkaxis_di = list(gpsclks[SV].keys()) # Recorded datetime objects\n \n # We need a time axis in floats, not datetimes, so we can do the\n # interpolation. Since the full time axis 'clkaxis_d' should be sorted\n # thanks to IGS, we can simplify our search of the sorted list by\n # matching the indices, instead of using brute force or binary search.\n clkaxis_si = [30.0 * clkaxis_d.index(t) for t in clkaxis_di]\n \n # Any SVs that were not recorded, will be not be entered in 'goodsats'.\n # However, there could be missing clock values scattered across time,\n # for a particular SV value. We should interpolate those values here.\n \n if len(clkaxis_si) != len(clkaxis_s):\n \n # Perform a least squares regression best fit line.\n bestfit = np.polyfit(clkaxis_si, clkbiases, 1)\n \n # Then, fill in all missing clock values.\n # Time axis 'clkaxis_d' contains all ideal epochs, without missing\n # values. We will compare its elements to the elements of the\n # epochs in the present 'timeaxisd', and if there are any missing\n # epochs, then we perform extrapolation or interpolation.\n \n for t in range(0,len(clkaxis_d)):\n \n # Now, we check which epochs in the time axis is missing.\n if clkaxis_d[t] not in clkaxis_di:\n \n # Extrapolate the clock bias.\n clkbias_extr = float(np.polyval(bestfit, clkaxis_s[t]))\n \n # Update this interpolated value in the 'gpsclks'\n gpsclks[SV][clkaxis_d[t]] = clkbias_extr\n \n # Sort the list of good satellites found.\n goodsats.sort() # Sort in ascending order\n \n ###########################################################################\n ###########################################################################\n \n ''' In this segment, we initialise the structure of our final output. '''\n \n # We initialise the structure of the final output dictionary.\n # gpsdata = {epoch1: { SV1: { 'px': ..., 'vz': ..., 'clkb':... 
} ...} ...}\n # Unlike gpsephm and gpsclks, the final gpsdata is indexed across time.\n gpsdata = {} # Final output object.\n ti = tstart # Initialise the time object.\n \n # Begin initialising the 'gpsdata' dictionary object;\n # (the final output of this program).\n while ti <= tstop:\n gpsdata[ti] = {}\n for p in goodsats:\n gpsdata[ti][p] = {}\n gpsdata[ti][p]['px'] = 0.0 # Position X of LEO at t = ti\n gpsdata[ti][p]['py'] = 0.0 # Position Y of LEO at t = ti\n gpsdata[ti][p]['pz'] = 0.0 # Position Z of LEO at t = ti\n gpsdata[ti][p]['vx'] = 0.0 # Velocity X of LEO at t = ti\n gpsdata[ti][p]['vy'] = 0.0 # Velocity Y of LEO at t = ti\n gpsdata[ti][p]['vz'] = 0.0 # Velocity Z of LEO at t = ti\n gpsdata[ti][p]['clkb'] = 0.0 # Clock bias of LEO at t = ti\n gpsdata[ti][p]['clkd'] = 0.0 # Clock drift of LEO at t = ti\n ti = ti + tstep # Update the time step.\n \n # From 'gpsdata' we can get the user-specified time axis in date-time.\n tstep_ss = tstep.seconds\n t_usr_dt = sorted(list(gpsdata.keys()))\n t_usr_ss = np.array(sorted([tstep_ss * t for t in range(0,len(t_usr_dt))]))\n \n ###########################################################################\n ###########################################################################\n \n ''' In this segment, we extract GPS precise ephemerides from IGS. '''\n \n # Now, check for desired ephemeris files. If non-existent, download them.\n for d in range(-1,days+1):\n \n wd, wwww = gpsweekday( tstart + datetime.timedelta( days = d ) )\n name = 'igs' + wwww + wd # File name, without file extension\n ephurl = igsurl + wwww + '/' + name + '.sp3.Z' # URL of ephemeris file\n \n # Check for SP3 ephemeris file, and download + unzip it if needed.\n if os.path.exists(iwd+name+'.sp3') != True:\n \n print('SP3 file for '+ name +' not found! Attempt download now...')\n urllib.request.urlretrieve(ephurl, name + '.sp3.Z')\n print('Completed downloading the ephemeris file! Now unzipping...')\n subprocess.call(cwd + gzip_call + name + '.sp3.Z')\n print('Files unzipped, moving them into the inputs folder.')\n shutil.move(cwd + '\\\\' + name + '.sp3', iwd + name + '.sp3')\n print('Unzipping completed! \\n')\n \n else:\n \n print('SP3 file for ' + name + ' found! Proceeding to process! \\n')\n\n # We download the GPS precise ephemerides one day before and after,\n # to prevent the occurrence of Runge's phenomenon by adding buffer in the\n # extrapolation process.\n \n # This will be where ephemeris data will be parsed into.\n # gpsphm = {SV: {epoch1: {'px':xxx, 'py':yyy ... 
'vy':vyy, 'vz',vzz}}}\n gpsephm = {} # Ephemeris dictionary.\n for SV in goodsats:\n gpsephm[SV] = {}\n \n # All epochs after 'gps_tstart' will be recorded from IGS ephemeris file.\n gps_tstart = tstart - datetime.timedelta(seconds = 7200) # Minus 02:00:00\n \n # All epochs will be recorded until 'gps_tstop', inclusive.\n gps_tstop = tstop + datetime.timedelta(seconds = 7200) # Add 02:00:00\n \n # Time step in the IGS precise ephemeris file.\n gps_tstep = datetime.timedelta(seconds = 900)\n \n # Flag to mark the first reading of ephemeris time\n first_flag = True\n \n # Now we parse through all the downloaded IGS files.\n for file in filelist:\n \n ''' Start reading each downloaded GPS ephemeris file from IGS '''\n \n f_gps = open(iwd + file + '.sp3', 'r') # Open up the SP3 file\n gps_record = False # Trigger for recording GPS data\n \n for line in f_gps:\n \n # Split up the strings\n line = line.split()\n \n # Skip blank lines\n if len(line) <= 1:\n continue\n \n # Check if the time reading is accurate...\n if line[0] == '*':\n \n # Read the time reading now, save it as a datetime object.\n timenow = datetime.datetime(int(line[1]),\n int(line[2]),\n int(line[3]),\n int(line[4]),\n int(line[5]),\n int(float(line[6])))\n \n # ... only if the epoch falls within our desired range.\n if timenow <= gps_tstop and timenow > gps_tstart - gps_tstep:\n gps_record = True # Trigger the recording on.\n \n # Record the first ever epoch.\n if first_flag == True:\n first_time = timenow\n first_flag = False\n \n else:\n gps_record = False # Trigger the recording off.\n \n # Now, check for XYZ coordinates of each SV ID\n if 'PG' in line[0] and gps_record == True:\n \n # Get the data we need below.\n SV = int(line[0][2:4]) # Which GPS satellite is this?\n x = float(line[1]) # X-coordinate position\n y = float(line[2]) # Y-coordinate position\n z = float(line[3]) # Z-coordinate position\n \n # Check if this SV has clean clock values.\n if SV in goodsats:\n \n # Check if this is the first epoch.\n if timenow not in gpsephm[SV]:\n gpsephm[SV][timenow] = {}\n \n # Assign the data into the gpsephm dictionary.\n gpsephm[SV][timenow] = {} # Initialise...\n gpsephm[SV][timenow]['px'] = x # Position X (ECEF)\n gpsephm[SV][timenow]['py'] = y # Position Y (ECEF)\n gpsephm[SV][timenow]['pz'] = z # Position Z (ECEF)\n \n # Initialise the velocity values too, for estimation.\n gpsephm[SV][timenow]['vx'] = 0.0 # Velocity X (ECEF)\n gpsephm[SV][timenow]['vy'] = 0.0 # Velocity Y (ECEF)\n gpsephm[SV][timenow]['vz'] = 0.0 # Velocity Z (ECEF)\n \n f_gps.close()\n \n ###########################################################################\n ###########################################################################\n \n ''' In this segment, we interpolate the GPS precise ephemeris. '''\n \n print('We now interpolate the GPS precise ephemeris. \\n')\n \n # We need to add about two hours of buffer before and after the validity\n # period for interpolation. 
See research paper \"Polynomial interpolation\n # of GPS satellite coordinates\" by Milan Horemuz (Feb 2006).\n # Thus, we use a sliding window interpolation, with fit interval of 4h,\n # and a validity window in the middle of 15 minutes only, leaving the rest\n # of the 4h as interpolation buffer to prevent Runge's phenomenon.\n \n # Our first task is to generate the IGS ephemeris time axis.\n gpsephm_epoch_dt = [] # List of datetime objects.\n gpsephm_epoch_ss = [] # List of objects in integer seconds.\n gpsephm_dt = first_time # First epoch, as a datetime object.\n gpsephm_ss = 0 # First epoch but in integer seconds.\n \n # Fill the list.\n while gpsephm_dt <= gps_tstop:\n gpsephm_epoch_dt.append(gpsephm_dt)\n gpsephm_epoch_ss.append(gpsephm_ss)\n gpsephm_dt = gpsephm_dt + gps_tstep\n gpsephm_ss = gpsephm_ss + 900\n \n # We also initialise the starting time in GPS ephemeris interpolation.\n # This block of code is meant to create a time axis in integer seconds.\n t_offset_eph = (tstart-first_time).days*86400 + (tstart-first_time).seconds\n \n # We offset the interpolant time axis with the time axis offset.\n t_usr_eph = sorted(t_usr_ss + t_offset_eph)\n \n # Also, it would be wise to check if the IGS precise ephemerides had any\n # entries with missing GPS epochs. We simply check the lengths of arrays.\n for p in gpsephm:\n if len(gpsephm_epoch_dt) != len(list(gpsephm[p].keys())):\n print('Warning! IGS ephemerides missing epochs for SV ' + str(p))\n print('Error will occur during interpolation process!')\n \n # The interpolation now starts when iterable 't' > gpsephm_epoch_dt[0],\n # the first element of the array, and it will end when 't' exceeds 'tstop',\n # the user defined ending of the time axis.\n \n # We loop through each fit interval first, interpolate it, and then move\n # our sliding window interpolator every 'window_len' of 2 hours.\n \n coords = ['x', 'y', 'z'] # Position coordinate keys in dictionary.\n windex = 0 # Window index keeps track of sliding window interpolant.\n \n # Sanity check, GPS orbits must be at least 4 hours long for interpolation.\n if len(gpsephm_epoch_dt) <= 17:\n print('Warning! GPS satellite interpolation is not possible!')\n print('Duration of time for orbit interpolation is too short!')\n print('Please use at least 2 hours of duration in scenario!')\n print('Returning an error... 
\\n')\n return {}, []\n \n while gpsephm_epoch_dt[windex+16] != gpsephm_epoch_dt[-1]:\n \n # First let us get the fit and validity window (as datetime objects).\n fit_dt = gpsephm_epoch_dt[windex : windex + 17]\n val_dt = gpsephm_epoch_dt[windex + 7 : windex + 11]\n \n # Then we get the equivalent windows in terms of integer seconds.\n fit_ss = gpsephm_epoch_ss[windex : windex + 17]\n val_ss = gpsephm_epoch_ss[windex + 7 : windex + 11]\n \n # Now we generate the interpolated time axis.\n intp_dt = [t for t in t_usr_dt if t <= val_dt[-1] and t >= val_dt[0]]\n intp_ss = [t for t in t_usr_eph if t <= val_ss[-1] and t >= val_ss[0]]\n \n # Numpy-rize the arrays, and also create a + 1ms time axis.\n # 'intp_ss_delta' will be used for velocity estimation (1st order)\n intp_ss = np.array(intp_ss)\n intp_ss_delta = intp_ss + 0.00001 # Add one ms\n \n # In this sliding window filter, intepolate across SVs.\n for SV in goodsats:\n \n # For each axes in the coordinate frame...\n for c in coords:\n \n # Get corresponding fit and validity intervals for ephemerides.\n gpsephm_fit = [gpsephm[SV][t]['p'+c] for t in fit_dt]\n \n # Perform 16th Order Polyfit Interpolation.\n poly = np.polyfit( fit_ss, gpsephm_fit, 16 )\n \n # Now, we can perform the interpolation for positions.\n gpsephm_intp = np.polyval(poly, intp_ss)\n \n # We also exploit the interpolation for velocity estimation.\n gpsephm_intp_delta = np.polyval(poly, intp_ss_delta)\n gpsephm_intv = (gpsephm_intp_delta - gpsephm_intp) * 100000\n \n # Add the interpolated positions and velocities into 'gpsdata'.\n for k in range(0,len(gpsephm_intp)):\n \n gpsdata[intp_dt[k]][SV]['p'+c] = gpsephm_intp[k] # Pos\n gpsdata[intp_dt[k]][SV]['v'+c] = gpsephm_intv[k] # Vel\n \n # Check if we have reached the end of the while loop.\n if gpsephm_epoch_dt[windex+16] == gpsephm_epoch_dt[-1]:\n break # End the while loop if we are.\n else:\n windex += 1 # Update the sliding window filter by 2 hours\n \n ###########################################################################\n ###########################################################################\n \n ''' Finally, we interpolate clock biases to the user's time axis. '''\n \n print('Interpolation of GPS precise ephemerides done!')\n print('Now interpolating GPS clock biases. \\n')\n \n # We also initialise the starting time in GPS clock biases interpolation.\n # This block of code is meant to create a time axis in integer seconds.\n \n # From 'gpsdata' we can get the user-specified time axis in date-time.\n tstep_ss = tstep.seconds\n t_usr_dt = sorted(list(gpsdata.keys()))\n t_usr_ss = np.array(sorted([tstep_ss * t for t in range(0,len(t_usr_dt))]))\n \n # We then adjust the time axis of the user's to interpolate for 'clkb'.\n first_time = sorted(list(gpsclks[goodsats[0]].keys()))[0]\n t_offset_clk = (tstart-first_time).days*86400 + (tstart-first_time).seconds\n t_usr_clk = t_usr_ss + t_offset_clk\n \n # Now, start going through each SV for clock bias interpolation. 
\n for SV in goodsats:\n \n # Read the clock bias values for each SV as an array.\n clkaxis_di = list(gpsclks[SV].keys()) # Recorded datetime objects.\n clkaxis_si = [30 * t for t in range(0,len(clkaxis_di))]\n clkbiases = list(gpsclks[SV].values()) # Clock bias values.\n clkbiases = [x for _,x in sorted(zip(clkaxis_di, clkbiases))]\n clk_bound = 0 # Lower bound index for clock bias.\n lower_bound = clkaxis_si[ clk_bound ] # Nominal lower bound time (sec).\n \n for k in range(0,len(t_usr_clk)):\n tss = t_usr_clk[k] # Epoch in seconds, on user-defined time axis.\n tdt = t_usr_dt[k] # Epoch in datetime, on user-defined time axis.\n \n while clkaxis_si[ clk_bound ] + 30 <= tss:\n clk_bound += 1 # Check the next time-bound in gpsclks.\n lower_bound = clkaxis_si[ clk_bound ] # Update the bound.\n \n # Now we perform the interpolation. First, we need the clock drift.\n if clk_bound + 1 < len(clkbiases):\n \n # Get the clock drift as the gradient between two clock biases.\n clkdrift = (clkbiases[clk_bound+1] - clkbiases[clk_bound]) / 30\n \n # Get the desired first order Delta-T.\n delta_tt = tss - lower_bound\n \n # Interpolate using (y + delta_y) = x + (drift * delta_x)\n clkbias_interp = clkbiases[ clk_bound ] + clkdrift*( delta_tt )\n \n # Append interpolated clock biases into 'gpsdata' output.\n gpsdata[tdt][SV]['clkb'] = clkbias_interp\n gpsdata[tdt][SV]['clkd'] = clkdrift\n \n ###########################################################################\n ###########################################################################\n \n ''' In a final step, we plot the GPS ephemeris and clock biases. '''\n \n # First, check if the user wishes to plot GPS ephemeris and clock biases.\n savefigs = inps['savefigs'] # User-defined option to save plots\n saverept = inps['savereport'] # User-defined option to save report\n \n # If so, then continue to save the final output report on GPS ephemeris.\n if saverept == 'True':\n print('Saving output report on interpolated GPS ephemerides. \\n')\n pubplt.gps_report(gpsdata, goodsats, inps)\n \n # ... as well as ephemeris and clock plots.\n if savefigs == 'True':\n print('Saving plots on GPS position, velocity and clock biases. \\n')\n for SV in goodsats:\n pubplt.gps_graphs(SV, t_usr_dt, t_usr_ss, gpsdata, inps)\n \n ###########################################################################\n ###########################################################################\n \n return gpsdata, goodsats\n\n'''' We define a function that returns the day-of-week and the GPS week. '''\n\ndef gpsweekday(t):\n \n # Logic below calculates the desired GPS day and week number.\n wkday = (t.weekday() + 1) % 7 # Weekday from Python to GPST\n GPST_epoch = datetime.date(1980,1,6) # Date of GPST epoch\n user_epoch = t.date() # Get the date of the input time\n GPST_epoch_Monday = GPST_epoch - datetime.timedelta(GPST_epoch.weekday())\n user_epoch_Monday = user_epoch - datetime.timedelta(user_epoch.weekday())\n wwww = int(((user_epoch_Monday-GPST_epoch_Monday).days/7)-1) # GPS week\n \n # Algorithmic correction to the above logic.\n if wkday == 0:\n wwww += 1\n \n return str(wkday), str(wwww)"
] |
[
[
"numpy.polyfit",
"numpy.array",
"numpy.polyval"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
0oshowero0/COVID19-urban-mobility-model
|
[
"b3837deaa4dc32cf4f8627a86a05031378a96d88",
"b3837deaa4dc32cf4f8627a86a05031378a96d88"
] |
[
"india/simulate_ode_fit_thane_BO.py",
"us/simulate_ode_fit_philadelphia_BO_1521_try.py"
] |
[
"import numpy as np\nfrom multiprocessing import Pool\nfrom datetime import datetime\nfrom argparse import ArgumentParser\nfrom COVID_Model import City\nfrom pathlib import Path\nimport json\nfrom bayes_opt import BayesianOptimization\nimport pandas as pd\nimport setproctitle\nsetproctitle.setproctitle('Thane_SEIR@hanzhenyu')\n\nMULTI_PROCESSING = 20\n\ndef load_pop(data_dir):\n pop = np.load(data_dir).reshape(-1, 1).astype('float32')\n std = pop[pop>100].std()\n mean = pop[pop>100].mean()\n upper_bond = mean + 3*std\n pop = np.where(pop>upper_bond, upper_bond, pop)\n return pop\n\ndef load_cases(data_dir, name):\n data = pd.read_csv(data_dir)\n #cases = data[data['District']==name].iloc[:,1:].to_numpy().reshape(-1, 1).astype('float32')\n cases = data[data['District']==name]\n return cases\n\n\ndef setup_args(parser=None):\n \"\"\" Set up arguments\n\n return:\n python dictionary\n \"\"\"\n if parser is None:\n parser = ArgumentParser()\n # 定义基本参数\n parser.add_argument('--city_name', default='Thane', help='City Name')\n parser.add_argument('--save_dir', default='./simulated_results_Thane_256/', help='Result Loc')\n parser.add_argument('--population_data', default='./population/thane_pop.npy', help='Loc of pop data')\n parser.add_argument('--cases_data', default='./cases/India_epidemic_district_timeline.csv', help='Loc of cases data')\n parser.add_argument('--units', default=256, help='Unit Num', type=int)\n parser.add_argument('--unit_distance', default=1, help='Unit distance between blocks(km)', type=int)\n parser.add_argument('--start_date', default='2021-02-24', help='Start Time')\n parser.add_argument('--change_point', default='2021-04-21', help='Interval of cases')\n parser.add_argument('--final_date', default='2021-07-31', help='End Time')\n parser.add_argument('--sample_rate', default=4, help='Sample Rate of cases curve')\n\n parser.add_argument('--Pi', default=5*2.7913484249081293e-05, help='transmission rate of I (to fit)', type=float)\n parser.add_argument('--Pe', default=5*2.7913484249081293e-05, help='transmission rate of E (the same with Pi)', type=float)\n parser.add_argument('--PE', default=0.3, help='probability of a health people to be E when get infected', type=float)\n parser.add_argument('--e_to_i', default=1 / 5.2, help='probability of the E turn to I', type=float)\n parser.add_argument('--i_to_r', default=1 / 14, help='recover rate of I', type=float)\n\n parser.add_argument('--mobility', default=0.4, help='Mobility Param (to fit)', type=float)\n parser.add_argument('--early_detect', default=0, help='early detect rate (to fit; to accelerate I to R)', type=float)\n\n parser.add_argument('--self_quarantine', default=0, help='Self Quarantine of S (deprecated, not actually use)', type=float)\n parser.add_argument('--ki_disc', default=1, help='mobility discount of I when moving (deprecated, not actually use)', type=float)\n parser.add_argument('--ke_disc', default=1, help='mobility discount of E when moving (deprecated, not actually use)', type=float)\n parser.add_argument('--Pi_disc', default=1, help='discount of transmission rate of I (deprecated, not actually use)', type=float)\n parser.add_argument('--Pe_disc', default=1, help='discount of transmission rate of E (deprecated, not actually use)', type=float)\n\n return parser\n\n\n\n\ndef multi_process_fit(process_i,fit_epoch):\n parser = setup_args()\n opt = vars(parser.parse_args())\n output_dir = Path(opt['save_dir'])\n output_dir.mkdir(exist_ok=True,parents=True)\n 
################################################################################\n # Load Data\n pop_data = load_pop(opt['population_data'])\n cases_data = load_cases(opt['cases_data'],opt['city_name'])\n\n start_index = np.where(cases_data.columns == opt['start_date'])[0]\n change_index = np.where(cases_data.columns == opt['change_point'])[0]\n final_index = np.where(cases_data.columns == opt['final_date'])[0]\n\n # Sampling epidemic curve\n origin_x = np.linspace(0, cases_data.shape[1]-1, num=cases_data.shape[1]-1, endpoint=False)\n num_new_points = int((cases_data.shape[1]-1)/opt['sample_rate'])\n resample_x = np.linspace(0, cases_data.shape[1]-1, num=num_new_points, endpoint=False)\n cases_resample = np.interp(x=resample_x, xp=origin_x, fp=cases_data.iloc[:,1:].to_numpy().reshape(-1))\n new_start_index = int(start_index / opt['sample_rate'])\n new_change_index = int(change_index / opt['sample_rate'])\n new_final_index = int(final_index / opt['sample_rate'])\n\n cases_data_processed = []\n cases_data_processed.append(cases_resample[new_start_index:new_change_index])\n cases_data_processed.append(cases_resample[new_change_index:new_final_index])\n\n # Set bias of cases number\n cases_bias = cases_resample[new_start_index]\n # Set active cases\n init_cases_num = np.diff(cases_data.iloc[:,(int(start_index)-3):int(start_index)]).sum()\n opt['cases_bias'] = cases_bias\n opt['init_cases_num'] = int(init_cases_num)\n\n\n\n\n optimizers = []\n # Fit first part\n city = City(opt)\n city.setPopCases(pop_data, cases_data_processed[0])\n city.init_blocks(pop_data, manual_init_case=True)\n\n pbounds = {'pi': (0, 0.0006), 'early_detect': (0, 1), 'mobility': (0, 0.0003)}\n\n optimizer = BayesianOptimization(\n f=city.fit,\n pbounds=pbounds,\n )\n optimizer.maximize(\n init_points=20,\n n_iter=fit_epoch,\n )\n optimizers.append(optimizer)\n\n # Fit second part\n opt['Pi'] = optimizers[0].max['params']['pi']\n opt['Pe'] = optimizers[0].max['params']['pi']\n opt['early_detect'] = optimizers[0].max['params']['early_detect']\n opt['mobility'] = optimizers[0].max['params']['mobility']\n city = City(opt)\n city.setPopCases(pop_data, cases_data_processed[0])\n city.init_blocks(pop_data,manual_init_case=True)\n S_number, E_number, I_number, R_number, new_spread = city.begin_simulate(len(cases_data_processed[0]),fit=True)\n\n new_pop = city.get_blk_pop()\n city.setPopCases(new_pop, cases_data_processed[1])\n city.make_check_point(float(new_spread.cumsum()[-1]))\n\n pbounds = {'pi': (0, 0.0006),'early_detect': (0, 1), 'mobility': (0, optimizer.max['params']['mobility'])}\n\n optimizer = BayesianOptimization(\n f=city.fit_second,\n pbounds=pbounds\n )\n optimizer.maximize(\n init_points=20,\n n_iter=fit_epoch,\n )\n\n optimizers.append(optimizer)\n\n\n # Forward\n city = City(opt)\n city.setPopCases(pop_data, cases_data_processed[0])\n city.init_blocks(pop_data, manual_init_case=True)\n opts = []\n for optimizer in optimizers:\n opt = {'Pi': optimizer.max['params']['pi'], 'early_detect': optimizer.max['params']['early_detect'],\n 'mobility': optimizer.max['params']['mobility']}\n opts.append(opt)\n\n new_spread = city.begin_simulate_multi_parted(opts, cases_data_processed,output_dir.joinpath('result_' + str(process_i).zfill(2) + '.png'),fit=False)\n\n\n i = 0 \n total_opt = {}\n for opt in opts:\n total_opt['opt'+str(i)] = opt\n\n\n with open(output_dir.joinpath('opt_params_' + str(process_i).zfill(2) + '.json'), 'w') as f:\n json.dump(opts, f)\n\n np.save(output_dir.joinpath('result_curve_' + 
str(process_i).zfill(2) + '.npy'), new_spread.reshape(-1))\n\n\n\n\nif __name__ == \"__main__\":\n # Run 40 independent fits across a pool of 20 workers; each fit runs 200 BO iterations per stage.\n fit_num = 40\n p = Pool(MULTI_PROCESSING)\n\n result = [p.apply_async(multi_process_fit, args=(i,200)) for i in range(fit_num)]\n\n for i in result:\n i.get()\n\n\n print(datetime.now())",
"import numpy as np\nfrom multiprocessing import Pool\nfrom datetime import datetime\nfrom argparse import ArgumentParser\nfrom COVID_Model import City\nfrom pathlib import Path\nimport json\nfrom bayes_opt import BayesianOptimization\nimport setproctitle\nsetproctitle.setproctitle('Philadelphia_SEIR@hanzhenyu')\n\n\nMULTI_PROCESSING = 20\n\ndef load_pop(data_dir):\n pop = np.load(data_dir).reshape(-1, 1).astype('float32')\n std = pop[pop>100].std()\n mean = pop[pop>100].mean()\n upper_bond = mean + 2*std\n pop = np.where(pop>upper_bond, upper_bond, pop)\n return pop\n\ndef load_cases(data_dir):\n cases = np.load(data_dir).reshape(-1, 1).astype('float32')\n return cases\n\n\ndef setup_args(parser=None):\n \"\"\" Set up arguments\n\n return:\n python dictionary\n \"\"\"\n if parser is None:\n parser = ArgumentParser()\n # Default Params\n parser.add_argument('--population_data', default='./population/philadelphia_pop.npy', help='Loc of pop data')\n parser.add_argument('--cases_data', default='./cases/philadelphia.npy', help='Loc of cases data')\n parser.add_argument('--units', default=1521, help='Unit Num', type=int)\n parser.add_argument('--unit_distance', default=1, help='Unit distance between blocks(km)', type=int)\n parser.add_argument('--init_cases', default=10, help='Init cases', type=int)\n\n parser.add_argument('--Pi', default=5*2.7913484249081293e-05, help='transmission rate of I (to fit)', type=float)\n parser.add_argument('--Pe', default=5*2.7913484249081293e-05, help='transmission rate of E (the same with Pi)', type=float)\n parser.add_argument('--PE', default=0.3, help='probability of a health people to be E when get infected', type=float)\n parser.add_argument('--e_to_i', default=1 / 5.2, help='probability of the E turn to I', type=float)\n parser.add_argument('--i_to_r', default=1 / 14, help='recover rate of I', type=float)\n\n parser.add_argument('--mobility', default=0.4, help='Mobility Param (to fit)', type=float)\n parser.add_argument('--early_detect', default=0, help='early detect rate (to fit; to accelerate I to R)', type=float)\n\n parser.add_argument('--self_quarantine', default=0, help='Self Quarantine of S (deprecated, not actually use)', type=float)\n parser.add_argument('--ki_disc', default=1, help='mobility discount of I when moving (deprecated, not actually use)', type=float)\n parser.add_argument('--ke_disc', default=1, help='mobility discount of E when moving (deprecated, not actually use)', type=float)\n parser.add_argument('--Pi_disc', default=1, help='discount of transmission rate of I (deprecated, not actually use)', type=float)\n parser.add_argument('--Pe_disc', default=1, help='discount of transmission rate of E (deprecated, not actually use)', type=float)\n\n return parser\n\n\n\n\ndef multi_process_fit(i,fit_epoch):\n output_dir = Path('./simulated_results_Philadelphia_1521/')\n output_dir.mkdir(exist_ok=True,parents=True)\n\n parser = setup_args()\n opt = vars(parser.parse_args())\n ################################################################################\n # Load Data\n pop_data = load_pop(opt['population_data'])\n cases_data = load_cases(opt['cases_data'])\n cases_data_first = cases_data[54:70-6+7]\n cases_data_second = cases_data[70-6+7:]\n\n\n\n error = 99999999999999999999\n while error > 0.5:\n error = 0\n # Fit first part\n city = City(opt)\n city.setPopCases(pop_data, cases_data_first)\n city.init_blocks(pop_data)\n\n pbounds = {'pi': (0, 0.0006), 'early_detect': (0, 1), 'mobility': (0, 0.0003)}\n\n optimizer1 = BayesianOptimization(\n 
f=city.fit,\n pbounds=pbounds,\n )\n optimizer1.maximize(\n init_points=20,\n n_iter=fit_epoch,\n )\n\n # Fit second part\n opt['Pi'] = optimizer1.max['params']['pi']\n opt['Pe'] = optimizer1.max['params']['pi']\n opt['early_detect'] = optimizer1.max['params']['early_detect']\n opt['mobility'] = optimizer1.max['params']['mobility']\n city = City(opt)\n city.setPopCases(pop_data, cases_data_first)\n city.init_blocks(pop_data)\n S_number, E_number, I_number, R_number, new_spread = city.begin_simulate(len(cases_data_first),fit=True)\n\n new_pop = city.get_blk_pop()\n city.setPopCases(new_pop, cases_data_second)\n city.make_check_point(float(new_spread.cumsum()[-1]))\n\n error = 99999999999999999999\n while error > 0.5:\n error = 0\n pbounds = { 'pi': (0, 0.0006),'early_detect': (0, 1), 'mobility': (0,optimizer1.max['params']['mobility'])}\n\n optimizer2 = BayesianOptimization(\n f=city.fit_second,\n pbounds=pbounds\n )\n optimizer2.maximize(\n init_points=20,\n n_iter=fit_epoch,\n )\n\n # Forward use fitted params.\n city = City(opt)\n city.setPopCases(pop_data, cases_data_first)\n city.init_blocks(pop_data)\n opt1 = {'Pi': optimizer1.max['params']['pi'], 'early_detect': optimizer1.max['params']['early_detect'],\n 'mobility': optimizer1.max['params']['mobility']}\n opt2 = {'Pi': optimizer2.max['params']['pi'], 'early_detect': optimizer2.max['params']['early_detect'],\n 'mobility': optimizer2.max['params']['mobility']}\n\n new_spread,r0 = city.begin_simulate_two_parted(opt1, opt2, cases_data_first, cases_data_second,\n output_dir.joinpath('result_' + str(i).zfill(2) + '.png'),fit=False)\n\n\n\n opts = {'opt1': {'pi': optimizer1.max['params']['pi'], 'early_detect': optimizer1.max['params']['early_detect'], \\\n 'mobility': optimizer1.max['params']['mobility']}, \\\n 'opt2': {'pi': optimizer2.max['params']['pi'], 'early_detect': optimizer2.max['params']['early_detect'], \\\n 'mobility': optimizer2.max['params']['mobility']}\n }\n\n\n with open(output_dir.joinpath('opt_params_' + str(i).zfill(2) + '.json'), 'w') as f:\n json.dump(opts, f)\n\n np.save(output_dir.joinpath('result_curve_' + str(i).zfill(2) + '.npy'), new_spread.reshape(-1))\n np.save(output_dir.joinpath('result_r0_' + str(i).zfill(2) + '.npy'), r0.reshape(-1))\n\n\n\nif __name__ == \"__main__\":\n fit_num = 40\n p = Pool(MULTI_PROCESSING)\n\n result = [p.apply_async(multi_process_fit, args=(i,200)) for i in range(fit_num)]\n\n for i in result:\n i.get()\n\n\n\n print(datetime.now())"
] |
[
[
"numpy.load",
"pandas.read_csv",
"numpy.where",
"numpy.linspace"
],
[
"numpy.load",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
frk2/bullet3
|
[
"225d823e4dc3f952c6c39920c3f87390383e0602",
"225d823e4dc3f952c6c39920c3f87390383e0602",
"225d823e4dc3f952c6c39920c3f87390383e0602",
"225d823e4dc3f952c6c39920c3f87390383e0602",
"225d823e4dc3f952c6c39920c3f87390383e0602",
"225d823e4dc3f952c6c39920c3f87390383e0602"
] |
[
"examples/pybullet/examples/rendertest_sync.py",
"examples/pybullet/gym/pybullet_envs/minitaur/agents/ppo/utility.py",
"examples/pybullet/gym/pybullet_envs/bullet/minitaur_gym_env.py",
"examples/pybullet/gym/pybullet_envs/deep_mimic/env/pybullet_deep_mimic_env.py",
"examples/pybullet/gym/pybullet_envs/deep_mimic/learning/ppo_agent.py",
"examples/pybullet/gym/pybullet_envs/examples/enjoy_TF_HumanoidBulletEnv_v0_2017may.py"
] |
[
"#!/usr/bin/env python\nimport os, logging, gym\nfrom baselines import logger\nfrom baselines.common import set_global_seeds\nfrom baselines.common.misc_util import boolean_flag\nfrom baselines import bench\nfrom baselines.a2c.a2c import learn\nfrom baselines.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom baselines.common.vec_env.vec_frame_stack import VecFrameStack\nimport time\n\nimport gym\nfrom gym import spaces\nimport pybullet as p\nfrom itertools import cycle\nimport numpy as np\n\ncamTargetPos = [0,0,0]\ncameraUp = [0,0,1]\ncameraPos = [1,1,1]\npitch = -10.0\nroll=0\nupAxisIndex = 2\ncamDistance = 4\npixelWidth = 320\npixelHeight = 200\nnearPlane = 0.01\nfarPlane = 100\nfov = 60\n\n\nclass TestEnv(gym.Env):\n def __init__(self,\n renderer = 'tiny', # ('tiny', 'egl', 'debug')\n ):\n self.action_space = spaces.Discrete(2)\n self.iter = cycle(range(0,360,10))\n\n # how we want to show\n assert renderer in ('tiny', 'egl', 'debug','plugin')\n self._renderer = renderer\n self._render_width = 84\n self._render_height = 84\n # connecting\n if self._renderer == \"tiny\" or self._renderer == \"plugin\":\n optionstring='--width={} --height={}'.format(self._render_width,self._render_height)\n p.connect(p.DIRECT, options=optionstring)\n\n if self._renderer == \"plugin\":\n plugin_fn = os.path.join(p.__file__.split(\"bullet3\")[0],\"bullet3/build/lib.linux-x86_64-3.5/eglRenderer.cpython-35m-x86_64-linux-gnu.so\")\n plugin = p.loadPlugin(plugin_fn,\"_tinyRendererPlugin\")\n if plugin < 0:\n print(\"\\nPlugin Failed to load! Try installing via `pip install -e .`\\n\")\n sys.exit()\n print(\"plugin =\",plugin)\n\n elif self._renderer == \"egl\":\n optionstring='--width={} --height={}'.format(self._render_width,self._render_height)\n optionstring += ' --window_backend=2 --render_device=0'\n p.connect(p.GUI, options=optionstring)\n\n elif self._renderer == \"debug\":\n #print(\"Connection: SHARED_MEMORY\")\n #cid = p.connect(p.SHARED_MEMORY)\n #if (cid<0):\n cid = p.connect(p.GUI)\n p.resetDebugVisualizerCamera(1.3,180,-41,[0.52,-0.2,-0.33])\n\n p.configureDebugVisualizer(p.COV_ENABLE_GUI,0)\n p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW,0)\n p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW,0)\n p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW,0)\n\n\n def __del__(self):\n p.disconnect()\n\n def reset(self):\n pass\n\n def step(self,action):\n p.stepSimulation()\n start = time.time()\n yaw = next(self.iter)\n viewMatrix = p.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)\n aspect = pixelWidth / pixelHeight;\n projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);\n img_arr = p.getCameraImage(pixelWidth, pixelHeight, viewMatrix,\n projectionMatrix, shadow=1,lightDirection=[1,1,1],\n renderer=p.ER_BULLET_HARDWARE_OPENGL)\n #renderer=pybullet.ER_TINY_RENDERER)\n self._observation = img_arr[2]\n return np.array(self._observation), 0, 0, {}\n\n def seed(self, seed=None):\n pass\n\ndef train(env_id, num_timesteps=300, seed=0,num_env=2,renderer='tiny'):\n def make_env(rank):\n def _thunk():\n if env_id == \"TestEnv\":\n env = TestEnv(renderer=renderer) #gym.make(env_id)\n else:\n env = gym.make(env_id)\n env.seed(seed + rank)\n env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))\n gym.logger.setLevel(logging.WARN)\n # only clip rewards when not evaluating\n return env\n return _thunk\n set_global_seeds(seed)\n env = SubprocVecEnv([make_env(i) for 
i in range(num_env)])\n\n env.reset()\n # Time num_timesteps synchronized steps across all vectorized environments.\n start = time.time()\n for i in range(num_timesteps):\n action = [env.action_space.sample() for _ in range(num_env)]\n env.step(action)\n stop = time.time()\n duration = (stop - start)\n if (duration):\n fps = num_timesteps/duration\n else:\n fps = 0\n env.close()\n return num_env, fps\n\n\nif __name__ == \"__main__\":\n env_id = \"TestEnv\"\n res = []\n\n # Benchmark each renderer backend with 1 and 8 parallel environments.\n for renderer in ('tiny','plugin', 'egl'):\n for i in (1,8):\n tmp = train(env_id,num_env=i,renderer=renderer)\n print(renderer,tmp)\n res.append((renderer,tmp))\n print()\n print(\"rendertest_sync.py\")\n print(\"back nenv fps fps_tot\")\n for renderer,i in res:\n print(renderer,'\\t', i[0],round(i[1]),'\\t',round(i[0]*i[1]))\n",
"# Copyright 2017 The TensorFlow Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for the PPO algorithm.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport re\n\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\n\n\ndef create_nested_vars(tensors):\n \"\"\"Create variables matching a nested tuple of tensors.\n\n Args:\n tensors: Nested tuple of list of tensors.\n\n Returns:\n Nested tuple or list of variables.\n \"\"\"\n if isinstance(tensors, (tuple, list)):\n return type(tensors)(create_nested_vars(tensor) for tensor in tensors)\n return tf.Variable(tensors, False)\n\n\ndef reinit_nested_vars(variables, indices=None):\n \"\"\"Reset all variables in a nested tuple to zeros.\n\n Args:\n variables: Nested tuple or list of variaables.\n indices: Indices along the first dimension to reset, defaults to all.\n\n Returns:\n Operation.\n \"\"\"\n if isinstance(variables, (tuple, list)):\n return tf.group(*[\n reinit_nested_vars(variable, indices) for variable in variables])\n if indices is None:\n return variables.assign(tf.zeros_like(variables))\n else:\n zeros = tf.zeros([tf.shape(indices)[0]] + variables.shape[1:].as_list())\n return tf.scatter_update(variables, indices, zeros)\n\n\ndef assign_nested_vars(variables, tensors):\n \"\"\"Assign tensors to matching nested tuple of variables.\n\n Args:\n variables: Nested tuple or list of variables to update.\n tensors: Nested tuple or list of tensors to assign.\n\n Returns:\n Operation.\n \"\"\"\n if isinstance(variables, (tuple, list)):\n return tf.group(*[\n assign_nested_vars(variable, tensor)\n for variable, tensor in zip(variables, tensors)])\n return variables.assign(tensors)\n\n\ndef discounted_return(reward, length, discount):\n \"\"\"Discounted Monte-Carlo returns.\"\"\"\n timestep = tf.range(reward.shape[1].value)\n mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)\n return_ = tf.reverse(tf.transpose(tf.scan(\n lambda agg, cur: cur + discount * agg,\n tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),\n tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])\n return tf.check_numerics(tf.stop_gradient(return_), 'return')\n\n\ndef fixed_step_return(reward, value, length, discount, window):\n \"\"\"N-step discounted return.\"\"\"\n timestep = tf.range(reward.shape[1].value)\n mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)\n return_ = tf.zeros_like(reward)\n for _ in range(window):\n return_ += reward\n reward = discount * tf.concat(\n [reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1)\n return_ += discount ** window * tf.concat(\n [value[:, window:], tf.zeros_like(value[:, -window:]), 1])\n return tf.check_numerics(tf.stop_gradient(mask * return_), 'return')\n\n\ndef lambda_return(reward, value, length, discount, lambda_):\n \"\"\"TD-lambda returns.\"\"\"\n timestep = tf.range(reward.shape[1].value)\n mask = tf.cast(timestep[None, :] < 
length[:, None], tf.float32)\n sequence = mask * reward + discount * value * (1 - lambda_)\n discount = mask * discount * lambda_\n sequence = tf.stack([sequence, discount], 2)\n return_ = tf.reverse(tf.transpose(tf.scan(\n lambda agg, cur: cur[0] + cur[1] * agg,\n tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),\n tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])\n return tf.check_numerics(tf.stop_gradient(return_), 'return')\n\n\ndef lambda_advantage(reward, value, length, discount):\n \"\"\"Generalized Advantage Estimation.\"\"\"\n timestep = tf.range(reward.shape[1].value)\n mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)\n next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)\n delta = reward + discount * next_value - value\n advantage = tf.reverse(tf.transpose(tf.scan(\n lambda agg, cur: cur + discount * agg,\n tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]),\n tf.zeros_like(delta[:, -1]), 1, False), [1, 0]), [1])\n return tf.check_numerics(tf.stop_gradient(advantage), 'advantage')\n\n\ndef diag_normal_kl(mean0, logstd0, mean1, logstd1):\n \"\"\"Epirical KL divergence of two normals with diagonal covariance.\"\"\"\n logstd0_2, logstd1_2 = 2 * logstd0, 2 * logstd1\n return 0.5 * (\n tf.reduce_sum(tf.exp(logstd0_2 - logstd1_2), -1) +\n tf.reduce_sum((mean1 - mean0) ** 2 / tf.exp(logstd1_2), -1) +\n tf.reduce_sum(logstd1_2, -1) - tf.reduce_sum(logstd0_2, -1) -\n mean0.shape[-1].value)\n\n\ndef diag_normal_logpdf(mean, logstd, loc):\n \"\"\"Log density of a normal with diagonal covariance.\"\"\"\n constant = -0.5 * (math.log(2 * math.pi) + logstd)\n value = -0.5 * ((loc - mean) / tf.exp(logstd)) ** 2\n return tf.reduce_sum(constant + value, -1)\n\n\ndef diag_normal_entropy(mean, logstd):\n \"\"\"Empirical entropy of a normal with diagonal covariance.\"\"\"\n constant = mean.shape[-1].value * math.log(2 * math.pi * math.e)\n return (constant + tf.reduce_sum(2 * logstd, 1)) / 2\n\n\ndef available_gpus():\n \"\"\"List of GPU device names detected by TensorFlow.\"\"\"\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\n\ndef gradient_summaries(grad_vars, groups=None, scope='gradients'):\n \"\"\"Create histogram summaries of the gradient.\n\n Summaries can be grouped via regexes matching variables names.\n\n Args:\n grad_vars: List of (gradient, variable) tuples as returned by optimizers.\n groups: Mapping of name to regex for grouping summaries.\n scope: Name scope for this operation.\n\n Returns:\n Summary tensor.\n \"\"\"\n groups = groups or {r'all': r'.*'}\n grouped = collections.defaultdict(list)\n for grad, var in grad_vars:\n if grad is None:\n continue\n for name, pattern in groups.items():\n if re.match(pattern, var.name):\n name = re.sub(pattern, name, var.name)\n grouped[name].append(grad)\n for name in groups:\n if name not in grouped:\n tf.logging.warn(\"No variables matching '{}' group.\".format(name))\n summaries = []\n for name, grads in grouped.items():\n grads = [tf.reshape(grad, [-1]) for grad in grads]\n grads = tf.concat(grads, 0)\n summaries.append(tf.summary.histogram(scope + '/' + name, grads))\n return tf.summary.merge(summaries)\n\n\ndef variable_summaries(vars_, groups=None, scope='weights'):\n \"\"\"Create histogram summaries for the provided variables.\n\n Summaries can be grouped via regexes matching variables names.\n\n Args:\n vars_: List of variables to summarize.\n groups: Mapping of name to regex for grouping summaries.\n scope: Name 
scope for this operation.\n\n Returns:\n Summary tensor.\n \"\"\"\n groups = groups or {r'all': r'.*'}\n grouped = collections.defaultdict(list)\n for var in vars_:\n for name, pattern in groups.items():\n if re.match(pattern, var.name):\n name = re.sub(pattern, name, var.name)\n grouped[name].append(var)\n for name in groups:\n if name not in grouped:\n tf.logging.warn(\"No variables matching '{}' group.\".format(name))\n summaries = []\n for name, vars_ in grouped.items():\n vars_ = [tf.reshape(var, [-1]) for var in vars_]\n vars_ = tf.concat(vars_, 0)\n summaries.append(tf.summary.histogram(scope + '/' + name, vars_))\n return tf.summary.merge(summaries)\n",
"\"\"\"This file implements the gym environment of minitaur.\n\n\"\"\"\n\nimport os, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0,parentdir)\n\n\nimport math\nimport time\nimport gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nimport pybullet\nfrom . import bullet_client\nfrom . import minitaur\nimport os\nimport pybullet_data\nfrom . import minitaur_env_randomizer\nfrom pkg_resources import parse_version\n\nNUM_SUBSTEPS = 5\nNUM_MOTORS = 8\nMOTOR_ANGLE_OBSERVATION_INDEX = 0\nMOTOR_VELOCITY_OBSERVATION_INDEX = MOTOR_ANGLE_OBSERVATION_INDEX + NUM_MOTORS\nMOTOR_TORQUE_OBSERVATION_INDEX = MOTOR_VELOCITY_OBSERVATION_INDEX + NUM_MOTORS\nBASE_ORIENTATION_OBSERVATION_INDEX = MOTOR_TORQUE_OBSERVATION_INDEX + NUM_MOTORS\nACTION_EPS = 0.01\nOBSERVATION_EPS = 0.01\nRENDER_HEIGHT = 720\nRENDER_WIDTH = 960\n\nclass MinitaurBulletEnv(gym.Env):\n \"\"\"The gym environment for the minitaur.\n\n It simulates the locomotion of a minitaur, a quadruped robot. The state space\n include the angles, velocities and torques for all the motors and the action\n space is the desired motor angle for each motor. The reward function is based\n on how far the minitaur walks in 1000 steps and penalizes the energy\n expenditure.\n\n \"\"\"\n metadata = {\n \"render.modes\": [\"human\", \"rgb_array\"],\n \"video.frames_per_second\": 50\n }\n\n def __init__(self,\n urdf_root=pybullet_data.getDataPath(),\n action_repeat=1,\n distance_weight=1.0,\n energy_weight=0.005,\n shake_weight=0.0,\n drift_weight=0.0,\n distance_limit=float(\"inf\"),\n observation_noise_stdev=0.0,\n self_collision_enabled=True,\n motor_velocity_limit=np.inf,\n pd_control_enabled=False,#not needed to be true if accurate motor model is enabled (has its own better PD)\n leg_model_enabled=True,\n accurate_motor_model_enabled=True,\n motor_kp=1.0,\n motor_kd=0.02,\n torque_control_enabled=False,\n motor_overheat_protection=True,\n hard_reset=True,\n on_rack=False,\n render=False,\n kd_for_pd_controllers=0.3,\n env_randomizer=minitaur_env_randomizer.MinitaurEnvRandomizer()):\n \"\"\"Initialize the minitaur gym environment.\n\n Args:\n urdf_root: The path to the urdf data folder.\n action_repeat: The number of simulation steps before actions are applied.\n distance_weight: The weight of the distance term in the reward.\n energy_weight: The weight of the energy term in the reward.\n shake_weight: The weight of the vertical shakiness term in the reward.\n drift_weight: The weight of the sideways drift term in the reward.\n distance_limit: The maximum distance to terminate the episode.\n observation_noise_stdev: The standard deviation of observation noise.\n self_collision_enabled: Whether to enable self collision in the sim.\n motor_velocity_limit: The velocity limit of each motor.\n pd_control_enabled: Whether to use PD controller for each motor.\n leg_model_enabled: Whether to use a leg motor to reparameterize the action\n space.\n accurate_motor_model_enabled: Whether to use the accurate DC motor model.\n motor_kp: proportional gain for the accurate motor model.\n motor_kd: derivative gain for the accurate motor model.\n torque_control_enabled: Whether to use the torque control, if set to\n False, pose control will be used.\n motor_overheat_protection: Whether to shutdown the motor that has exerted\n large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time\n (OVERHEAT_SHUTDOWN_TIME). 
See ApplyAction() in minitaur.py for more\n details.\n hard_reset: Whether to wipe the simulation and load everything when reset\n is called. If set to false, reset just place the minitaur back to start\n position and set its pose to initial configuration.\n on_rack: Whether to place the minitaur on rack. This is only used to debug\n the walking gait. In this mode, the minitaur's base is hanged midair so\n that its walking gait is clearer to visualize.\n render: Whether to render the simulation.\n kd_for_pd_controllers: kd value for the pd controllers of the motors\n env_randomizer: An EnvRandomizer to randomize the physical properties\n during reset().\n \"\"\"\n self._time_step = 0.01\n self._action_repeat = action_repeat\n self._num_bullet_solver_iterations = 300\n self._urdf_root = urdf_root\n self._self_collision_enabled = self_collision_enabled\n self._motor_velocity_limit = motor_velocity_limit\n self._observation = []\n self._env_step_counter = 0\n self._is_render = render\n self._last_base_position = [0, 0, 0]\n self._distance_weight = distance_weight\n self._energy_weight = energy_weight\n self._drift_weight = drift_weight\n self._shake_weight = shake_weight\n self._distance_limit = distance_limit\n self._observation_noise_stdev = observation_noise_stdev\n self._action_bound = 1\n self._pd_control_enabled = pd_control_enabled\n self._leg_model_enabled = leg_model_enabled\n self._accurate_motor_model_enabled = accurate_motor_model_enabled\n self._motor_kp = motor_kp\n self._motor_kd = motor_kd\n self._torque_control_enabled = torque_control_enabled\n self._motor_overheat_protection = motor_overheat_protection\n self._on_rack = on_rack\n self._cam_dist = 1.0\n self._cam_yaw = 0\n self._cam_pitch = -30\n self._hard_reset = True\n self._kd_for_pd_controllers = kd_for_pd_controllers\n self._last_frame_time = 0.0\n print(\"urdf_root=\" + self._urdf_root)\n self._env_randomizer = env_randomizer\n # PD control needs smaller time step for stability.\n if pd_control_enabled or accurate_motor_model_enabled:\n self._time_step /= NUM_SUBSTEPS\n self._num_bullet_solver_iterations /= NUM_SUBSTEPS\n self._action_repeat *= NUM_SUBSTEPS\n\n if self._is_render:\n self._pybullet_client = bullet_client.BulletClient(\n connection_mode=pybullet.GUI)\n else:\n self._pybullet_client = bullet_client.BulletClient()\n\n self.seed()\n self.reset()\n observation_high = (\n self.minitaur.GetObservationUpperBound() + OBSERVATION_EPS)\n observation_low = (\n self.minitaur.GetObservationLowerBound() - OBSERVATION_EPS)\n action_dim = 8\n action_high = np.array([self._action_bound] * action_dim)\n self.action_space = spaces.Box(-action_high, action_high, dtype=np.float32)\n self.observation_space = spaces.Box(observation_low, observation_high, dtype=np.float32)\n self.viewer = None\n self._hard_reset = hard_reset # This assignment need to be after reset()\n\n def set_env_randomizer(self, env_randomizer):\n self._env_randomizer = env_randomizer\n\n def configure(self, args):\n self._args = args\n\n def reset(self):\n if self._hard_reset:\n self._pybullet_client.resetSimulation()\n self._pybullet_client.setPhysicsEngineParameter(\n numSolverIterations=int(self._num_bullet_solver_iterations))\n self._pybullet_client.setTimeStep(self._time_step)\n plane = self._pybullet_client.loadURDF(\"%s/plane.urdf\" % self._urdf_root)\n self._pybullet_client.changeVisualShape(plane,-1,rgbaColor=[1,1,1,0.9])\n self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_PLANAR_REFLECTION,0)\n 
self._pybullet_client.setGravity(0, 0, -10)\n acc_motor = self._accurate_motor_model_enabled\n motor_protect = self._motor_overheat_protection\n self.minitaur = (minitaur.Minitaur(\n pybullet_client=self._pybullet_client,\n urdf_root=self._urdf_root,\n time_step=self._time_step,\n self_collision_enabled=self._self_collision_enabled,\n motor_velocity_limit=self._motor_velocity_limit,\n pd_control_enabled=self._pd_control_enabled,\n accurate_motor_model_enabled=acc_motor,\n motor_kp=self._motor_kp,\n motor_kd=self._motor_kd,\n torque_control_enabled=self._torque_control_enabled,\n motor_overheat_protection=motor_protect,\n on_rack=self._on_rack,\n kd_for_pd_controllers=self._kd_for_pd_controllers))\n else:\n self.minitaur.Reset(reload_urdf=False)\n\n if self._env_randomizer is not None:\n self._env_randomizer.randomize_env(self)\n\n self._env_step_counter = 0\n self._last_base_position = [0, 0, 0]\n self._objectives = []\n self._pybullet_client.resetDebugVisualizerCamera(\n self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0])\n if not self._torque_control_enabled:\n for _ in range(100):\n if self._pd_control_enabled or self._accurate_motor_model_enabled:\n self.minitaur.ApplyAction([math.pi / 2] * 8)\n self._pybullet_client.stepSimulation()\n return self._noisy_observation()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _transform_action_to_motor_command(self, action):\n if self._leg_model_enabled:\n for i, action_component in enumerate(action):\n if not (-self._action_bound - ACTION_EPS <= action_component <=\n self._action_bound + ACTION_EPS):\n raise ValueError(\n \"{}th action {} out of bounds.\".format(i, action_component))\n action = self.minitaur.ConvertFromLegModel(action)\n return action\n\n def step(self, action):\n \"\"\"Step forward the simulation, given the action.\n\n Args:\n action: A list of desired motor angles for eight motors.\n\n Returns:\n observations: The angles, velocities and torques of all motors.\n reward: The reward for the current state-action pair.\n done: Whether the episode has ended.\n info: A dictionary that stores diagnostic information.\n\n Raises:\n ValueError: The action dimension is not the same as the number of motors.\n ValueError: The magnitude of actions is out of bounds.\n \"\"\"\n if self._is_render:\n # Sleep, otherwise the computation takes less time than real time,\n # which will make the visualization like a fast-forward video.\n time_spent = time.time() - self._last_frame_time\n self._last_frame_time = time.time()\n time_to_sleep = self._action_repeat * self._time_step - time_spent\n if time_to_sleep > 0:\n time.sleep(time_to_sleep)\n base_pos = self.minitaur.GetBasePosition()\n camInfo = self._pybullet_client.getDebugVisualizerCamera()\n curTargetPos = camInfo[11]\n distance=camInfo[10]\n yaw = camInfo[8]\n pitch=camInfo[9]\n targetPos = [0.95*curTargetPos[0]+0.05*base_pos[0],0.95*curTargetPos[1]+0.05*base_pos[1],curTargetPos[2]]\n\n\n self._pybullet_client.resetDebugVisualizerCamera(\n distance, yaw, pitch, base_pos)\n action = self._transform_action_to_motor_command(action)\n for _ in range(self._action_repeat):\n self.minitaur.ApplyAction(action)\n self._pybullet_client.stepSimulation()\n\n self._env_step_counter += 1\n reward = self._reward()\n done = self._termination()\n return np.array(self._noisy_observation()), reward, done, {}\n\n def render(self, mode=\"rgb_array\", close=False):\n if mode != \"rgb_array\":\n return np.array([])\n base_pos = 
self.minitaur.GetBasePosition()\n view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(\n cameraTargetPosition=base_pos,\n distance=self._cam_dist,\n yaw=self._cam_yaw,\n pitch=self._cam_pitch,\n roll=0,\n upAxisIndex=2)\n proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(\n fov=60, aspect=float(RENDER_WIDTH)/RENDER_HEIGHT,\n nearVal=0.1, farVal=100.0)\n (_, _, px, _, _) = self._pybullet_client.getCameraImage(\n width=RENDER_WIDTH, height=RENDER_HEIGHT, viewMatrix=view_matrix,\n projectionMatrix=proj_matrix, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)\n rgb_array = np.array(px)\n rgb_array = rgb_array[:, :, :3]\n return rgb_array\n\n def get_minitaur_motor_angles(self):\n \"\"\"Get the minitaur's motor angles.\n\n Returns:\n A numpy array of motor angles.\n \"\"\"\n return np.array(\n self._observation[MOTOR_ANGLE_OBSERVATION_INDEX:\n MOTOR_ANGLE_OBSERVATION_INDEX + NUM_MOTORS])\n\n def get_minitaur_motor_velocities(self):\n \"\"\"Get the minitaur's motor velocities.\n\n Returns:\n A numpy array of motor velocities.\n \"\"\"\n return np.array(\n self._observation[MOTOR_VELOCITY_OBSERVATION_INDEX:\n MOTOR_VELOCITY_OBSERVATION_INDEX + NUM_MOTORS])\n\n def get_minitaur_motor_torques(self):\n \"\"\"Get the minitaur's motor torques.\n\n Returns:\n A numpy array of motor torques.\n \"\"\"\n return np.array(\n self._observation[MOTOR_TORQUE_OBSERVATION_INDEX:\n MOTOR_TORQUE_OBSERVATION_INDEX + NUM_MOTORS])\n\n def get_minitaur_base_orientation(self):\n \"\"\"Get the minitaur's base orientation, represented by a quaternion.\n\n Returns:\n A numpy array of minitaur's orientation.\n \"\"\"\n return np.array(self._observation[BASE_ORIENTATION_OBSERVATION_INDEX:])\n\n def is_fallen(self):\n \"\"\"Decide whether the minitaur has fallen.\n\n If the up directions between the base and the world is larger (the dot\n product is smaller than 0.85) or the base is very low on the ground\n (the height is smaller than 0.13 meter), the minitaur is considered fallen.\n\n Returns:\n Boolean value that indicates whether the minitaur has fallen.\n \"\"\"\n orientation = self.minitaur.GetBaseOrientation()\n rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)\n local_up = rot_mat[6:]\n pos = self.minitaur.GetBasePosition()\n return (np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.85 or\n pos[2] < 0.13)\n\n def _termination(self):\n position = self.minitaur.GetBasePosition()\n distance = math.sqrt(position[0]**2 + position[1]**2)\n return self.is_fallen() or distance > self._distance_limit\n\n def _reward(self):\n current_base_position = self.minitaur.GetBasePosition()\n forward_reward = current_base_position[0] - self._last_base_position[0]\n drift_reward = -abs(current_base_position[1] - self._last_base_position[1])\n shake_reward = -abs(current_base_position[2] - self._last_base_position[2])\n self._last_base_position = current_base_position\n energy_reward = np.abs(\n np.dot(self.minitaur.GetMotorTorques(),\n self.minitaur.GetMotorVelocities())) * self._time_step\n reward = (\n self._distance_weight * forward_reward -\n self._energy_weight * energy_reward + self._drift_weight * drift_reward\n + self._shake_weight * shake_reward)\n self._objectives.append(\n [forward_reward, energy_reward, drift_reward, shake_reward])\n return reward\n\n def get_objectives(self):\n return self._objectives\n\n def _get_observation(self):\n self._observation = self.minitaur.GetObservation()\n return self._observation\n\n def _noisy_observation(self):\n self._get_observation()\n 
observation = np.array(self._observation)\n if self._observation_noise_stdev > 0:\n observation += (np.random.normal(\n scale=self._observation_noise_stdev, size=observation.shape) *\n self.minitaur.GetObservationUpperBound())\n return observation\n\n if parse_version(gym.__version__) < parse_version('0.9.6'):\n _render = render\n _reset = reset\n _seed = seed\n _step = step\n",
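A minimal rollout sketch for the environment above, following the Gym interface its step() docstring describes (assumption: run in the same module so MinitaurBulletEnv is in scope; the random policy is a placeholder, not part of the original file):

import numpy as np

env = MinitaurBulletEnv(render=False)
obs = env.reset()
total_reward = 0.0
for _ in range(200):
    # Eight leg-model actions, each bounded to [-1, 1] by _action_bound.
    action = np.random.uniform(-1.0, 1.0, size=8)
    obs, reward, done, info = env.step(action)
    total_reward += reward
    if done:  # fallen (is_fallen) or past _distance_limit
        break
print('episode reward:', total_reward)

Note that the reward combines the four weighted terms from _reward(): forward progress minus the energy, drift, and shake penalties.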
"import numpy as np\nimport math\nfrom pybullet_envs.deep_mimic.env.env import Env\nfrom pybullet_envs.deep_mimic.env.action_space import ActionSpace\nfrom pybullet_utils import bullet_client\nimport time\nfrom pybullet_envs.deep_mimic.env import motion_capture_data\nfrom pybullet_envs.deep_mimic.env import humanoid_stable_pd\nimport pybullet_data\nimport pybullet as p1\nimport random\n \n \nclass PyBulletDeepMimicEnv(Env):\n def __init__(self, args=None, enable_draw=False, pybullet_client=None):\n super().__init__(args, enable_draw)\n self._num_agents = 1\n self._pybullet_client = pybullet_client\n self._isInitialized = False\n self._useStablePD = True\n self.reset()\n \n def reset(self):\n \n \n startTime = 0. #float(rn)/rnrange * self._humanoid.getCycleTime()\n self.t = startTime\n if not self._isInitialized:\n if self.enable_draw:\n self._pybullet_client = bullet_client.BulletClient(connection_mode=p1.GUI)\n #disable 'GUI' since it slows down a lot on Mac OSX and some other platforms\n self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_GUI,0)\n else:\n self._pybullet_client = bullet_client.BulletClient()\n \n self._pybullet_client.setAdditionalSearchPath(pybullet_data.getDataPath())\n z2y = self._pybullet_client.getQuaternionFromEuler([-math.pi*0.5,0,0])\n self._planeId = self._pybullet_client.loadURDF(\"plane_implicit.urdf\",[0,0,0],z2y, useMaximalCoordinates=True)\n #print(\"planeId=\",self._planeId)\n self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_Y_AXIS_UP,1)\n self._pybullet_client.setGravity(0,-9.8,0)\n \n self._pybullet_client.setPhysicsEngineParameter(numSolverIterations=10)\n self._pybullet_client.changeDynamics(self._planeId, linkIndex=-1, lateralFriction=0.9)\n \n self._mocapData = motion_capture_data.MotionCaptureData()\n #motionPath = pybullet_data.getDataPath()+\"/motions/humanoid3d_walk.txt\"\n motionPath = pybullet_data.getDataPath()+\"/motions/humanoid3d_backflip.txt\"\n self._mocapData.Load(motionPath)\n timeStep = 1./600\n useFixedBase=False\n self._humanoid = humanoid_stable_pd.HumanoidStablePD(self._pybullet_client, self._mocapData, timeStep, useFixedBase)\n self._isInitialized = True\n \n self._pybullet_client.setTimeStep(timeStep)\n self._pybullet_client.setPhysicsEngineParameter(numSubSteps=2)\n \n \n selfCheck = False\n if (selfCheck):\n curTime = 0\n while self._pybullet_client.isConnected():\n self._humanoid.setSimTime(curTime)\n state = self._humanoid.getState()\n #print(\"state=\",state)\n pose = self._humanoid.computePose(self._humanoid._frameFraction)\n for i in range (10):\n curTime+=timeStep\n #taus = self._humanoid.computePDForces(pose)\n #self._humanoid.applyPDForces(taus)\n #self._pybullet_client.stepSimulation()\n time.sleep(timeStep)\n #print(\"numframes = \", self._humanoid._mocap_data.NumFrames())\n #startTime = random.randint(0,self._humanoid._mocap_data.NumFrames()-2)\n rnrange = 1000\n rn = random.randint(0,rnrange)\n \n self._humanoid.setSimTime(startTime)\n \n self._humanoid.resetPose()\n #this clears the contact points. 
Todo: add API to explicitly clear all contact points?\n #self._pybullet_client.stepSimulation()\n self._humanoid.resetPose()\n self.needs_update_time = self.t-1#force update\n \t\n def get_num_agents(self):\n \treturn self._num_agents\n \t\n def get_action_space(self, agent_id):\n \treturn ActionSpace(ActionSpace.Continuous)\n \t\n def get_reward_min(self, agent_id):\n \treturn 0\n \n def get_reward_max(self, agent_id):\n \treturn 1\n \t\n def get_reward_fail(self, agent_id):\n \treturn self.get_reward_min(agent_id)\n \n def get_reward_succ(self, agent_id):\n \treturn self.get_reward_max(agent_id)\n \t\n #scene_name == \"imitate\" -> cDrawSceneImitate\n def get_state_size(self, agent_id):\n #cCtController::GetStateSize()\n #int state_size = cDeepMimicCharController::GetStateSize();\n # state_size += GetStatePoseSize();#106\n\t # state_size += GetStateVelSize(); #(3+3)*numBodyParts=90\n #state_size += GetStatePhaseSize();#1\n #197\n return 197\n\n def build_state_norm_groups(self, agent_id):\n #if (mEnablePhaseInput)\n\t #{\n\t\t #int phase_group = gNormGroupNone;\n\t\t #int phase_offset = GetStatePhaseOffset();\n\t\t #int phase_size = GetStatePhaseSize();\n\t\t #out_groups.segment(phase_offset, phase_size) = phase_group * Eigen::VectorXi::Ones(phase_size);\n groups = [0]*self.get_state_size(agent_id)\n groups[0] = -1\n return groups\t\n \n def build_state_offset(self, agent_id):\n out_offset = [0]*self.get_state_size(agent_id)\n phase_offset = -0.5\n out_offset[0] = phase_offset\n return np.array(out_offset)\n \n def build_state_scale(self, agent_id):\n out_scale = [1]*self.get_state_size(agent_id)\n phase_scale = 2\n out_scale[0] = phase_scale\n return np.array(out_scale)\n\n def get_goal_size(self, agent_id):\n return 0\n\n def get_action_size(self, agent_id):\n \t ctrl_size = 43 #numDof\n \t root_size = 7\n \t return ctrl_size - root_size\n \t \n def build_goal_norm_groups(self, agent_id):\n return np.array([])\n \n def build_goal_offset(self, agent_id):\n return np.array([])\n \n def build_goal_scale(self, agent_id):\n return np.array([])\n \n def build_action_offset(self, agent_id):\n \t out_offset = [0] * self.get_action_size(agent_id)\n \t out_offset = [0.0000000000,0.0000000000,0.0000000000,-0.200000000,0.0000000000,0.0000000000,0.0000000000,\n \t -0.200000000,0.0000000000,0.0000000000,\t0.00000000,\t-0.2000000,\t1.57000000,\t0.00000000,\t0.00000000,\n \t 0.00000000,\t-0.2000000,\t0.00000000,\t0.00000000,\t0.00000000,\t-0.2000000,\t-1.5700000,\t0.00000000,\t0.00000000,\n \t 0.00000000,\t-0.2000000,\t1.57000000,\t0.00000000,\t0.00000000,\t0.00000000,\t-0.2000000,\t0.00000000,\t0.00000000,\n \t 0.00000000,\t-0.2000000,\t-1.5700000]\n \t #see cCtCtrlUtil::BuildOffsetScalePDPrismatic and\n \t #see cCtCtrlUtil::BuildOffsetScalePDSpherical\n \t return np.array(out_offset)\n \t \n def build_action_scale(self, agent_id):\n \t out_scale = [1] * self.get_action_size(agent_id)\n \t #see cCtCtrlUtil::BuildOffsetScalePDPrismatic and\n \t #see cCtCtrlUtil::BuildOffsetScalePDSpherical\n \t out_scale=[ 0.20833333333333,1.00000000000000,1.00000000000000,1.00000000000000,0.25000000000000,\n \t 1.00000000000000,1.00000000000000,1.00000000000000,0.12077294685990,1.00000000000000,\n \t 1.000000000000,\t1.000000000000,\t0.159235668789,\t0.159235668789,\t1.000000000000,\n \t 1.000000000000,\t1.000000000000,\t0.079617834394,\t1.000000000000,\t1.000000000000,\n \t 1.000000000000,\t0.159235668789,\t0.120772946859,\t1.000000000000,\t1.000000000000,\n \t 
1.000000000000,\t0.159235668789,\t0.159235668789,\t1.000000000000,\t1.000000000000,\n \t 1.000000000000,\t0.107758620689,\t1.000000000000,\t1.000000000000,\t1.000000000000,\n \t 0.159235668789]\n \t return np.array(out_scale)\n \n def build_action_bound_min(self, agent_id):\n \t #see cCtCtrlUtil::BuildBoundsPDSpherical\n \t out_scale = [-1] * self.get_action_size(agent_id)\n \t out_scale = [-4.79999999999,-1.00000000000,-1.00000000000,-1.00000000000,-4.00000000000,\n \t -1.00000000000,-1.00000000000,-1.00000000000,-7.77999999999,-1.00000000000,\t-1.000000000,\n\t \t -1.000000000,\t-7.850000000,\t-6.280000000,\t-1.000000000,\t-1.000000000,\t-1.000000000,\n\t \t -12.56000000,\t-1.000000000,\t-1.000000000,\t-1.000000000,\t-4.710000000,\n\t \t -7.779999999,\t-1.000000000,\t-1.000000000,\t-1.000000000,\t-7.850000000,\n\t \t -6.280000000,\t-1.000000000,\t-1.000000000,\t-1.000000000,\t-8.460000000,\n\t \t -1.000000000,\t-1.000000000,\t-1.000000000,\t-4.710000000]\n\n \t return out_scale\n \n def build_action_bound_max(self, agent_id):\n \t out_scale = [1] * self.get_action_size(agent_id)\n \t out_scale=[\n \t 4.799999999,1.000000000,1.000000000,1.000000000,4.000000000,1.000000000,\n \t 1.000000000,1.000000000,8.779999999,1.000000000,\t1.0000000,\t1.0000000,\n \t \t4.7100000,\t6.2800000,\t1.0000000,\t1.0000000,\t1.0000000,\n\t \t 12.560000,\t1.0000000,\t1.0000000,\t1.0000000,\t7.8500000,\n\t \t 8.7799999,\t1.0000000,\t1.0000000,\t1.0000000,\t4.7100000,\n\t \t 6.2800000,\t1.0000000,\t1.0000000,\t1.0000000,\t10.100000,\n\t \t 1.0000000,\t1.0000000,\t1.0000000,\t7.8500000]\n \t return out_scale\n \t \n def set_mode(self, mode):\n \t self._mode = mode\n \t \n def need_new_action(self, agent_id):\n if self.t>=self.needs_update_time:\n self.needs_update_time = self.t + 1./30.\n return True\n return False\n \n def record_state(self, agent_id):\n state = self._humanoid.getState()\n \n return np.array(state)\n \n \n def record_goal(self, agent_id):\n return np.array([])\n \n def calc_reward(self, agent_id):\n kinPose = self._humanoid.computePose(self._humanoid._frameFraction)\n reward = self._humanoid.getReward(kinPose)\n return reward\n \n def set_action(self, agent_id, action):\n #print(\"action=\",)\n #for a in action:\n # print(a)\n np.savetxt(\"pb_action.csv\", action, delimiter=\",\")\n self.desiredPose = self._humanoid.convertActionToPose(action)\n #we need the target root positon and orientation to be zero, to be compatible with deep mimic\n self.desiredPose[0] = 0\n self.desiredPose[1] = 0\n self.desiredPose[2] = 0\n self.desiredPose[3] = 0\n self.desiredPose[4] = 0\n self.desiredPose[5] = 0\n self.desiredPose[6] = 0\n target_pose = np.array(self.desiredPose)\n \n \n np.savetxt(\"pb_target_pose.csv\", target_pose, delimiter=\",\")\n \n #print(\"set_action: desiredPose=\", self.desiredPose)\n \n def log_val(self, agent_id, val):\n pass\n \n def update(self, timeStep):\n #print(\"pybullet_deep_mimic_env:update timeStep=\",timeStep,\" t=\",self.t)\n for i in range(1):\n self.t += timeStep\n self._humanoid.setSimTime(self.t)\n \n if self.desiredPose:\n #kinPose = self._humanoid.computePose(self._humanoid._frameFraction)\n #self._humanoid.initializePose(self._humanoid._poseInterpolator, self._humanoid._kin_model, initBase=False)\n #pos,orn=self._pybullet_client.getBasePositionAndOrientation(self._humanoid._sim_model)\n #self._pybullet_client.resetBasePositionAndOrientation(self._humanoid._kin_model, [pos[0]+3,pos[1],pos[2]],orn)\n #print(\"desiredPositions=\",self.desiredPose)\n maxForces = 
[0,0,0,0,0,0,0,200,200,200,200, 50,50,50,50, 200,200,200,200, 150, 90,90,90,90, 100,100,100,100, 60, 200,200,200,200, 150, 90, 90, 90, 90, 100,100,100,100, 60]\n \n if self._useStablePD:\n taus = self._humanoid.computePDForces(self.desiredPose, desiredVelocities=None, maxForces=maxForces)\n self._humanoid.applyPDForces(taus)\n else:\n self._humanoid.setJointMotors(self.desiredPose, maxForces=maxForces)\n \n self._pybullet_client.stepSimulation()\n \n\n def set_sample_count(self, count):\n return\n \n def check_terminate(self, agent_id):\n return Env.Terminate(self.is_episode_end())\n \n def is_episode_end(self):\n isEnded = self._humanoid.terminates()\n #also check maximum time, 20 seconds (todo get from file)\n #print(\"self.t=\",self.t)\n if (self.t>20):\n isEnded = True\n return isEnded\n \n def check_valid_episode(self):\n #could check if limbs exceed velocity threshold\n return True\n \n def getKeyboardEvents(self):\n return self._pybullet_client.getKeyboardEvents()\n \n def isKeyTriggered(self, keys, key):\n o = ord(key)\n #print(\"ord=\",o)\n if o in keys:\n return keys[ord(key)] & self._pybullet_client.KEY_WAS_TRIGGERED\n return False\n",
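A sketch of the intended control pattern (assumption: illustrative only, using just the methods defined above): the policy supplies a new target pose at roughly 30 Hz via need_new_action(), while update() advances the 1/600 s physics substeps with stable-PD torques in between:

agent_id = 0
env = PyBulletDeepMimicEnv(enable_draw=False)
time_step = 1.0 / 600.0
for _ in range(600):  # about one simulated second
    if env.need_new_action(agent_id):
        state = env.record_state(agent_id)
        # Placeholder policy: a zero action of the correct dimension (43 - 7 = 36).
        env.set_action(agent_id, [0.0] * env.get_action_size(agent_id))
    env.update(time_step)
    if env.is_episode_end():
        break

Be aware that set_action() as written also dumps pb_action.csv and pb_target_pose.csv to the working directory on every call.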
"import numpy as np\nimport copy as copy\nimport tensorflow as tf\n\nfrom pybullet_envs.deep_mimic.learning.pg_agent import PGAgent\nfrom pybullet_envs.deep_mimic.learning.solvers.mpi_solver import MPISolver\nimport pybullet_envs.deep_mimic.learning.tf_util as TFUtil\nimport pybullet_envs.deep_mimic.learning.rl_util as RLUtil\nfrom pybullet_utils.logger import Logger\nimport pybullet_utils.mpi_util as MPIUtil\nimport pybullet_utils.math_util as MathUtil\nfrom pybullet_envs.deep_mimic.env.env import Env\n\n'''\nProximal Policy Optimization Agent\n'''\n\nclass PPOAgent(PGAgent):\n NAME = \"PPO\"\n EPOCHS_KEY = \"Epochs\"\n BATCH_SIZE_KEY = \"BatchSize\"\n RATIO_CLIP_KEY = \"RatioClip\"\n NORM_ADV_CLIP_KEY = \"NormAdvClip\"\n TD_LAMBDA_KEY = \"TDLambda\"\n TAR_CLIP_FRAC = \"TarClipFrac\"\n ACTOR_STEPSIZE_DECAY = \"ActorStepsizeDecay\"\n\n def __init__(self, world, id, json_data): \n super().__init__(world, id, json_data)\n return\n\n def _load_params(self, json_data):\n super()._load_params(json_data)\n\n self.epochs = 1 if (self.EPOCHS_KEY not in json_data) else json_data[self.EPOCHS_KEY]\n self.batch_size = 1024 if (self.BATCH_SIZE_KEY not in json_data) else json_data[self.BATCH_SIZE_KEY]\n self.ratio_clip = 0.2 if (self.RATIO_CLIP_KEY not in json_data) else json_data[self.RATIO_CLIP_KEY]\n self.norm_adv_clip = 5 if (self.NORM_ADV_CLIP_KEY not in json_data) else json_data[self.NORM_ADV_CLIP_KEY]\n self.td_lambda = 0.95 if (self.TD_LAMBDA_KEY not in json_data) else json_data[self.TD_LAMBDA_KEY]\n self.tar_clip_frac = -1 if (self.TAR_CLIP_FRAC not in json_data) else json_data[self.TAR_CLIP_FRAC]\n self.actor_stepsize_decay = 0.5 if (self.ACTOR_STEPSIZE_DECAY not in json_data) else json_data[self.ACTOR_STEPSIZE_DECAY]\n\n num_procs = MPIUtil.get_num_procs()\n local_batch_size = int(self.batch_size / num_procs)\n min_replay_size = 2 * local_batch_size # needed to prevent buffer overflow\n assert(self.replay_buffer_size > min_replay_size)\n\n self.replay_buffer_size = np.maximum(min_replay_size, self.replay_buffer_size)\n\n return\n\n def _build_nets(self, json_data):\n assert self.ACTOR_NET_KEY in json_data\n assert self.CRITIC_NET_KEY in json_data\n\n actor_net_name = json_data[self.ACTOR_NET_KEY]\n critic_net_name = json_data[self.CRITIC_NET_KEY]\n actor_init_output_scale = 1 if (self.ACTOR_INIT_OUTPUT_SCALE_KEY not in json_data) else json_data[self.ACTOR_INIT_OUTPUT_SCALE_KEY]\n\n s_size = self.get_state_size()\n g_size = self.get_goal_size()\n a_size = self.get_action_size()\n\n # setup input tensors\n self.s_tf = tf.placeholder(tf.float32, shape=[None, s_size], name=\"s\")\n self.a_tf = tf.placeholder(tf.float32, shape=[None, a_size], name=\"a\")\n self.tar_val_tf = tf.placeholder(tf.float32, shape=[None], name=\"tar_val\")\n self.adv_tf = tf.placeholder(tf.float32, shape=[None], name=\"adv\")\n self.g_tf = tf.placeholder(tf.float32, shape=([None, g_size] if self.has_goal() else None), name=\"g\")\n self.old_logp_tf = tf.placeholder(tf.float32, shape=[None], name=\"old_logp\")\n self.exp_mask_tf = tf.placeholder(tf.float32, shape=[None], name=\"exp_mask\")\n\n with tf.variable_scope('main'):\n with tf.variable_scope('actor'):\n self.a_mean_tf = self._build_net_actor(actor_net_name, actor_init_output_scale)\n with tf.variable_scope('critic'):\n self.critic_tf = self._build_net_critic(critic_net_name)\n \n if (self.a_mean_tf != None):\n Logger.print2('Built actor net: ' + actor_net_name)\n\n if (self.critic_tf != None):\n Logger.print2('Built critic net: ' + critic_net_name)\n \n 
self.norm_a_std_tf = self.exp_params_curr.noise * tf.ones(a_size)\n norm_a_noise_tf = self.norm_a_std_tf * tf.random_normal(shape=tf.shape(self.a_mean_tf))\n norm_a_noise_tf *= tf.expand_dims(self.exp_mask_tf, axis=-1)\n self.sample_a_tf = self.a_mean_tf + norm_a_noise_tf * self.a_norm.std_tf\n self.sample_a_logp_tf = TFUtil.calc_logp_gaussian(x_tf=norm_a_noise_tf, mean_tf=None, std_tf=self.norm_a_std_tf)\n\n return\n\n def _build_losses(self, json_data):\n actor_weight_decay = 0 if (self.ACTOR_WEIGHT_DECAY_KEY not in json_data) else json_data[self.ACTOR_WEIGHT_DECAY_KEY]\n critic_weight_decay = 0 if (self.CRITIC_WEIGHT_DECAY_KEY not in json_data) else json_data[self.CRITIC_WEIGHT_DECAY_KEY]\n \n norm_val_diff = self.val_norm.normalize_tf(self.tar_val_tf) - self.val_norm.normalize_tf(self.critic_tf)\n self.critic_loss_tf = 0.5 * tf.reduce_mean(tf.square(norm_val_diff))\n\n if (critic_weight_decay != 0):\n self.critic_loss_tf += critic_weight_decay * self._weight_decay_loss('main/critic')\n \n norm_tar_a_tf = self.a_norm.normalize_tf(self.a_tf)\n self._norm_a_mean_tf = self.a_norm.normalize_tf(self.a_mean_tf)\n\n self.logp_tf = TFUtil.calc_logp_gaussian(norm_tar_a_tf, self._norm_a_mean_tf, self.norm_a_std_tf)\n ratio_tf = tf.exp(self.logp_tf - self.old_logp_tf)\n actor_loss0 = self.adv_tf * ratio_tf\n actor_loss1 = self.adv_tf * tf.clip_by_value(ratio_tf, 1.0 - self.ratio_clip, 1 + self.ratio_clip)\n self.actor_loss_tf = -tf.reduce_mean(tf.minimum(actor_loss0, actor_loss1))\n\n norm_a_bound_min = self.a_norm.normalize(self.a_bound_min)\n norm_a_bound_max = self.a_norm.normalize(self.a_bound_max)\n a_bound_loss = TFUtil.calc_bound_loss(self._norm_a_mean_tf, norm_a_bound_min, norm_a_bound_max)\n self.actor_loss_tf += a_bound_loss\n\n if (actor_weight_decay != 0):\n self.actor_loss_tf += actor_weight_decay * self._weight_decay_loss('main/actor')\n \n # for debugging\n self.clip_frac_tf = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio_tf - 1.0), self.ratio_clip)))\n\n return\n\n def _build_solvers(self, json_data):\n actor_stepsize = 0.001 if (self.ACTOR_STEPSIZE_KEY not in json_data) else json_data[self.ACTOR_STEPSIZE_KEY]\n actor_momentum = 0.9 if (self.ACTOR_MOMENTUM_KEY not in json_data) else json_data[self.ACTOR_MOMENTUM_KEY]\n critic_stepsize = 0.01 if (self.CRITIC_STEPSIZE_KEY not in json_data) else json_data[self.CRITIC_STEPSIZE_KEY]\n critic_momentum = 0.9 if (self.CRITIC_MOMENTUM_KEY not in json_data) else json_data[self.CRITIC_MOMENTUM_KEY]\n \n critic_vars = self._tf_vars('main/critic')\n critic_opt = tf.train.MomentumOptimizer(learning_rate=critic_stepsize, momentum=critic_momentum)\n self.critic_grad_tf = tf.gradients(self.critic_loss_tf, critic_vars)\n self.critic_solver = MPISolver(self.sess, critic_opt, critic_vars)\n\n self._actor_stepsize_tf = tf.get_variable(dtype=tf.float32, name='actor_stepsize', initializer=actor_stepsize, trainable=False)\n self._actor_stepsize_ph = tf.get_variable(dtype=tf.float32, name='actor_stepsize_ph', shape=[])\n self._actor_stepsize_update_op = self._actor_stepsize_tf.assign(self._actor_stepsize_ph)\n\n actor_vars = self._tf_vars('main/actor')\n actor_opt = tf.train.MomentumOptimizer(learning_rate=self._actor_stepsize_tf, momentum=actor_momentum)\n self.actor_grad_tf = tf.gradients(self.actor_loss_tf, actor_vars)\n self.actor_solver = MPISolver(self.sess, actor_opt, actor_vars)\n \n return\n\n def _decide_action(self, s, g):\n with self.sess.as_default(), self.graph.as_default():\n self._exp_action = self._enable_stoch_policy() and 
MathUtil.flip_coin(self.exp_params_curr.rate)\n #print(\"_decide_action._exp_action=\",self._exp_action)\n a, logp = self._eval_actor(s, g, self._exp_action)\n return a[0], logp[0]\n\n def _eval_actor(self, s, g, enable_exp):\n s = np.reshape(s, [-1, self.get_state_size()])\n g = np.reshape(g, [-1, self.get_goal_size()]) if self.has_goal() else None\n \n feed = {\n self.s_tf : s,\n self.g_tf : g,\n self.exp_mask_tf: np.array([1 if enable_exp else 0])\n }\n\n a, logp = self.sess.run([self.sample_a_tf, self.sample_a_logp_tf], feed_dict=feed)\n return a, logp\n\n def _train_step(self):\n adv_eps = 1e-5\n\n start_idx = self.replay_buffer.buffer_tail\n end_idx = self.replay_buffer.buffer_head\n assert(start_idx == 0)\n assert(self.replay_buffer.get_current_size() <= self.replay_buffer.buffer_size) # must avoid overflow\n assert(start_idx < end_idx)\n\n idx = np.array(list(range(start_idx, end_idx))) \n end_mask = self.replay_buffer.is_path_end(idx)\n end_mask = np.logical_not(end_mask) \n \n vals = self._compute_batch_vals(start_idx, end_idx)\n new_vals = self._compute_batch_new_vals(start_idx, end_idx, vals)\n\n valid_idx = idx[end_mask]\n exp_idx = self.replay_buffer.get_idx_filtered(self.EXP_ACTION_FLAG).copy()\n num_valid_idx = valid_idx.shape[0]\n num_exp_idx = exp_idx.shape[0]\n exp_idx = np.column_stack([exp_idx, np.array(list(range(0, num_exp_idx)), dtype=np.int32)])\n \n local_sample_count = valid_idx.size\n global_sample_count = int(MPIUtil.reduce_sum(local_sample_count))\n mini_batches = int(np.ceil(global_sample_count / self.mini_batch_size))\n \n adv = new_vals[exp_idx[:,0]] - vals[exp_idx[:,0]]\n new_vals = np.clip(new_vals, self.val_min, self.val_max)\n\n adv_mean = np.mean(adv)\n adv_std = np.std(adv)\n adv = (adv - adv_mean) / (adv_std + adv_eps)\n adv = np.clip(adv, -self.norm_adv_clip, self.norm_adv_clip)\n\n critic_loss = 0\n actor_loss = 0\n actor_clip_frac = 0\n\n for e in range(self.epochs):\n np.random.shuffle(valid_idx)\n np.random.shuffle(exp_idx)\n\n for b in range(mini_batches):\n batch_idx_beg = b * self._local_mini_batch_size\n batch_idx_end = batch_idx_beg + self._local_mini_batch_size\n\n critic_batch = np.array(range(batch_idx_beg, batch_idx_end), dtype=np.int32)\n actor_batch = critic_batch.copy()\n critic_batch = np.mod(critic_batch, num_valid_idx)\n actor_batch = np.mod(actor_batch, num_exp_idx)\n shuffle_actor = (actor_batch[-1] < actor_batch[0]) or (actor_batch[-1] == num_exp_idx - 1)\n\n critic_batch = valid_idx[critic_batch]\n actor_batch = exp_idx[actor_batch]\n critic_batch_vals = new_vals[critic_batch]\n actor_batch_adv = adv[actor_batch[:,1]]\n\n critic_s = self.replay_buffer.get('states', critic_batch)\n critic_g = self.replay_buffer.get('goals', critic_batch) if self.has_goal() else None\n curr_critic_loss = self._update_critic(critic_s, critic_g, critic_batch_vals)\n\n actor_s = self.replay_buffer.get(\"states\", actor_batch[:,0])\n actor_g = self.replay_buffer.get(\"goals\", actor_batch[:,0]) if self.has_goal() else None\n actor_a = self.replay_buffer.get(\"actions\", actor_batch[:,0])\n actor_logp = self.replay_buffer.get(\"logps\", actor_batch[:,0])\n curr_actor_loss, curr_actor_clip_frac = self._update_actor(actor_s, actor_g, actor_a, actor_logp, actor_batch_adv)\n \n critic_loss += curr_critic_loss\n actor_loss += np.abs(curr_actor_loss)\n actor_clip_frac += curr_actor_clip_frac\n\n if (shuffle_actor):\n np.random.shuffle(exp_idx)\n\n total_batches = mini_batches * self.epochs\n critic_loss /= total_batches\n actor_loss /= total_batches\n 
actor_clip_frac /= total_batches\n\n critic_loss = MPIUtil.reduce_avg(critic_loss)\n actor_loss = MPIUtil.reduce_avg(actor_loss)\n actor_clip_frac = MPIUtil.reduce_avg(actor_clip_frac)\n\n critic_stepsize = self.critic_solver.get_stepsize()\n actor_stepsize = self.update_actor_stepsize(actor_clip_frac)\n\n self.logger.log_tabular('Critic_Loss', critic_loss)\n self.logger.log_tabular('Critic_Stepsize', critic_stepsize)\n self.logger.log_tabular('Actor_Loss', actor_loss) \n self.logger.log_tabular('Actor_Stepsize', actor_stepsize)\n self.logger.log_tabular('Clip_Frac', actor_clip_frac)\n self.logger.log_tabular('Adv_Mean', adv_mean)\n self.logger.log_tabular('Adv_Std', adv_std)\n\n self.replay_buffer.clear()\n\n return\n\n def _get_iters_per_update(self):\n return 1\n\n def _valid_train_step(self):\n samples = self.replay_buffer.get_current_size()\n exp_samples = self.replay_buffer.count_filtered(self.EXP_ACTION_FLAG)\n global_sample_count = int(MPIUtil.reduce_sum(samples))\n global_exp_min = int(MPIUtil.reduce_min(exp_samples))\n return (global_sample_count > self.batch_size) and (global_exp_min > 0)\n\n def _compute_batch_vals(self, start_idx, end_idx):\n states = self.replay_buffer.get_all(\"states\")[start_idx:end_idx]\n goals = self.replay_buffer.get_all(\"goals\")[start_idx:end_idx] if self.has_goal() else None\n \n idx = np.array(list(range(start_idx, end_idx))) \n is_end = self.replay_buffer.is_path_end(idx)\n is_fail = self.replay_buffer.check_terminal_flag(idx, Env.Terminate.Fail)\n is_succ = self.replay_buffer.check_terminal_flag(idx, Env.Terminate.Succ)\n is_fail = np.logical_and(is_end, is_fail) \n is_succ = np.logical_and(is_end, is_succ) \n\n vals = self._eval_critic(states, goals)\n vals[is_fail] = self.val_fail\n vals[is_succ] = self.val_succ\n\n return vals\n\n def _compute_batch_new_vals(self, start_idx, end_idx, val_buffer):\n rewards = self.replay_buffer.get_all(\"rewards\")[start_idx:end_idx]\n\n if self.discount == 0:\n new_vals = rewards.copy()\n else:\n new_vals = np.zeros_like(val_buffer)\n\n curr_idx = start_idx\n while curr_idx < end_idx:\n idx0 = curr_idx - start_idx\n idx1 = self.replay_buffer.get_path_end(curr_idx) - start_idx\n r = rewards[idx0:idx1]\n v = val_buffer[idx0:(idx1 + 1)]\n\n new_vals[idx0:idx1] = RLUtil.compute_return(r, self.discount, self.td_lambda, v)\n curr_idx = idx1 + start_idx + 1\n \n return new_vals\n\n def _update_critic(self, s, g, tar_vals):\n feed = {\n self.s_tf: s,\n self.g_tf: g,\n self.tar_val_tf: tar_vals\n }\n\n loss, grads = self.sess.run([self.critic_loss_tf, self.critic_grad_tf], feed)\n self.critic_solver.update(grads)\n return loss\n \n def _update_actor(self, s, g, a, logp, adv):\n feed = {\n self.s_tf: s,\n self.g_tf: g,\n self.a_tf: a,\n self.adv_tf: adv,\n self.old_logp_tf: logp\n }\n\n loss, grads, clip_frac = self.sess.run([self.actor_loss_tf, self.actor_grad_tf,\n self.clip_frac_tf], feed)\n self.actor_solver.update(grads)\n\n return loss, clip_frac\n\n def update_actor_stepsize(self, clip_frac):\n clip_tol = 1.5\n step_scale = 2\n max_stepsize = 1e-2\n min_stepsize = 1e-8\n warmup_iters = 5\n\n actor_stepsize = self.actor_solver.get_stepsize()\n if (self.tar_clip_frac >= 0 and self.iter > warmup_iters):\n min_clip = self.tar_clip_frac / clip_tol\n max_clip = self.tar_clip_frac * clip_tol\n under_tol = clip_frac < min_clip\n over_tol = clip_frac > max_clip\n\n if (over_tol or under_tol):\n if (over_tol):\n actor_stepsize *= self.actor_stepsize_decay\n else:\n actor_stepsize /= self.actor_stepsize_decay\n\n 
actor_stepsize = np.clip(actor_stepsize, min_stepsize, max_stepsize)\n self.set_actor_stepsize(actor_stepsize)\n\n return actor_stepsize\n\n def set_actor_stepsize(self, stepsize):\n feed = {\n self._actor_stepsize_ph: stepsize,\n }\n self.sess.run(self._actor_stepsize_update_op, feed)\n return\n",
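The actor loss built in _build_losses above is PPO's clipped surrogate objective. A minimal numpy sketch of the same computation (assumption: a standalone illustration, not the agent's actual TensorFlow code path):

import numpy as np

def ppo_actor_loss(logp, old_logp, adv, ratio_clip=0.2):
    # r = pi_new(a|s) / pi_old(a|s), recovered from log-probabilities.
    ratio = np.exp(logp - old_logp)
    unclipped = adv * ratio
    clipped = adv * np.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
    # Pessimistic bound: elementwise minimum, negated for gradient descent.
    return -np.mean(np.minimum(unclipped, clipped))

logp = np.array([-1.0, -2.0])
old_logp = np.array([-1.1, -1.5])
adv = np.array([0.5, -0.3])
print(ppo_actor_loss(logp, old_logp, adv))

update_actor_stepsize() then adapts the learning rate so the observed clip fraction tracks tar_clip_frac: it shrinks the step when too many ratios clip and grows it when too few do.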
"#add parent dir to find package. Only needed for source code build, pip install doesn't need it.\nimport os, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(os.path.dirname(currentdir))\nos.sys.path.insert(0,parentdir)\n\nimport gym\nimport numpy as np\nimport pybullet_envs\nimport time\n\ndef relu(x):\n return np.maximum(x, 0)\n\nclass SmallReactivePolicy:\n \"Simple multi-layer perceptron policy, no internal state\"\n def __init__(self, observation_space, action_space):\n assert weights_dense1_w.shape == (observation_space.shape[0], 256)\n assert weights_dense2_w.shape == (256, 128)\n assert weights_final_w.shape == (128, action_space.shape[0])\n\n def act(self, ob):\n ob[0] += -1.4 + 0.8\n x = ob\n x = relu(np.dot(x, weights_dense1_w) + weights_dense1_b)\n x = relu(np.dot(x, weights_dense2_w) + weights_dense2_b)\n x = np.dot(x, weights_final_w) + weights_final_b\n return x\n\ndef main():\n env = gym.make(\"HumanoidBulletEnv-v0\")\n env.render(mode=\"human\")\n pi = SmallReactivePolicy(env.observation_space, env.action_space)\n env.reset()\n while 1:\n frame = 0\n score = 0\n restart_delay = 0\n obs = env.reset()\n \n while 1:\n a = pi.act(obs)\n obs, r, done, _ = env.step(a)\n score += r\n frame += 1\n time.sleep(1./60.)\n\n still_open = env.render(\"human\")\n if still_open==False:\n return\n if not done: continue\n if restart_delay==0:\n print(\"score=%0.2f in %i frames\" % (score, frame))\n restart_delay = 60*2 # 2 sec at 60 fps\n else:\n restart_delay -= 1\n if restart_delay==0: break\n\nweights_dense1_w = np.array([\n[ +0.2289, +0.2584, +0.2595, +0.0173, +0.1293, -0.2980, +0.1410, +0.0982, +0.0216, +0.3724, +0.2204, -0.0734, -0.2420, -0.3443, -0.2738, -0.3825, +0.1504, -0.0930, -0.2680, +0.0685, +0.1592, +0.2534, -0.0787, -0.0426, +0.2591, +0.2134, -0.1631, +0.5168, +0.1444, +0.2736, +0.3623, -0.3472, +0.0393, -0.3056, +0.3850, -0.5231, +0.4511, +0.4223, -0.0905, +0.2265, +0.1662, +0.1092, +0.0426, -0.0209, +0.3260, -0.1788, +0.5045, -0.0254, +0.6684, +0.4659, +0.2193, +0.6121, -0.1771, +0.3024, +0.3233, -0.3380, +0.1834, +0.1947, +0.2840, -0.0212, +0.0610, +0.0254, -0.0687, +0.3342, -0.2010, +0.4851, -0.5739, -0.3228, -0.2242, +0.6149, +0.2704, -0.1006, +0.3950, -0.2684, +0.0090, -0.2419, +0.1112, -0.0795, +0.0021, -0.0317, +0.1345, -0.2847, +0.2323, +0.5374, -0.0119, -0.2098, +0.2074, +0.1693, +0.4537, -0.1453, +0.2661, -0.2997, +0.1043, -0.2340, +0.4472, -0.0415, +0.6437, +0.0279, +0.1609, +0.3353, -0.2240, -0.5433, -0.0053, +0.1863, +0.1038, +0.3337, +0.3889, +0.4159, -0.0836, -0.0826, +0.0872, -0.1362, +0.0061, -0.2982, -0.0074, -0.1452, -0.0655, -0.1369, -0.0493, -0.1082, -0.4080, +0.4732, +0.1229, +0.3087, -0.1222, +0.3846, +0.0719, -0.4536, +0.1202, -0.3903, -0.0445, +0.4052, +0.2922, +0.1095, +0.0317, +0.0974, +0.1149, -0.3794, +0.4364, -0.1597, +0.2889, +0.2431, +0.6867, +0.5117, -0.2517, +0.2835, -0.0365, +0.3303, +0.2569, +0.3636, -0.0945, +0.1853, -0.1341, +0.2595, -0.0174, -0.3227, +0.1999, +0.2348, +0.5826, -0.0480, +0.1449, +0.3866, +0.4740, +0.2486, +0.5011, -0.0334, +0.0953, +0.1072, -0.0901, -0.3235, +0.3258, +0.3003, +0.1229, -0.2257, -0.1920, -0.2025, -0.2022, +0.0843, +0.4563, -0.2654, +0.3158, -0.3102, +0.1105, -0.1171, +0.0465, -0.0692, +0.2824, +0.2598, -0.0289, -0.7774, +0.3501, -0.0635, +0.2257, -0.1644, +0.4091, -0.1967, +0.1614, +0.5468, +0.1365, +0.5054, +0.1986, +0.1368, +0.0886, -0.1305, +0.4297, +0.2482, -0.1914, -0.2909, +0.1784, +0.0690, +0.0224, +0.0378, +0.4991, 
-0.3410, +0.4281, -0.4574, +0.1952, -0.0444, -0.1939, +0.1637, +0.2385, +0.1354, +0.1817, -0.3849, +0.1909, +0.6585, +0.0997, +0.1376, +0.3598, +0.0611, +0.0124, -0.1492, -0.0659, -0.2671, -0.3925, +0.2333, +0.4184, -0.1364, -0.0361, -0.5937, -0.0327, -0.1142, -0.3334, +0.2963, +0.2350, -0.2600, +0.2784, -0.0720, -0.1594, -0.2652, +0.3922, +0.7291, -0.1830, +0.2253, +0.1965, +0.1972],\n[ +0.2244, +0.6008, -0.2907, -0.1769, -0.1935, +0.0902, +0.3499, +0.0213, +0.1343, -0.4925, -0.0996, -0.2332, -0.0739, -0.3686, -0.8946, +0.0956, +0.0229, -0.1700, -0.1541, -0.1738, -0.2712, +0.2399, -0.9785, +0.0332, -0.1472, +0.2317, -0.2498, -0.5549, +0.2247, +0.1835, +0.3699, +0.4326, +1.0277, +0.5037, +0.1917, -0.0519, +0.6952, +0.0699, +0.5892, -0.2437, +0.4122, +0.8816, +0.1263, -0.2072, +0.7932, +0.1292, -0.0770, +0.0025, +0.5216, -0.0476, +0.1862, +0.5225, -0.1914, +0.2424, +0.9420, +0.3432, +0.0285, +0.1507, +0.1983, -0.3111, -0.2958, -0.3750, -0.3894, +0.4764, -0.0933, -0.1671, -0.3327, -0.1734, +0.3197, -0.2884, +0.0234, -0.3527, -0.4019, +0.4847, +0.8950, +0.1055, +0.1383, +0.2209, -0.0691, +0.6060, +0.3265, -0.0979, -0.2110, +0.6802, -0.4336, -0.6381, -0.1507, +0.4082, -0.1635, -0.0835, -0.0082, +0.5745, -0.2450, +0.7778, -0.2730, -0.6112, -0.0839, -0.0785, -0.6745, -0.1420, +0.4217, +0.3215, -0.2859, +0.3225, +0.0936, +0.0283, -0.1876, +0.4980, -0.4691, -0.0344, +0.1162, +0.1886, -0.1320, +0.4492, +0.0019, -0.0631, +0.2038, -0.3549, +0.2250, -0.2285, -0.0618, -0.0311, +0.7220, +0.0530, -0.3637, +0.2023, -0.3015, +0.1247, +0.2858, -0.2926, +0.2305, +0.2896, +0.1855, -0.3343, -0.1031, -0.3579, -0.6165, -0.3269, +0.0746, +0.2497, -0.3880, -0.5785, -0.7582, -0.1729, -0.3449, -0.1357, -0.5979, -0.7973, +0.1202, +0.6009, -0.0103, -0.0233, -0.0987, +0.4404, +0.4355, -0.0934, -0.0910, -0.0382, -0.0268, +0.0425, +0.0329, +0.7613, +0.1151, +0.1962, +0.3848, +0.6449, +0.0600, +0.8192, +0.2580, +0.4444, -0.5772, -0.1268, -0.0429, +0.0785, +0.1237, +0.5161, -0.3665, +0.0825, +0.1226, +0.4157, +0.4844, -0.5870, -0.6568, +0.1661, +0.0846, +0.9718, +0.8856, +0.4171, -0.4568, -0.0714, +0.0394, -0.1495, +0.1462, -0.1572, +1.3937, +0.1682, +0.4968, -0.3699, +0.0710, +0.2328, +0.4747, -0.4286, +0.4434, -0.0531, +0.8446, +0.0101, -0.4317, -0.2297, +0.4299, -0.1323, +0.4804, -0.2152, +0.0161, +0.0560, -0.3013, +0.2911, +0.3542, +0.3124, +0.3897, +0.1082, +0.2437, +0.0183, +0.2230, +0.0093, +0.1507, -0.3895, -0.2750, +0.0991, +0.1170, -0.5877, +0.4045, +1.0306, -0.1141, -0.0084, +0.3079, +0.4545, +0.0084, +0.1517, -0.0344, +0.4704, -0.2666, -0.0728, -0.0447, +0.4098, -0.4524, -0.4638, -0.4063, -0.2521, -0.2830, +0.1845, -0.3146, +0.4381, -0.0215, +0.2613, -0.1182, +0.4527],\n[ +0.1845, -0.1290, +0.0236, -0.1312, +0.0155, -0.6011, -0.0454, +0.0183, -0.0613, -0.1651, +0.0204, -0.2374, +0.1045, -0.2035, -0.2268, +0.2069, +0.0483, -0.3226, -0.2196, +0.0847, +0.1314, +0.0426, -0.4253, -0.0748, -0.1497, -0.1902, +0.3815, +0.1306, +0.0276, -0.2593, -0.0081, +0.1098, -0.0062, -0.1922, -0.0409, -0.2615, +0.1296, +0.0267, -0.1602, -0.4755, +0.0039, +0.2688, -0.0225, -0.1433, -0.0383, -0.0131, +0.0675, +0.1684, +0.1298, +0.3818, -0.0260, +0.1636, -0.2338, +0.0062, +0.1756, -0.1825, +0.1473, -0.2689, -0.1376, -0.0224, +0.2016, -0.2086, +0.0723, +0.2100, -0.3345, +0.1170, -0.4292, -0.1302, -0.1132, +0.0030, -0.3599, -0.1974, -0.4807, +0.0184, -0.0768, -0.0310, -0.2677, -0.0838, -0.0072, -0.1049, -0.2841, -0.2426, +0.2338, +0.0917, -0.1451, -0.3906, -0.0315, +0.1058, -0.0429, +0.1218, +0.0590, +0.0446, -0.0043, -0.0168, -0.0899, 
-0.3793, -0.3134, +0.0907, -0.5332, -0.0995, -0.0256, -0.2710, -0.0487, -0.0059, +0.0274, -0.1885, +0.3208, +0.0437, -0.0411, -0.3716, -0.5700, +0.0576, +0.0903, -0.1064, -0.1600, +0.1009, -0.1957, -0.0539, -0.2426, -0.5847, -0.2240, +0.0023, -0.2533, -0.2903, -0.0328, +0.1289, +0.0927, -0.2596, +0.2300, -0.4833, -0.1772, +0.3817, -0.1000, -0.2391, -0.2917, -0.3748, +0.0640, +0.0005, +0.1664, +0.0173, +0.2214, -0.2440, +0.0039, +0.2924, +0.3009, +0.0540, -0.3460, +0.1538, +0.3727, -0.0801, +0.0130, -0.0963, -0.0172, +0.0524, -0.1681, +0.1478, -0.1827, +0.2902, +0.2376, +0.0703, -0.3115, -0.0634, +0.0512, +0.1366, +0.0646, -0.0811, -0.1668, +0.1052, -0.1976, -0.1411, -0.1401, +0.0263, -0.0454, -0.0315, -0.1376, +0.0403, +0.1526, -0.0385, +0.0782, -0.1382, +0.0399, -0.1812, -0.3915, -0.1615, -0.0648, -0.3724, +0.1436, -0.1380, -0.4987, -0.1783, +0.2295, -0.4115, -0.2025, -0.3245, -0.3584, -0.1729, -0.0822, -0.0460, -0.1321, -0.3484, -0.4094, -0.1418, +0.0986, -0.2276, -0.0267, -0.1027, -0.1794, -0.1338, -0.2575, -0.2837, -0.0241, -0.2849, -0.2569, -0.0871, -0.0534, +0.1427, +0.1857, -0.1384, -0.3600, -0.1343, -0.0075, +0.1601, +0.1113, -0.1131, -0.1716, -0.2434, +0.1357, -0.1294, -0.2366, -0.0562, -0.1674, -0.0974, -0.1556, -0.5273, -0.1928, -0.0431, +0.1909, +0.0233, +0.2048, -0.2176, -0.1908, +0.0150, +0.1610, +0.0468, -0.1319, +0.0579, +0.4051, -0.2020, +0.0208, -0.5383, +0.1756, +0.0117, -0.2675, -0.1795, -0.1730, -0.1394],\n[ -0.5736, -0.8805, -0.0769, -0.0851, -0.5427, +0.1977, +0.0607, -0.3635, +0.5918, +0.1243, -0.0683, -0.5963, +0.2201, -0.1754, -0.1193, +0.2689, +0.2383, -0.1014, +0.2498, +0.0947, -0.3494, -0.0848, -0.3292, +0.0194, -0.1043, +0.4501, +0.5483, -0.0839, +0.2682, +0.5032, -0.0208, -0.0950, +0.2171, +0.2045, -0.3694, +0.3404, -0.0883, +0.2092, -0.2164, -0.1036, +0.2583, -0.0949, +0.0715, -0.3988, +0.0751, -0.1982, +0.5441, +0.0172, +0.3297, -0.6622, -0.1357, -0.5829, -0.2161, -0.6473, -0.0565, +0.6117, -0.0156, +0.6255, -0.1497, -0.1722, +0.1335, +0.6251, +0.3700, -0.5719, -0.2368, -0.0315, +0.0146, -0.8732, -0.2498, -0.1137, +0.2604, -0.1385, -0.8775, -0.5170, -0.2435, -0.3753, +0.2906, +0.0193, -0.5174, -0.3639, -0.2548, -0.5402, -0.8794, -0.5529, -0.0559, -0.1246, -0.0725, -0.0145, -0.7285, -0.0017, -0.1507, +0.3688, -0.1245, -0.3651, +0.3866, -0.1138, -0.0853, +0.0368, -0.4360, -0.1958, -0.1419, +0.1774, +0.0723, -0.3591, -0.4659, +0.3450, +0.3742, -0.1436, +0.0044, +0.2917, -0.5689, -0.5904, +0.1288, -0.4701, -0.2539, -0.6716, +0.2295, -0.4429, -0.0556, -0.0518, +0.2292, -1.7909, +0.1799, -0.1646, +0.3310, +0.0519, -0.1858, +0.0612, +0.0647, +0.1269, +0.1987, -0.0585, -0.2811, -0.8582, -0.6569, -0.3871, +0.1939, +0.1120, -0.0105, -0.3577, -0.0086, -0.2489, +0.4663, -0.1103, +0.0332, -0.6252, -0.2411, -0.0892, -0.4744, +0.1257, +0.1445, +0.1788, +0.0429, -0.2699, +0.4812, +0.4112, +0.2460, -0.0158, -0.6195, -0.7866, +0.7380, -0.1607, -0.9005, -0.3402, +0.1250, +0.0292, -0.5294, +0.2517, -0.1519, +0.6130, -0.3528, -0.4301, -0.2510, +0.5858, +0.0060, +0.0751, +0.0733, +0.2363, -0.6337, -0.0453, -0.3818, -0.0374, -0.0048, -0.4378, -0.0780, -0.1101, +0.1504, +0.4377, -0.3238, +0.2260, -0.4677, +0.1361, -0.0218, +0.2108, -0.0987, +0.3155, +0.6500, +0.2126, -0.2016, +0.3768, +0.6421, +0.2673, +0.1952, +0.0513, -0.0657, +0.2197, +0.2465, +0.2605, +0.3151, +0.0719, -0.6572, +0.4819, +0.2985, -0.1793, -0.1759, -0.3330, -0.5562, +0.1846, -0.1096, -0.5457, -0.6485, -0.4409, -0.4658, -0.0819, +0.1681, -0.3892, +0.4901, -0.3008, -0.7256, +0.1596, +0.0896, -0.3508, +0.4520, 
-0.5112, -0.3458, -0.6592, -0.9615, +0.1979, +0.2483, +0.1385, -0.0924, -0.2448, +0.4041, +0.5250, +0.1655, -0.5895, -0.4537, +0.3295, -0.4612, -0.1340, -0.5730, -0.2680, +0.4814, +0.0250, -1.0258, +0.1863],\n[ -1.0573, +0.3035, -1.0110, +0.1281, -0.5940, -0.0072, +0.4667, +0.7137, +0.0810, -0.8921, -0.1219, -1.0541, -0.7295, +0.7648, +0.1772, -0.1785, -1.0871, -0.1349, +0.3227, +0.6328, -0.8310, +0.8725, -0.4619, -0.3077, +0.8552, -0.3231, -0.1156, -0.5372, -0.4023, +0.8194, -0.8025, -0.5804, +0.5964, -0.0932, +0.5116, -0.2766, +0.1760, -0.1303, +0.6465, -0.0711, -0.1220, -0.5499, +0.1202, +0.1071, +0.2686, -0.1856, -0.2504, +0.0925, -0.4784, +0.9105, -1.1430, -0.5899, -0.1242, +0.5508, +0.7145, +0.2748, -0.3478, -0.7003, +0.4850, +0.1385, +0.3943, +0.2670, -0.4550, +0.0036, -0.5703, -0.8350, -1.1953, -0.0970, +0.3308, +0.7714, +0.1061, +2.0960, +0.0376, -0.7406, +0.0789, +1.5258, +0.9057, +0.4235, -0.5466, +0.1064, +0.2408, +0.7252, -0.2936, +0.4144, -0.3486, -0.7981, +0.0240, -0.1555, +0.9355, -0.4706, -0.7375, +0.9309, +0.7671, -0.0113, -0.2764, -0.0366, +0.2126, +0.6469, -0.4462, -0.2112, +0.6839, +0.4796, -0.1490, +0.8926, -0.2453, +0.0598, -0.0021, +0.3849, +0.4954, -0.1375, -0.1142, +0.8535, +0.8888, -0.3101, +0.7679, -0.5564, -0.2071, -0.3134, -0.0526, -0.1788, +0.3544, +0.6677, +0.3217, -0.6103, -0.0902, +0.3894, +0.8153, -0.5409, -0.0261, +0.7648, +0.3098, +0.5138, -0.1609, +0.3192, +0.4370, -0.1330, -0.0368, +0.8144, -0.1377, +0.9899, +0.2202, +0.5290, +0.4051, +0.0875, +0.4018, -0.0897, +0.4689, +0.1784, +1.2015, -0.2091, +0.3738, +0.7411, -0.1037, -0.2531, +0.3753, +0.1518, -0.1351, +0.3109, +0.2514, +0.2564, -0.2295, +0.5837, +0.1827, -0.1766, +0.1354, -0.0895, +0.8237, +0.4432, -0.3878, -0.0831, +0.7593, -0.9360, -0.4304, +0.0854, -0.9559, +0.1652, +0.2593, +0.3457, -0.5038, -0.1274, +0.4108, -0.0822, -0.1254, +0.4618, -0.0763, -0.4831, -0.4356, +0.5051, -0.4981, +0.2556, -0.1951, +0.5189, +0.0342, +0.2521, +0.1616, -0.0889, -0.0898, +0.3080, +0.2350, -0.2451, +0.2174, +0.3621, +0.7812, +0.8774, +0.7318, +0.1353, +0.0450, -0.8271, +0.5002, -0.7626, +1.3003, +0.4312, -0.6138, -0.1886, -0.2482, -0.5597, -0.0913, +0.0901, -0.3340, +0.4610, -0.7099, +0.3496, +0.3531, +0.2044, -0.1057, +0.8731, -0.6409, +0.5075, +1.0426, -0.7513, -0.3445, +0.1368, -0.0996, +0.4420, +0.2574, +0.2526, +0.2479, +1.3376, -0.0922, +0.2117, -0.1829, -0.0796, +1.4092, -0.2412, +0.0230, +0.3997, -0.3151, +0.0822, -0.7801, -0.5236, +0.0178, -0.9243, -0.5063, +0.5279, -0.2153, +0.6347],\n[ +1.4703, +1.2982, -0.0402, -0.2425, +0.2475, -0.0634, +0.4494, -0.7865, -1.3634, -0.1417, -0.2296, -0.4785, -0.1649, -0.8603, +0.2584, +0.1614, -0.3570, -0.9330, +0.6203, -0.0622, +0.7080, +0.0465, +0.0007, -0.3840, -0.0692, +0.8238, -0.9584, +0.8456, -0.8160, +0.4813, +0.3511, -0.0667, +0.7182, -0.5514, +0.3622, -2.3828, +0.6020, +0.0268, +0.3220, -0.4798, -0.1588, -0.6060, +0.0455, -0.1433, -0.5643, -0.4139, +1.0605, -0.3742, +2.4072, +1.2664, +0.1790, +0.8069, -0.1647, +0.2396, +0.6662, -1.2914, -0.2594, +0.4644, +0.3116, -0.1614, -0.7359, -0.9930, -0.4012, +0.1121, -1.4436, -0.0692, -0.3936, -0.4506, -0.8123, +1.3841, +1.2088, +0.9410, +0.1766, -0.7999, +0.2004, +0.2158, -0.4015, -0.0484, -0.3474, +0.0076, -0.7156, -0.5177, +0.8020, -0.1543, +0.0434, -0.2892, +0.1293, +0.5121, +1.2537, -0.1804, +0.2232, -0.3681, -0.4471, -0.5221, +0.0472, -0.4809, +0.5478, -0.3337, +0.0365, -0.6143, +0.7588, -0.4842, +0.1022, +0.4930, +0.5103, +0.7343, +1.5783, +0.8545, -0.9636, -0.3476, +0.5063, +0.0514, +0.3894, -0.8884, +0.0449, 
+0.5949, -0.2352, +0.4529, +0.2948, +0.0390, -0.7291, +0.1560, +0.0583, -0.0293, +0.0597, +0.1500, +0.7947, -1.3192, -0.1611, -1.8176, +0.9184, -0.4718, +0.7685, -0.1878, -0.3281, -0.4007, -1.2232, -1.0534, +0.7252, -0.6923, +0.3817, +0.7918, +0.4028, +0.9145, -0.0126, +0.1591, -0.2679, +1.3580, +0.0393, +0.1672, -0.4754, +1.0527, -1.0403, +0.6002, +0.1479, +0.5369, -0.1762, +0.2856, +0.3766, +0.0768, -0.3013, +1.0575, +0.6036, +0.1491, +0.6333, -0.2535, +0.1049, +1.1164, -0.3730, -1.5166, +0.5125, +0.1594, +1.6473, -0.6655, +0.4091, +0.3638, -0.5117, -0.2984, +0.3496, -0.3595, -0.3811, -0.7337, -0.0616, +0.0449, -0.2416, -0.0535, -1.3687, +0.3001, +0.0840, -1.5971, +0.7260, -0.0056, +0.3636, -0.3623, +0.2754, -0.6047, -1.2724, +0.5213, -0.0817, +1.6422, -0.2038, +0.5006, +0.7695, +0.9680, +0.4044, +0.9807, -0.1294, -0.6531, -0.7634, +0.1569, +1.2350, +1.2254, +0.7556, -0.2483, +0.0008, +0.2024, +1.1261, +0.0476, -0.1030, +0.8195, -0.2271, +0.2323, +0.3329, -0.5824, -0.1342, +1.5237, +1.2337, -0.8420, -0.3239, -0.1192, -0.3712, -0.4645, -1.4312, -0.8121, -1.2723, -0.3935, -0.0198, -0.0735, +0.7791, -1.3631, -0.8079, +0.0497, -1.1443, +0.1149, -1.0024, -0.7783, +0.5146, -0.0900, +0.2533, -0.3197, +0.8769, +0.6185, -0.4478, -0.2109, +0.9331, +0.1720],\n[ +0.1914, +0.2367, -0.1629, -0.2121, -0.4821, +0.2042, -0.6208, -0.6690, -0.5308, +0.2751, +0.0058, +0.7246, +0.5435, -0.0560, -0.5556, -0.0165, +0.6507, +0.2775, +0.0431, -0.1270, +0.5966, +0.0977, +0.2673, -0.3136, -0.6983, +0.5473, -0.4528, -0.0306, +0.1088, -0.0717, +0.8475, +1.1961, +0.2308, -0.5281, -0.0913, +0.1937, +0.9934, -0.0981, -0.0031, +0.0337, +0.2783, -0.9124, +0.4286, -0.3317, +0.1689, +0.1181, +0.3386, +0.0006, -0.3645, -0.7711, -0.0095, +0.3991, -0.1333, -0.6688, +0.4254, +0.0559, +0.2671, +0.5579, -0.9046, -0.2728, -0.4878, -0.5582, +0.2866, -0.2315, +0.2442, +0.7093, +0.4266, +0.2287, +0.1558, -0.3636, +0.5561, -1.4425, -0.1192, +0.6250, -0.3334, -0.3746, -0.1556, -0.0636, +0.1641, +0.2846, +0.2501, -0.3944, -0.4040, -0.1150, -0.6399, +0.1609, +0.5772, +0.5262, -0.0361, +0.7536, +0.2497, -0.7278, +0.1851, -0.0259, -0.7170, -0.3231, -0.2127, -0.1993, -0.2452, +0.3888, +0.2922, -0.4279, -0.3269, +0.0756, +0.1988, -0.1777, -0.0172, -0.0312, -0.1341, +0.0269, +0.1057, -0.1237, -0.6615, +0.2952, +0.0767, -0.0778, -0.5447, +0.0285, +0.6785, -0.1794, +0.0174, -0.1047, -0.1765, +0.5859, -0.0837, -0.5167, -0.4280, +0.1787, +0.1762, -0.3390, +0.6731, +0.3160, +1.4737, -0.6632, +0.2942, -0.0664, -0.2121, -0.3356, -0.5989, +0.4079, -0.2348, +0.1857, -0.9283, +0.1377, -0.0273, +0.2580, +0.9982, -0.2876, -0.4685, +0.2032, +0.2644, -0.0163, -0.6819, -0.9446, -1.0937, +0.3750, -0.1436, -0.7771, +0.1031, -0.7357, -0.2943, +0.1850, -0.7978, -0.0747, +0.1661, +1.1391, -0.7675, -0.0677, +0.3038, +0.0354, -0.4304, +0.3333, +0.1617, +0.3656, +1.2621, -0.3526, +0.0040, +0.2239, +0.4336, -0.3180, +0.1273, +0.0488, -0.3459, -0.8226, +0.1833, +0.2386, -0.0214, +0.4261, -0.1491, +0.0678, -0.3771, -0.3311, +0.1703, +0.2722, +0.1684, +0.0372, -0.3637, -0.0246, -0.1785, +0.1548, -0.7042, -0.2042, -0.3849, +0.1776, -0.0536, -0.1493, -0.6161, +0.1739, -0.1201, +0.2798, +0.4395, -0.3635, +0.2738, +0.3116, -0.3052, +0.5248, +0.4516, -0.4132, -0.0536, -0.2435, +0.3823, +0.0218, +0.8631, -0.2524, +0.4060, -0.9034, +0.8162, -0.0182, -0.6250, +1.1502, +0.3362, +0.2115, -0.3169, -0.6787, -0.3379, +0.1962, -0.0973, -0.4042, -0.7209, +0.4764, -0.3201, -0.6573, -0.3488, -0.4370, -0.4269, +0.1515, +0.6850, -0.0542, +0.4701, +0.4224, -0.1287, +0.2937, +0.3044, 
+0.0689, -0.0916, -0.0069, -0.0343, -0.1095, -0.0536, +0.0074, -0.0784, -0.1075, -0.2998, -0.0088, +0.1853, +0.0493, -0.1932, +0.0370, +0.2847, -0.0820, -0.1846, -0.0661, -0.3196, +0.3797, -0.0899, +0.5101, +0.0220, +0.4859, +0.3703, -0.0893, +0.0720, +0.0627, +0.0008, +0.2151, +0.2527, -0.0721, +0.2133, +0.1272, +0.2210, -0.2012, +0.1135, +0.8663, +0.3867, +0.5365, +0.1263, +0.2323, -0.0163, -0.1178, +0.4859, +0.1277, -0.5319, +0.0241, +0.1482, -0.2447, +0.1440, -0.1298, -0.0774, +0.1329, +0.2049, -0.1232, +0.0122, +0.2751, -0.1623, -0.0355, +0.2232, +0.1650, +0.0788, +0.3453, +0.3085, -0.0324, +0.2696, -0.1154, -0.0087, +0.0087, -0.0158, -0.0721, -0.2802, +0.6397, +0.1834, +0.1206, +0.1670, -0.0771, +0.3098, +0.0817, -0.2540, -0.1481, +0.3749, -0.1109, -0.1901, +0.0763, +0.0562, -0.0834, +0.4640, +0.2060, -0.0520, +0.2273, -0.5758, +0.0613, -0.3936, +0.4325, +0.3668, +0.6940, +0.0661, +0.1322, +0.0727, -0.1327, -0.2523, -0.0368, +0.3519, +0.0042, -0.1765, -0.0588, +0.0173, +0.3305, -0.0382, -0.0072, +0.1676, +0.4529, +0.1004, +0.0037, +0.4982, -0.1013, -0.1774, -0.1828, +0.0592, -0.0361, +0.1937, -0.3102, -0.0329, +0.7132, +0.1081, +0.1713, -0.1541, -0.1572, +0.0455, +0.0631, -0.3422, -0.0472, +0.0655, -0.1291, +0.1069, +0.3075, +0.2226, -0.1803, -0.1247, -0.0509, -0.1519, -0.1252, +0.2028, +0.1468, -0.0986, +0.0458, +0.0799, +0.2962, -0.2362, +0.1770, -0.0585, +0.1376, -0.1471, -0.0011, +0.3521, +0.1243, -0.4805, -0.2684, +0.2582, +0.5082, +0.1511, +0.5160, -0.3806, +0.1276, +0.1041, -0.3635, +0.0573, +0.0853, -0.1572, +0.2395, -0.0647, +0.1263, +0.2338, +0.2568, +0.0774, +0.3536, +0.4368, +0.1272, +0.1465, +0.0028, +0.2233, -0.0329, -0.0857, +0.0750, +0.1408, +0.3485, -0.0137, +0.1533, -0.1600, -0.1337, -0.0974, -0.0299, -0.0323, +0.0912],\n[ -0.3504, +0.3179, -1.0918, -0.3050, +0.9112, +0.5211, +0.6364, -0.0960, -0.5305, +2.3346, -0.7544, +0.0837, -1.0392, +0.7178, -0.1126, +0.7165, +0.7288, +0.6258, +0.0957, -1.2260, -0.3948, +0.3753, -0.4606, -1.5339, -0.2479, +0.2242, +0.6865, +0.0099, -0.0191, +0.2671, -1.2197, +0.0032, +0.1167, +0.3508, +0.8007, -0.7487, -0.4238, -0.6390, +0.5996, +1.5764, +1.4712, -0.0523, +0.3140, -0.9890, +0.3494, +0.4810, +0.6032, +0.9013, +0.8956, +0.0639, -0.6232, -0.0639, +1.3263, -0.6084, -0.2796, -0.4429, -0.1116, +1.2581, -0.3688, +0.4144, +0.1753, -0.8812, +0.1593, +1.7526, +0.2204, +0.7330, +0.7426, -0.1957, -0.5876, +0.3344, -0.8807, -0.3715, -0.2438, +0.2862, +0.1338, -0.8705, +1.3052, -0.7570, -0.9733, +0.0851, -0.1355, +0.7726, +0.9775, +1.8582, +0.4909, +0.2100, +0.6480, -0.9390, -0.0109, +0.8414, -0.0162, +0.1212, -0.3045, +1.2366, +0.6475, +0.0346, -0.5155, -0.4277, -0.3017, -1.3542, +0.5016, +0.3839, +1.0181, -0.2053, +1.2256, +0.6403, +0.2995, +0.1694, +1.1001, -1.2428, -0.6839, -0.0096, +0.2364, +0.6608, -0.0468, +1.1051, -0.4258, +0.2412, -0.0888, +0.4045, -0.8473, -0.3771, +0.0673, -0.7719, +2.2755, +0.1519, +0.2434, -0.3192, +1.2985, +0.1204, -1.2696, +0.5239, +0.3562, +1.0429, +0.1553, -1.0302, -0.3452, +0.4931, -0.8529, -0.2207, -1.4133, -0.7739, -0.9373, -0.0316, -0.0720, -0.3610, -0.9920, -0.2442, +0.1684, +0.8980, -0.3976, +0.5449, -1.5636, -0.8419, -0.1692, +1.3491, +0.0253, +1.9946, +0.3635, +0.5639, +0.1305, -1.3483, +0.7867, -0.0294, +1.3528, -0.4485, +0.0917, -0.0045, -0.8327, +0.2827, +0.4188, -0.2105, +0.3741, +0.7789, +0.7423, +0.3054, +0.5409, -0.3485, +0.2274, +0.8441, +0.2397, +0.5060, +0.4379, +0.3308, -0.3921, +0.2715, -1.0825, -0.5958, +1.3096, +0.7103, +0.1227, +0.1876, +0.7009, +0.2243, +0.6355, -0.1630, +0.2463, 
+0.1666, -0.3954, -0.5123, -0.7415, -0.0754, +0.2274, -0.2862, +0.2964, +0.0954, +1.1562, -0.3559, +0.4572, +0.9555, -0.5453, +0.1497, +0.0105, -1.4956, +1.3926, +0.0228, -0.3674, -0.1625, -0.9157, +0.7016, -0.6211, +0.5463, +0.3265, +0.9720, -0.3992, -0.4322, -0.4880, -1.3482, +0.7272, -0.0962, +0.1547, +0.6783, +0.1496, -0.0992, +0.4034, +0.1924, -0.1571, -0.0865, +2.5906, +1.2446, -0.5539, -0.0809, +0.5130, -0.0844, +0.9177, -1.3037, -0.0150, -0.6602, -0.0371, +0.3352, -0.4679, -0.8509, -0.6062, +1.6027, -0.5644, -1.3218],\n[ +0.2795, +0.0642, -0.4321, +0.1457, +0.0121, -0.6645, +0.5804, +0.1799, +0.0117, +0.5672, -0.3526, +1.0581, +0.0324, +0.3915, +0.1572, +0.0034, +0.2864, +0.1973, -0.4619, +0.3294, -0.0396, -0.3446, -0.0199, -0.4083, -0.5971, +0.0460, +0.5415, +0.6182, +0.2403, -0.1356, -0.1049, +0.4659, -0.2931, -0.2250, -0.6128, +0.4385, -0.0995, +0.3239, +0.6134, +0.3091, +0.2143, +0.1731, +0.0841, +0.1188, -0.3338, +0.3281, +0.2010, +0.5196, -0.1062, +0.2815, +0.2324, -0.1379, +0.0957, -0.2191, +0.1873, +0.2349, -0.1210, -0.0056, +0.0639, +0.0467, +0.4666, -0.0640, +0.1107, -0.0418, -0.2104, +0.1299, +0.4768, -0.1160, -0.0516, -0.1398, +0.3253, -0.0154, -0.2684, -0.1671, +0.4381, +0.1137, +0.2647, -0.2082, -0.5011, +0.0124, +0.2318, -0.0503, +0.0276, +0.8962, +0.2799, +0.3159, -0.6777, -0.0680, +0.1874, +0.1644, +0.1518, +0.1808, +0.0803, -0.2494, +0.5158, -0.2165, +0.3377, +0.8342, +0.0527, -0.6154, +0.1771, -0.1781, -0.0620, -0.7147, -0.3531, +0.2948, +0.0663, +0.3923, +0.6540, +0.0174, +0.6028, -0.1087, +0.4604, -0.0161, +0.2117, +0.3383, -0.2849, +0.9566, -0.4082, +0.2355, +0.0818, -0.0087, -0.1493, +0.1262, +0.5054, -0.1072, -0.1005, +0.2100, +0.2903, +0.3424, -0.1168, +0.1297, -0.4548, -0.4380, +0.6081, -0.8408, +0.4011, -0.0471, +0.3515, -0.2566, +0.8423, +0.1594, +0.1761, -0.0926, -0.3069, +0.2977, +0.0840, +0.2581, -0.1953, +0.2386, +0.2111, +0.4178, -0.1321, -0.0219, +0.5732, -0.0419, -0.2215, -0.5132, +0.3878, +0.0862, -0.3056, +0.3618, +0.7162, -0.0857, +0.1711, -0.1898, +0.2286, +0.6806, +0.3809, +0.2188, +0.2804, -0.2275, +0.6766, +0.1952, +0.0073, -0.3949, -0.1420, +0.3417, -0.3001, +0.1640, -0.1118, -0.5296, -0.0676, -0.1672, +0.2324, -0.0308, +0.0786, +0.9096, +0.7681, -0.2398, +0.0431, -0.1578, +0.3719, -0.3167, +0.8528, -0.0046, -0.0758, +0.2415, -0.2559, -0.1975, +0.2967, -0.1169, +0.0701, +0.1372, -0.0018, +0.2277, -0.2830, -0.1139, +0.4305, -0.3147, -0.3947, -0.2646, -0.1233, +0.6040, +0.2712, +0.5285, -0.3873, +0.1587, -0.0579, -0.4679, -0.2148, -0.1199, -0.3221, +0.0139, +0.2610, -0.2710, -0.0914, -0.3749, +0.8469, -0.0296, +0.0442, +0.4900, +0.0576, +0.9743, +0.1762, -0.1878, -0.2236, +0.2311, -0.1199, -1.0481, -0.3038, -0.3667, +0.0169, -0.1208, +0.0286, +0.1759, +0.1569, +0.1607, +0.8348, +0.0245, +0.1323, -0.3582, +0.6256, -0.2299, +0.2870, +0.0372],\n[ +0.4222, -0.0573, +0.5081, -0.3760, +0.7451, -0.3354, +0.9356, +0.3947, +0.3637, -0.1050, -0.9781, +0.0794, -0.0114, -0.2344, +0.9890, +0.2250, +0.6542, +1.0132, +0.4594, +0.1899, -0.5553, -1.0695, -0.0464, -1.0895, -1.4412, -0.4980, +0.1758, +0.2815, +0.6579, -0.0964, -0.0780, +0.3784, -0.4967, -0.2489, -0.4366, +0.4137, +0.5941, +0.9789, +0.0680, -0.1903, +0.4329, -1.2772, -0.2180, +0.1931, -0.5630, -0.7352, +1.4870, -0.7178, -0.2086, +0.0265, -0.1702, +0.1937, -0.0929, -0.7027, +0.1547, +0.8569, -0.3356, +0.3963, +0.7436, +0.5573, -0.0482, +0.1243, +0.5669, +1.2589, -0.1877, -0.2490, +0.0755, +0.6648, -0.4557, -0.9240, +0.0784, +0.3344, -0.4727, -0.2906, +0.2908, -0.0087, -0.8507, +0.9188, -1.0174, 
-0.3030, -0.1066, -0.1418, +0.6422, -0.4228, +0.8031, -0.0486, +0.0789, +0.0159, +0.4195, +0.7959, -0.4948, +1.0052, -0.4370, -0.3082, +0.4453, -0.8982, +0.1833, -0.0529, -0.2152, -0.6723, -0.5515, -0.5945, +0.1152, +0.2134, -0.1846, -0.0981, +0.0175, +0.3448, -0.5466, -0.3407, +0.8680, -0.2085, +0.9641, -0.1393, -0.2101, -0.2797, +0.4820, -0.4386, -0.8097, +0.0695, +0.3653, +0.4081, -0.0723, +0.6429, -1.1749, +0.0525, -0.3654, -0.7180, +0.1074, -0.2005, +0.3934, -0.5641, -0.3977, -0.7490, -0.7064, -0.2359, +0.3631, -0.7209, +0.4213, +0.5278, +0.1775, -0.0226, +0.2676, -0.8443, +0.6325, -0.6958, -0.0446, +0.0145, +0.6466, +0.4083, +0.2505, -0.3254, -1.3595, -0.2900, +0.3684, +0.2849, -0.0168, -0.9716, +0.0890, +0.1455, +0.1932, -0.2316, -0.2257, +0.1177, +0.1743, -0.0457, -0.7849, +0.7017, -0.4356, +0.1590, +0.2895, -0.5398, +0.2746, -0.1955, -0.9924, +0.4579, +0.0071, -0.0746, +0.2151, -0.1827, +0.5044, -0.5401, -0.4354, +0.1411, -0.2633, +0.7402, +0.0019, -0.0907, +0.3447, -0.6141, -0.2552, +0.5652, +0.2047, -0.5309, +0.2966, +0.0315, +0.2328, -0.1916, -0.2069, -0.1830, +0.6357, +0.4643, -0.5741, +0.4861, +0.7827, -0.0479, -0.3617, -0.1293, -0.0768, -0.3499, +0.2789, -0.1915, +0.0334, +0.0698, -0.1463, +0.4034, -0.1668, +0.9289, -0.9585, -0.4268, -0.3682, +0.1801, -0.8269, -0.3515, +0.3098, +0.1076, +0.6027, -0.1172, +0.0588, +0.7357, -0.0441, -0.1213, +0.2665, -0.4022, -0.2895, -0.3017, -0.3352, -0.3031, -0.1225, -0.1627, -0.6134, -0.3426, +0.4558, -0.3361, +0.5679, +0.5502, -0.0751, -0.7189, +0.6709, +0.5030, +0.3367, -0.7531, +0.9245, -0.8705, +0.2312, +0.0640],\n[ -0.0035, -0.0034, -0.2528, -0.3608, +0.3682, -0.2742, -0.0113, -0.3332, +0.0137, -0.0303, -0.0216, +0.0627, +0.1116, +0.0642, -0.3810, +0.0072, -0.0297, -0.1337, +0.2035, -0.1883, +0.0202, -0.0721, +0.1509, -0.2001, +0.5420, -0.1128, -0.1005, +0.1079, +0.1587, +0.0785, -0.0149, -0.0047, +0.3506, +0.3381, -0.1188, +0.1962, -0.0130, -0.0012, -0.2426, +0.1351, +0.1945, +0.1089, +0.2607, +0.0582, -0.2137, +0.6271, +0.1350, +0.0122, -0.1709, -0.2135, -0.1653, +0.0632, +0.4087, -0.0052, +0.6810, +0.0148, -0.0323, +0.3402, +0.0679, +0.1630, -0.0387, -0.0974, -0.0687, -0.0846, +0.1532, +0.1129, -0.0719, +0.0268, -0.1528, +0.0340, -0.0595, +0.3099, +0.0794, -0.0118, +0.0639, -0.3403, +0.0216, +0.0587, +0.1669, +0.1529, -0.0114, -0.5345, -0.0442, -0.2288, +0.4265, -0.3686, +0.1109, +0.0872, +0.1012, +0.1267, -0.0799, +0.0821, +0.1215, -0.1414, +0.1897, +0.3936, +0.1082, +0.0621, +0.4993, +0.1550, +0.0853, -0.1830, -0.0109, +0.2747, +0.0066, -0.0596, -0.0209, -0.1222, +0.6425, +0.0010, -0.1389, +0.2046, -0.0143, -0.1457, +0.2317, -0.3896, +0.1399, +0.0099, -0.0574, +0.3197, -0.2081, +0.1138, +0.0407, -0.0042, +0.0752, +0.0459, -0.2480, +0.0822, -0.3238, +0.0392, +0.3801, +0.1197, +0.2401, -0.4082, +0.0690, +0.1263, +0.1912, +0.2382, +0.0859, -0.4645, +0.1211, +0.0598, -0.1411, +0.2445, -0.0240, +0.1098, +0.1856, +0.2397, -0.0041, +0.1188, -0.0047, +0.0424, +0.5290, +0.3475, +0.1254, +0.0685, +0.0748, -0.0148, +0.1090, -0.0598, -0.0045, -0.2743, +0.1616, +0.1719, -0.1873, -0.0924, -0.0310, -0.1332, -0.1907, +0.1881, -0.4096, +0.1428, +0.2975, -0.0189, +0.2020, +0.2217, -0.0367, +0.3606, +0.4407, -0.2666, +0.0850, -0.0286, -0.1606, +0.1058, +0.0790, +0.1514, -0.0547, +0.1220, -0.2964, +0.1235, +0.4847, +0.0590, +0.0304, +0.2212, +0.2509, -0.1166, +0.0666, -0.1935, +0.0430, -0.1518, +0.3587, +0.0948, +0.2795, +0.4758, +0.0747, -0.1021, +0.3524, +0.0737, +0.1654, +0.6264, -0.0327, -0.1234, +0.0612, -0.0495, -0.2775, -0.2223, -0.0795, 
-0.0540, +0.4101, +0.0887, +0.6357, -0.0557, +1.0091, +0.3728, -0.2812, +0.0505, -0.2041, -0.0258, -0.0876, +0.0441, +0.0864, -0.0484, +0.3001, +0.3026, +0.0090, -0.2822, -0.1953, -0.2267, -0.2283, -0.0550, +0.4666, -0.2008, -0.1510, -0.0588, +0.0770, +0.3002, +0.0200, +0.1235, -0.0181, +0.1489, +0.1830, -0.0221, +0.2327, -0.2378, -0.1699, +0.4160],\n[ -0.1059, +0.1515, +0.0140, +0.1541, +0.3425, -0.3510, +0.1152, -0.0096, +0.0941, +0.2030, -0.5814, +0.1163, -0.1798, +0.2130, -0.0308, -0.2670, -0.0307, +1.1302, -0.5876, -0.1339, +0.0085, -0.4369, -0.0089, +0.2912, -0.0841, -0.0179, +0.0648, -0.2874, -0.3967, +0.1606, +0.4970, +0.0788, +0.1305, +0.6643, +0.8430, +0.0646, +0.0448, -0.3034, -0.1129, -0.3283, +0.0435, +0.5129, -0.1075, -0.1883, +0.4669, -0.3403, -0.1316, +0.3523, -0.0285, -0.1050, -0.0914, -0.0981, +1.3083, -0.3445, -0.5618, +0.0772, +0.1022, -0.0018, -0.1475, -0.3236, +0.1725, -0.3069, +0.1140, -0.1664, +0.1786, -0.4218, +0.2003, +0.1524, +0.2598, +0.0738, +0.0487, +0.4012, -0.3416, +0.1218, -0.1210, +0.9181, +0.6536, +0.4306, +0.0462, +0.6724, +0.0628, -0.5143, +0.2787, +0.1601, +0.2327, -0.0700, -0.0750, -0.2287, +0.2918, -0.3260, +0.0710, +0.0433, -0.6924, +1.1640, +0.4052, +0.4894, -0.0450, +0.6232, +0.1674, -0.1411, -0.1690, -0.3918, -0.5933, +0.4397, +0.5854, +0.6074, -0.2093, -0.2082, -0.2854, -0.2730, +0.4739, -0.1440, -0.0470, +0.3595, +0.0505, +0.0261, +0.2556, +0.1997, +0.1467, -0.6146, -0.1772, +0.3620, +0.2029, -0.1267, -0.5105, +0.2807, -0.7753, +0.0937, -0.0990, -0.0566, +0.1463, +0.4945, -0.4445, +0.2097, -0.4238, +0.3291, +0.4771, -0.0934, +0.3328, +0.0125, +0.2216, +0.1743, -0.1884, -0.1027, +0.1037, +0.9080, +0.7220, +0.3006, -0.4461, +0.2261, +0.3679, -0.0083, +0.1107, -0.2835, -0.2122, +0.9465, +0.2494, +0.2009, +0.3854, +0.3620, -0.0232, +0.7130, +0.5914, +0.2367, -0.1907, +0.0126, -0.4836, -0.7799, +0.1886, +0.7844, +0.0632, -0.1651, +0.5767, -0.3709, +0.2219, -0.4823, -0.4274, +0.1896, -0.2534, +0.2759, -0.0053, +0.3047, +0.0830, -0.0579, -0.5030, -0.3829, +0.2817, -0.4349, -0.1430, +0.1322, +0.3572, +0.0090, -0.5643, +0.1411, -0.4972, -0.3413, -0.1593, +0.0419, -0.3883, +0.0232, -0.1065, -0.0463, +0.3074, +0.6826, +0.1211, +0.1497, -0.0864, +0.0145, +0.6440, -0.1601, +0.2847, +0.4424, +0.1406, +0.3737, -0.0508, -0.0550, +0.2662, -0.1365, -0.2198, -0.1154, +0.1846, +0.3939, -0.1867, +0.2783, +0.0214, -0.2052, +0.2852, +0.4517, -0.0070, +0.9497, -0.0211, -0.5972, +0.0932, -0.0961, +0.0633, +0.5786, -0.2438, -0.1156, +0.1324, -0.0539, +0.1527, +0.5762, -0.0996, +0.2633, -0.0446, +0.2198, -0.1069, -0.1196, -0.3112, +0.2806, -0.0446, -0.3673, -0.3333, -0.5466, -0.6300, -0.0539],\n[ +0.0886, -0.1736, +0.2303, +0.7947, +0.0393, -0.2798, -0.5487, -0.2241, -0.5310, +0.5406, -0.0532, -0.0064, +0.2742, -0.3217, -0.2300, +0.1303, -0.2625, -0.4393, +0.0598, -0.2584, +0.0027, +0.2343, +0.3005, -0.3532, -0.7355, -0.1263, -0.0904, +0.0143, +0.0917, +0.3467, -0.1259, +0.0210, -0.0186, -0.0360, -0.4055, +0.0844, +0.3825, +0.1753, +0.3408, +0.2307, +0.4749, +0.1994, +0.1054, +0.3573, -0.2327, +0.0036, -0.2009, -0.1823, +0.0889, -0.0884, -0.2532, +0.3909, -0.2113, -0.2426, +0.1061, +0.2335, +0.3052, +0.1240, -0.2379, -0.0317, +0.0107, -0.3895, -0.0050, -0.0169, +0.1984, -0.1232, -0.2768, +0.4420, +0.0246, +0.3070, -0.1335, +0.2652, -1.1367, +0.2838, +0.3087, -0.5247, +0.1373, -0.5731, +0.0517, -0.8331, +0.0370, +0.0661, -0.1572, +0.1051, -0.4990, -0.9321, +0.2448, +0.2690, +0.2054, -0.4378, +0.0132, -0.3105, -0.1545, -0.3607, +0.4425, -0.1347, +0.2146, +0.2794, -0.1716, 
-0.0498, +0.0685, +0.4673, -0.5200, +0.4520, -0.0439, +0.0930, -0.0961, -0.2375, +0.2483, -0.1084, +0.0855, +0.3815, +0.1763, -0.2518, -0.0570, -0.2764, -0.9503, +0.4452, -0.2262, +0.2897, +0.2107, -0.2425, -0.2231, +0.1219, -0.2070, -0.3488, -0.2947, +0.0958, +0.0930, +0.1411, +0.3361, -0.2040, +0.2504, -0.7070, +0.2827, -0.1822, -0.1630, -0.0348, +0.0428, +0.0818, -0.1700, +0.0074, +0.1539, +0.0177, +0.3407, -0.0548, +0.5441, -0.1306, -0.4407, +0.0646, -0.3218, -0.0806, +0.1349, -0.2540, -0.0872, +0.4692, -0.4425, -0.5095, -1.0056, +0.4027, -0.0200, -0.1376, +0.0475, -0.7649, -0.3253, -0.6835, -0.0361, +0.1053, -0.0829, +0.3581, +0.3582, +0.2497, +0.0393, +0.5963, +0.3694, -0.2755, -0.4194, +0.3549, +0.2054, +0.2459, -0.3597, -0.1185, +0.0509, +0.6428, -0.3443, +0.2562, +0.0889, +0.3400, -0.6770, -0.0713, -0.2477, +0.2696, -0.1843, +0.4073, -0.2708, -0.0514, +0.2385, +0.0235, -0.1136, +0.4380, -0.0392, +0.0524, -0.4062, +0.1885, -0.7391, +0.2537, +0.0709, -0.3841, -0.6341, +0.1817, -0.4291, -0.5313, -0.9104, +0.3972, +0.0307, +0.2267, -0.0018, +0.2399, -0.1271, -0.1045, +0.1824, -0.9698, +0.0084, -0.2353, -0.0986, -0.3136, -0.0781, -0.0959, -0.3397, -0.8343, -0.6744, +0.2505, +0.0941, +0.0670, +0.2594, -0.5116, -0.1491, +0.4280, +0.3119, +0.6404, +0.1156, +0.2095, -0.0151, -0.0291, +0.0717, -0.2272, -0.3254, +0.0794, +0.0487, +0.1850, -0.2387, -0.0572, -0.2224, -0.4618, -0.5881, -0.0762],\n[ +0.1613, -0.0121, -0.6823, +0.4505, +0.0159, -0.6711, -0.4259, -0.0010, -0.1023, -0.0402, +0.1031, -0.1431, -0.0213, +0.4424, -0.1080, -0.2636, -0.0149, -0.1129, -0.4833, +0.1006, -0.8493, -0.2696, +0.3725, +0.1174, +0.3757, +0.0964, -0.1318, +0.1640, +0.0253, +0.2252, -0.1748, -0.0377, -0.0120, +0.1607, +0.0376, +0.0201, +0.0198, -0.3454, -0.2380, +0.4885, -0.2846, -0.3210, -0.3668, +0.1292, +0.0733, -0.1518, -0.1435, +0.7051, +0.0440, -0.3134, +0.1986, -0.0950, -0.2036, -0.5937, -0.5377, +0.0555, +0.1399, -0.1929, -0.1455, -0.0545, -0.5609, +0.4115, -0.3768, -0.6463, +0.4130, -0.0706, +0.1245, +0.2189, -0.1688, -0.4369, -0.0189, -0.1132, +0.0900, -0.0476, -0.3192, -0.0721, +0.3550, +0.0791, -0.1973, +0.2124, -0.3421, -0.2505, -0.4525, -0.3532, -0.4306, +0.0726, -0.1936, +0.3790, +0.0773, +0.1141, +0.3051, -0.1605, -0.1697, -0.3239, -0.0387, +0.0490, +0.4097, -0.1983, -0.3103, -0.1348, +0.0983, -0.1906, -1.0984, -0.3224, +0.1721, +0.0596, -0.0802, +0.1193, +0.5012, +0.4600, +0.1982, -1.2366, -0.3397, +0.0045, +0.0963, +0.0420, +0.0534, -0.6935, -0.1704, +0.2974, -0.5040, -0.4112, +0.1151, -0.3827, -0.6324, +0.1939, -0.0368, +0.1991, -0.2959, +0.1311, +0.0653, -0.1839, -0.3214, +0.2320, +0.2095, +0.0678, +0.0375, -0.1832, +0.4512, +0.2901, -0.4755, +0.2493, +0.2288, +0.0834, -0.3465, +0.4282, +0.0664, -0.2354, +0.3027, +0.2787, +0.2102, +0.1778, -0.2229, +0.2759, -0.2128, +0.5151, -0.0355, -0.2695, +0.0498, -0.6670, -0.2849, -0.1452, +0.4245, +0.4908, +0.4679, +0.0071, +0.3998, +0.0380, -0.1295, -0.2110, -0.0502, +0.0330, +0.5156, +0.2586, -0.1972, +0.1425, -0.5514, -0.1277, +0.3379, -0.1527, -0.0876, -0.1846, +0.3335, -0.2494, -0.1005, +0.0055, +0.1809, +0.2810, -0.2826, +0.1018, -0.1270, +0.2189, +0.2438, +0.0231, -0.0637, -0.4122, -0.1142, +0.0933, -0.0771, +0.0415, -0.0525, +0.1933, +0.5861, +0.1639, -0.0109, -0.1786, -0.3391, -0.0341, +0.4664, +0.3602, -0.2455, -0.1581, +0.0035, -0.6658, -0.4299, +0.0850, -1.0917, -0.1338, +0.4143, +0.0696, -0.3031, +0.3610, +0.2661, +0.1521, +0.6565, +0.1507, -0.1110, +0.1716, +0.2436, -0.5497, -0.6088, -0.4964, +0.0426, -0.4534, -0.0126, -0.2565, -0.2301, 
+0.0974, +0.1669, +0.5493, +0.0544, +0.2104, +0.0747, +0.2049, +0.2100, +0.1637, -0.5843, +0.3274, -0.4244, +0.4082, +0.2544, +0.0823, +0.2458, +0.1169, +0.1472, +0.0367]\n])\n\nweights_dense1_b = np.array([ +0.1494, +0.0039, -0.0527, -0.0962, +0.0315, -0.1903, -0.0055, -0.0615, -0.0553, -0.1921, -0.1072, -0.0451, -0.1178, -0.0108, -0.0561, -0.1273, -0.1139, -0.0796, -0.0175, -0.0684, -0.0933, -0.1067, -0.1255, +0.0234, -0.0703, -0.2935, -0.1144, -0.1077, -0.1229, -0.1456, -0.1558, +0.0667, +0.0781, -0.0370, -0.1065, -0.0846, -0.0311, +0.0140, -0.1026, -0.1604, -0.1456, -0.2161, -0.0539, -0.1573, -0.0612, -0.0970, -0.2703, -0.1411, -0.2202, +0.1398, -0.0699, -0.1028, -0.0294, -0.1635, +0.1305, -0.1769, +0.1233, -0.3288, +0.0167, +0.0028, -0.0550, -0.1999, +0.0864, +0.0176, -0.0370, +0.0562, -0.0984, +0.0783, +0.0478, -0.1250, -0.2548, -0.1360, -0.0914, -0.0499, -0.0393, +0.0658, -0.1763, -0.1398, -0.0844, -0.1208, -0.0743, -0.1800, +0.0300, -0.0678, -0.0124, -0.0126, -0.0269, -0.1089, -0.0306, -0.0458, -0.0984, -0.0951, -0.0395, +0.0311, -0.1592, -0.1021, -0.3524, -0.1006, -0.1488, -0.2021, -0.0736, -0.1389, -0.0688, +0.0139, +0.0820, -0.1823, -0.0241, -0.0035, -0.0373, +0.0326, -0.0615, +0.0254, -0.0134, +0.0239, -0.1209, -0.0289, -0.1025, -0.1184, -0.1214, -0.0290, -0.0975, +0.0992, -0.0952, -0.1083, -0.1422, -0.0548, -0.0305, -0.2508, -0.0206, -0.2072, -0.0452, +0.0112, -0.0011, +0.0417, +0.0035, +0.0200, -0.1942, -0.0485, -0.1137, +0.0025, +0.0786, +0.0299, -0.1345, +0.1579, -0.0281, -0.0762, -0.0884, +0.0861, -0.0782, -0.1839, -0.0320, -0.2262, +0.0617, -0.1688, -0.2112, -0.1037, -0.1056, -0.0510, -0.0038, +0.0895, -0.2475, -0.0174, -0.1610, +0.0683, -0.2035, -0.0045, -0.1483, +0.0695, -0.1126, -0.1221, +0.0023, -0.0862, -0.1456, -0.1136, -0.0341, +0.0209, -0.0928, -0.0567, +0.0725, -0.0127, -0.0353, -0.1072, -0.0613, -0.1992, +0.0168, -0.1877, -0.1331, -0.1073, -0.1444, +0.0069, +0.0563, -0.1909, -0.1581, -0.0970, -0.0138, -0.0487, -0.1580, -0.1599, -0.0073, -0.2733, -0.1690, -0.2064, +0.0071, -0.2040, -0.1194, -0.0529, -0.1376, -0.1651, -0.0822, -0.1775, -0.1370, -0.2621, -0.1093, -0.0536, -0.0812, +0.0422, +0.0078, -0.0437, -0.2098, -0.1435, +0.0691, -0.1066, -0.1223, +0.0092, -0.0332, -0.0460, +0.0013, -0.0559, -0.1134, -0.1300, -0.0909, +0.0010, -0.2052, -0.1058, -0.0746, +0.0049, +0.0770, -0.0889, +0.0130, -0.1252, -0.2867, -0.0034, +0.0268, -0.0217, -0.0712, -0.0839, +0.0532, -0.1541, +0.0243, -0.2173, +0.0914, -0.1751, -0.1907, -0.0940, -0.0697, -0.1361])\n\nweights_dense2_w = np.array([\n[ -0.0446, -0.0941, -0.3955, -0.1329, +0.3261, -0.2340, +0.1304, +0.1444, -0.0064, +0.4035, +0.5651, +0.2906, -0.2569, -0.1496, -0.6781, +0.3623, +0.3923, -0.5059, +0.0361, -0.2793, -0.3257, -0.5450, +0.1372, -0.3239, +0.1807, +0.4815, -0.5801, +0.0871, +0.0759, -0.4970, +0.1630, -0.2405, +0.1363, -0.2391, -0.3084, -0.1048, -0.4797, +0.1461, -0.4725, -0.4272, -0.1229, -0.3213, -0.3729, -0.2070, -0.2260, +0.0191, +0.4376, +0.2038, +0.4303, -0.3538, -0.3511, -0.0173, -0.0621, +0.1285, -0.0412, -0.4655, -0.5779, -0.1277, -0.0267, -0.3498, +0.1073, +0.0636, -0.7525, +0.1612, +0.3821, -0.1038, -0.8780, +0.3191, +0.2394, -0.0068, +0.0812, -0.2313, -0.2938, -0.3093, -0.3838, -0.0023, -0.5775, +0.4613, +0.3519, -0.1342, -0.4099, +0.0764, -0.2711, -0.0370, +0.0079, +0.0552, +0.4197, -0.9073, -0.2105, +0.2615, -1.1096, +0.0082, +0.4138, +0.2725, +0.0815, +0.3513, -0.7101, -0.1694, -0.0091, +0.2357, -0.2003, +0.3896, -0.0686, -0.6690, -0.0813, +0.2245, +0.2243, -0.3500, +0.3599, +0.0891, -0.5524, 
-0.1980, -0.3951, +0.0085, +0.0538, +0.6534, -0.8200, +0.1080, -0.3786, +0.1675, -0.0115, +0.1537, +0.4711, -0.0807, -0.1799, +0.0223, +0.2167, +0.2362],\n[ -0.2444, -0.6241, -0.1147, -0.2118, +0.0857, -0.3879, +0.3546, -0.0733, -1.0193, +0.1459, +0.0498, -0.2553, -0.2879, -0.5408, -0.3574, +0.4237, -0.7382, -0.2695, +0.0282, -0.2548, -0.2155, -0.2600, -0.1573, -0.3413, -0.6334, +0.0929, -0.0176, -0.3606, +0.2845, -0.0617, -0.3122, +0.1209, -0.2462, +0.3425, +0.2171, +0.5273, -0.1572, -0.1532, +0.5552, -0.0139, +0.1994, +0.0812, -1.0411, +0.3236, -0.2614, -0.3478, -0.1349, +0.1892, +0.3215, -0.2034, -0.0209, +0.2062, -0.3500, +0.1164, +0.1389, +0.2095, +0.2791, -0.1504, +0.1044, +0.4266, -0.7286, +0.2604, -0.2693, -0.1244, +0.1317, +0.3252, -0.7114, +0.5549, -0.1956, -0.1914, -0.4135, -0.3006, -0.4114, +0.0110, -0.3341, -0.3824, -0.2613, -0.7747, +0.5477, -0.0026, -0.2780, +0.3150, -0.3447, -0.0708, -0.1666, +0.1757, +0.2413, -0.0522, -0.1630, -0.1557, +0.2338, -0.4316, +0.0440, -0.3387, +0.2614, -0.2280, +0.0125, -0.0967, -0.1448, +0.3245, +0.1507, -0.2703, -0.2256, -0.3510, -0.5730, -0.6961, +0.3662, -0.3726, +0.3866, +0.1329, -0.7782, +0.0877, -0.5175, -0.0080, -0.4061, +0.0042, -0.0629, -0.0861, +0.1035, +0.3153, -0.1609, -0.0586, +0.1450, +0.0041, +0.1466, -0.6496, +0.0201, +0.0129],\n[ +0.1386, -0.0945, +0.2795, -0.0256, -0.0028, -0.0133, -0.4415, -0.1106, +0.1377, +0.3588, -0.4300, +0.3529, +0.2544, +0.1488, +0.4599, +0.3566, +0.1138, -0.4420, -0.1201, +0.1293, -0.5788, -0.0978, -0.1088, +0.1153, -0.4146, +0.1100, +0.0620, +0.1854, -0.7047, -0.0504, +0.2937, +0.0530, +0.0955, -0.0370, +0.0178, +0.1662, -0.2216, -0.0763, -0.4237, -0.5979, +0.0670, -0.0144, +0.0355, -0.5579, +0.2910, +0.2649, +0.0297, -0.4149, -0.2434, -0.2470, -0.2179, +0.1319, -0.1494, +0.3073, -0.3072, -0.3915, +0.3447, +0.2100, -0.4788, -0.0824, +0.2316, -0.0760, -0.2466, +0.0284, -0.0477, -0.0012, -0.7704, +0.3028, -0.8417, +0.0159, +0.6322, +0.0871, +0.1442, -0.0744, -0.0640, +0.3171, +0.0569, -0.0746, -0.0438, -0.2017, -0.2061, +0.0221, -1.0163, -0.3438, -0.0295, +0.1516, -0.2567, -0.4551, +0.1802, +0.2473, -0.3067, -0.0302, -0.2944, +0.0247, +0.1530, -0.4357, -0.2943, -0.1051, -0.3184, -0.0940, +0.1110, -0.2013, +0.1567, +0.4995, -0.6288, +0.5756, +0.0462, -0.0036, -0.8926, -0.1855, +0.0631, -0.7486, +0.0764, +0.2724, -0.3770, -0.4466, +0.1047, +0.2308, +0.1856, -0.3025, -0.2538, -0.0370, -0.6129, -0.6437, +0.0648, +0.0632, +0.1569, -0.5954],\n[ -0.2110, -0.4682, +0.1521, -0.2096, +0.0752, -0.2327, -0.3026, +0.0227, +0.1361, +0.1600, -0.0204, -0.5653, +0.3198, -0.2031, -0.5835, +0.0099, -0.0005, -0.0378, +0.1420, +0.2448, -0.1970, -0.6869, +0.1893, -0.3542, +0.3116, +0.1331, +0.2216, -0.0751, +0.0818, -0.0981, -0.4190, +0.5141, -0.0763, +0.0360, -0.1761, +0.0261, -0.5464, -0.7008, -0.2040, -0.1353, +0.2963, -0.1157, +0.0362, -0.5863, -0.8737, -0.3414, +0.0769, +0.1460, -0.1560, -0.0128, +0.2671, +0.3803, +0.2876, -0.1158, +0.0046, -0.1798, -0.0418, -0.2737, -0.4797, -0.4231, -0.1860, -0.5159, -0.8221, +0.3182, -0.7719, +0.1197, +0.1327, -0.3147, -0.1979, -0.3086, +0.1928, -0.0266, -0.2050, -0.0781, +0.4301, -0.0320, +0.5265, -0.2464, +0.3842, -0.1679, +0.0903, -0.0991, +0.5649, -0.2095, -0.3192, +0.5082, +0.2163, -0.1517, +0.3231, -0.0450, -0.6572, -0.2979, +0.5014, -0.6241, -0.1478, -0.3796, +0.0776, +0.0567, -0.2926, +0.2549, -0.4551, -0.1126, -0.3430, +0.0577, -0.0678, -0.6104, +0.1488, +0.0124, -0.0232, +0.1021, +0.4954, +0.0608, -0.2865, -0.1660, +0.3615, +0.2145, +0.0863, -0.0482, +0.1043, -0.0310, 
+0.1584, -0.1514, -0.2689, -0.2622, +0.2389, -0.0352, +0.0505, -0.4725],\n[ -0.0229, -0.4313, +0.0184, -0.8077, +0.5455, +0.2926, +0.1833, -0.1669, -0.3142, -0.2872, -0.1973, +0.1569, -0.0577, +0.2992, +0.0187, -0.3197, +0.1143, +0.3957, -0.0845, -0.0656, -0.6643, -0.1391, -0.3222, +0.7873, +0.0598, +0.3911, +0.0200, +0.1554, +0.2937, -0.4432, -0.2098, -0.5019, +0.2838, +0.1635, -0.0923, -0.3384, -0.3427, +0.3220, +0.4029, -0.3257, +0.1441, -0.0432, -0.3495, -0.5423, +0.2564, -0.0237, +0.4511, +0.0765, -0.0877, -0.0430, +0.3376, -0.2686, +0.3190, +0.0444, -0.0437, +0.1836, -0.0036, -0.1122, -0.5164, -0.0967, -0.0943, -0.0267, -0.2884, +0.1110, -0.0452, -0.5358, -0.9480, -0.0428, +0.0632, +0.5967, -0.5697, -0.5430, -0.3233, -0.1837, -0.3865, -0.1141, -0.3161, -0.3135, -0.1826, -0.2103, +0.1367, +0.0738, -0.4706, -0.2797, +0.2463, +0.0844, +0.1632, +0.2067, +0.2860, -0.1431, -0.1538, -0.4689, +0.1815, -0.7608, -0.5361, +0.0924, -0.0427, +0.0261, +0.1086, +0.0071, -0.0351, +0.1962, -0.5212, -0.2708, -0.0387, -0.4651, -0.1066, +0.1502, -0.0507, -0.4060, +0.0178, +0.1007, -0.3050, -0.3290, -0.7691, -0.2058, +0.2255, -0.4308, +0.1231, +0.2162, +0.3136, +0.1682, +0.0186, -0.2181, +0.1038, -0.7105, +0.0725, +0.1498],\n[ +0.2957, +0.3931, +0.5196, +0.0034, -0.1459, -0.4853, +0.0045, +0.3441, -0.6762, +0.3599, -0.2657, -0.0656, +0.2883, +0.2954, -0.3889, +0.5849, +0.0604, +0.2256, -0.7857, +0.0923, +0.1446, -0.4545, -0.0598, -0.0619, +0.0932, -0.0672, +0.1070, +0.2962, +0.1364, -0.0010, +0.0422, +0.1428, -0.0486, -0.6373, -0.1726, +0.1041, +0.0355, +0.1551, -0.8913, -0.6888, -0.1694, -0.7115, +0.2172, -0.7797, +0.2613, -0.6847, -0.3206, -0.0662, -0.1301, -0.2691, -0.3250, -0.5412, +0.3733, -0.6999, -0.1384, +0.2234, -0.6276, -0.6873, -0.1056, -0.2576, -0.3469, -0.0035, -0.3412, -0.4947, -0.2108, +0.2637, -0.4840, -0.7649, +0.0512, -0.5292, -0.1976, -0.3880, -0.1312, -0.2244, -0.5623, +0.1554, -0.0144, +0.1044, -0.3046, -0.0893, +0.0786, +0.1556, -2.0057, -0.2783, +0.2209, -0.6801, +0.1200, -0.0627, -0.1416, +0.0931, -0.1593, +0.6807, -0.0833, -0.3581, -0.1243, -0.2332, +0.0648, -0.2536, +0.0485, -0.3385, -0.3248, -0.2046, -0.0735, +0.1252, -0.0138, -0.2593, +0.3831, -0.2624, -0.0680, +0.1553, -0.6539, -0.3949, -0.2072, -0.3056, +0.0470, -0.2456, -0.2607, -0.1460, +0.1578, -0.4485, -0.1730, +0.0905, -0.5966, +0.1038, +0.4146, +0.0169, -0.2569, +0.0757],\n[ -0.0059, +0.2504, -1.8223, +0.0071, +0.0843, +0.0531, +0.0537, -0.4270, -0.0225, +0.0206, +0.1877, -0.0539, -0.1275, -0.1903, -0.2041, +0.1867, -0.2453, +0.0968, -0.7747, -0.0996, -0.1644, -0.2217, -0.0222, -0.0323, +0.1196, -0.4662, -0.0733, -0.2897, -0.0650, +0.1710, +0.3257, -0.6486, +0.1965, +0.4933, -0.1914, -0.7322, -0.7016, -0.2255, +0.0376, +0.2672, -0.5327, +0.2688, -0.0806, -0.2247, -0.1833, -0.4959, -0.7814, -0.6611, +0.1856, -0.1662, -0.0610, +0.4234, +0.1225, +0.0887, +0.1060, +0.1328, +0.1165, -0.3699, -0.3317, +0.1403, +0.1845, +0.0305, -0.2385, -0.0139, +0.2337, -0.0865, -0.4594, +0.0413, -0.3618, +0.1392, +0.0640, +0.3269, -0.4006, +0.6018, -0.1011, +0.1064, +0.0764, -0.7746, -0.2271, -0.8182, -0.1293, -0.5567, -0.3629, -0.6294, +0.1535, -0.5792, +0.0733, -0.1860, +0.3598, -0.0287, +0.3026, -0.4029, +0.0910, +0.3008, +0.3386, +0.0652, +0.1527, +0.2761, +0.2965, -0.4328, -0.1331, -0.0883, +0.1920, -0.2682, +0.2188, -0.3514, -0.2600, -0.0027, -0.9167, -0.3502, +0.6296, +0.1959, +0.0535, +0.2855, +0.1944, +0.6012, -0.8331, +0.3115, +0.3111, +0.0916, -0.1444, -0.0131, -0.2425, +0.2020, -0.2527, +0.2171, +0.0902, +0.2377],\n[ +0.1734, 
-0.3360, +0.0102, -0.1136, +0.0421, +0.0059, +0.0081, -0.1179, -0.2811, -0.3038, -0.0654, -0.0284, +0.1823, -0.5043, -0.0641, +0.3135, +0.2485, -0.1943, -0.7957, -0.4855, +0.3363, -0.3024, +0.0201, -0.0095, -0.2435, -0.3528, +0.1311, -0.1494, -0.2564, +0.0214, +0.0812, -0.0815, -0.3220, +0.0243, +0.1589, +0.2596, +0.0788, -0.1100, -0.0409, -0.0636, +0.0915, -0.0302, -0.1303, +0.2093, -0.7333, -0.4362, -0.0231, +0.1416, -0.0775, -0.6113, +0.1495, +0.1520, +0.1515, -0.2184, +0.2908, -0.4809, +0.1760, -0.5343, +0.0255, +0.3853, -0.6806, -0.0456, -0.0844, -0.2962, -0.2281, +0.2654, -1.0656, +0.0755, -0.1765, +0.1391, +0.1288, +0.2546, -0.0349, +0.0460, +0.0746, -0.3408, -0.1496, -0.7456, -0.2263, -0.1545, -0.3210, -0.1699, -0.0215, -0.0136, +0.3570, +0.2186, +0.0431, -0.2374, -0.1237, -0.0502, +0.3524, -0.3504, -0.8003, -0.1479, -0.1306, -0.1243, -0.0349, +0.0689, -0.1313, -0.1078, -0.0253, -0.0653, -0.0671, -0.4589, +0.1150, -0.2785, -0.4662, -0.0127, -0.2129, +0.2506, -0.3115, +0.3973, -0.0523, +0.1712, -0.0820, +0.0759, -0.0481, -0.0596, +0.1819, -0.3091, +0.1873, +0.2186, -0.5450, +0.0715, +0.1790, -0.2883, +0.0226, +0.1730],\n[ -0.2602, -0.3215, -0.0906, +0.1177, -0.1313, -0.5380, -0.4104, -0.4251, -0.0429, +0.1910, +0.2934, -0.0522, -0.0154, +0.0940, -0.7726, -0.0532, -0.1507, -0.4646, +0.0155, -0.1635, +0.4168, +0.3530, -0.0973, -0.0352, -0.1655, -0.2412, +0.1004, +0.3036, -0.0438, -0.6966, -0.3239, -0.8388, +0.0405, -0.0482, -0.4399, +0.5633, -0.0991, +0.0878, -0.4014, -0.3725, -0.1872, +0.1927, -0.0272, +0.2497, -0.3443, +0.0991, -0.2369, +0.2886, +0.0404, -0.3542, -0.3079, -0.3784, -0.1192, -0.5404, +0.1479, +0.1591, +0.2231, +0.2725, -0.1559, +0.2966, +0.0741, +0.0748, +0.3326, -0.7224, -0.0826, -0.0814, +0.0424, -0.4582, +0.3849, -0.0785, -0.1042, +0.1102, -0.2348, -0.0806, -0.0245, -0.1454, +0.3831, -0.0851, -0.6094, -0.4785, -0.1979, -0.2312, -0.3423, -0.1665, +0.3068, -0.6060, +0.1745, -1.2672, -0.3371, +0.0945, -0.0562, -0.6999, -0.2454, -0.1074, +0.2825, -0.3700, +0.3908, -0.1738, +0.0191, -0.5917, -0.0799, -0.4251, -0.9711, +0.0096, -0.7487, -0.1138, +0.2945, -0.4373, +0.1421, -0.1377, +0.2897, -0.0829, +0.2462, +0.1447, +0.0142, -1.0093, -1.0673, -0.6900, -0.2250, +0.1916, -0.8061, -0.3221, +0.1838, +0.0782, +0.0042, +0.0976, +0.4808, -0.2449],\n[ -0.0186, +0.0312, -1.1666, +0.4912, +0.0535, -0.0724, +0.4590, +0.5356, -0.2967, +0.0945, +0.3760, +0.1753, +0.5287, -0.3805, -0.8444, -0.1378, -0.2789, -0.0429, +0.5450, -0.0904, +0.0773, -0.4917, -0.7371, +0.0729, +0.1716, +0.0492, +0.2712, -0.4112, +0.4279, -0.2601, +0.1060, -0.1974, -0.0752, -0.0291, +0.0424, -0.3498, +0.3540, +0.4572, -0.0620, -0.1689, -0.7175, -0.0296, +0.0981, -0.0719, +0.1335, -0.0579, -0.7234, +0.5031, +0.0332, +0.2731, -0.3885, -1.1700, +0.2232, +0.1357, -0.1256, -0.3946, +0.1373, +0.2081, +0.0855, -0.5177, -0.0246, +0.2997, -0.5857, -0.3490, +0.1922, -0.6260, -0.2010, -0.0540, -0.5983, -0.1169, -0.0199, +0.2142, -0.0344, -0.0711, +0.2242, +0.0118, +0.1590, +0.1529, +0.1587, -0.7301, +0.1030, -0.2880, +0.2163, -0.2043, -0.0016, -0.0082, +0.1785, -0.6902, -0.1381, -0.1248, +0.3989, -0.1963, +0.4378, -0.0211, +0.2309, -0.5025, +0.1148, +0.0732, +0.0534, +0.0736, -0.3570, +0.1016, +0.0007, -0.0154, -0.2812, +0.4252, +0.0808, +0.3596, +0.2169, -0.2200, -0.8553, -0.7431, -0.0798, -0.5308, +0.0099, +0.1634, +0.1090, +0.0750, +0.0094, -0.3583, -0.2442, -0.0193, +0.1221, +0.2039, -0.4970, -0.4103, -0.2623, +0.1852],\n[ +0.0035, -0.2371, -0.0652, -0.5765, +0.2370, +0.1303, -0.1977, -0.6236, +0.1144, -0.1280, 
+0.0588, -0.0054, -0.1472, -0.2433, -1.1024, +0.2071, -0.1101, +0.7363, -0.0024, +0.2096, +0.0864, -0.2140, +0.1191, -0.1789, +0.2744, -0.5595, -0.4182, +0.0468, -0.2223, +0.1140, -0.0006, -0.4948, -0.2110, +0.2943, -0.0723, -0.0925, -0.4041, +0.2033, -0.5595, +0.0529, +0.1126, -0.8388, -0.2227, -0.0921, -0.2104, -0.0412, +0.1017, +0.0853, +0.2641, +0.0581, +0.2180, -1.8387, +0.3215, +0.2269, -0.1205, -0.0315, +0.0466, -0.4390, -0.9485, -0.9049, +0.1571, -0.1657, -0.1598, -0.6076, +0.3959, -0.1420, +0.0664, -0.0393, +0.2893, -0.4381, +0.0649, -0.0343, +0.1189, -0.3135, +0.0619, +0.0534, -0.0150, -0.0847, -0.3386, -0.4189, -0.2144, -0.5014, -0.4263, -0.3158, -1.1655, +0.4229, +0.1088, -0.0160, +0.0623, -0.1086, +0.0452, -0.1172, +0.0235, -0.4329, +0.0315, -0.2861, -1.8719, +0.1323, -0.1785, +0.0296, +0.0625, +0.1258, -0.2638, -0.2692, +0.2976, +0.0126, -0.5463, -0.4668, -0.1375, +0.0925, -0.1242, -0.8335, +0.1498, -0.4909, +0.0150, +0.1882, -0.3309, +0.3453, -0.0746, +0.1613, -0.0352, +0.4158, -0.6152, -0.1984, +0.3839, -0.3962, -0.0498, -0.8401],\n[ -0.2221, +0.4577, -0.3264, +0.0867, +0.2288, -0.6050, +0.0765, -0.0253, +0.0234, -0.1089, +0.2742, +0.0977, -0.2987, +0.3709, -0.6876, -0.0137, +0.2326, +0.1873, -0.0458, +0.4263, -0.1981, -0.1495, -0.2254, +0.1243, +0.2372, +0.0684, +0.0282, +0.0340, +0.2636, -0.1085, +0.1307, +0.0005, +0.2727, -0.3159, -0.0499, -0.5526, -0.4607, -0.1653, -0.1389, -0.2559, -0.3343, -0.1751, +0.0961, -0.1130, -0.3663, +0.1649, -0.0549, +0.1994, +0.3338, +0.2785, +0.3672, +0.1056, +0.3413, +0.2041, +0.2421, -0.4176, +0.0595, -0.7157, -0.8599, -0.2813, -0.5280, -0.2340, -0.3723, +0.3193, -0.0203, -0.7509, -0.4940, -0.0868, -0.0221, +0.0788, -0.0377, +0.1623, +0.0788, -0.0519, -0.2508, -0.3549, +0.2371, -0.4721, +0.5425, +0.5633, +0.0457, -0.0053, -0.4628, -0.2514, -0.1613, -0.2032, +0.0440, +0.3368, +0.0844, -0.1580, -0.7842, +0.1071, +0.0913, +0.2130, -0.3305, +0.1332, +0.1810, -0.1177, +0.2646, -0.4359, -0.3863, -0.0371, +0.2614, -0.1002, +0.1836, -0.2881, +0.4521, -0.1639, -0.0507, +0.1292, -0.2386, -0.2004, -0.0219, -0.9069, +0.2525, -0.0051, +0.5764, +0.1496, +0.2575, -0.1619, +0.1981, -0.0817, -0.0992, +0.1743, -0.1275, -0.2257, -0.0344, +0.1149],\n[ +0.0552, -0.1676, +0.0500, +0.0371, +0.2764, -0.2129, -0.2672, +0.3881, +0.3348, +0.1618, -0.0235, -0.2225, +0.0011, +0.3560, +0.2559, -0.2586, -0.0630, -0.0616, +0.2104, +0.1421, +0.0131, -0.5461, -0.1410, -0.2444, -0.3954, +0.2889, -0.1876, +0.5588, -0.2274, +0.0884, -0.4838, +0.0815, -0.4368, -0.0366, -0.0305, -0.2435, +0.1682, -0.1123, -0.6352, -0.1180, -0.1304, -0.3762, -0.1200, -0.0787, +0.1968, +0.0229, +0.6007, +0.3058, +0.0111, +0.0071, -0.3882, -0.4516, +0.0657, -0.4349, -0.0312, -0.3833, +0.3771, +0.5473, -1.1309, -0.2844, -0.2990, -0.3114, -0.2003, +0.1692, -0.5216, -0.2573, +0.2162, +0.4196, +0.0645, +0.1814, -0.1180, -0.1903, +0.1395, +0.3146, -0.0503, -0.0129, +0.1173, -0.1289, -0.7010, +0.1966, -0.1170, +0.1836, -0.5466, +0.2949, +0.1268, +0.2523, +0.1339, +0.2687, -0.1148, -0.9586, -0.3372, +0.0221, -0.3042, +0.1392, -0.1978, -0.4729, +0.0023, -0.0018, -0.3816, -0.2387, +0.0710, +0.0402, +0.3025, -0.0422, +0.2517, -0.1266, +0.2964, -0.0651, -0.1329, +0.0103, +0.4835, -0.1226, +0.0643, -0.1345, +0.0647, -0.3269, -0.4923, +0.2875, +0.1877, -0.1526, -0.0785, -0.0118, -0.0353, -0.6584, +0.3742, -0.1663, +0.2500, +0.0618],\n[ +0.2997, +0.2519, -0.4286, -0.7198, -0.2245, -0.1805, -0.4405, -0.4680, -0.1060, -0.3217, +0.1738, -0.2077, +0.1985, -0.3943, -0.3274, -0.5616, -0.5938, +0.0370, -0.0538, 
+0.2143, -0.3124, -0.3561, +0.3302, -0.2428, -0.1921, +0.0271, -0.0047, -0.2883, +0.0238, -0.2252, -0.2254, +0.1708, -0.2050, +0.2460, +0.0141, +0.0999, -0.1220, -0.1217, -0.1450, -0.0053, -0.4942, -0.4115, +0.1852, -0.1231, +0.1924, -0.3888, -0.2041, -0.4723, -0.4349, +0.0732, -0.6097, -0.3690, +0.1171, +0.1377, -0.3710, +0.4654, -0.1571, -0.8847, +0.0086, -0.0884, +0.3293, -0.5117, +0.0098, +0.2000, -0.2808, +0.0643, +0.1971, -0.0608, -0.4650, -0.3024, +0.1653, -0.0648, +0.2489, -0.1785, -0.1707, +0.3948, -0.3938, -0.6172, -0.2203, -0.7173, +0.0293, -0.3072, -0.0620, -0.2389, -0.0896, +0.1239, +0.2999, -0.0323, -0.5117, -0.0260, -0.3110, -0.1003, -0.5293, -0.0664, +0.2057, +0.1447, -0.3164, -0.0746, -0.6058, +0.1389, +0.1527, -0.2403, -0.0355, -0.1622, -0.0819, +0.2913, +0.2429, -0.0298, +0.0585, -0.0653, +0.1618, -0.0336, -0.6792, +0.3777, +0.0029, -0.2496, +0.3963, -0.0700, +0.4383, -0.4798, +0.2028, +0.1346, -0.6482, -0.1837, +0.0233, -0.0848, +0.0192, +0.2274],\n[ -0.7703, +0.3597, +0.2273, -0.0120, -0.6104, -0.3715, -1.1547, -0.8693, -0.0804, -0.3563, -0.2710, +0.1459, +0.0653, +0.0391, -1.0880, +0.1807, +0.3675, -0.1739, -0.1623, +0.1400, -0.1035, +0.1792, +0.6849, -0.7708, -0.4373, +0.1885, -0.2048, +0.0541, +0.1565, -0.4325, -0.5115, -0.0447, +0.3524, +0.1776, +0.2972, -0.0688, -0.4532, -0.0173, -0.0426, -0.1938, +0.1425, -0.4570, +0.0673, -0.0495, -0.3246, +0.2467, +0.3304, -0.1078, +0.3719, -0.0125, -0.0105, -0.1284, +0.3591, -0.3257, +0.2117, -0.3518, +0.2448, -0.5693, +0.4628, -0.4708, -0.0589, +0.2012, +0.1599, -0.3289, -0.1241, -0.8050, -1.0541, +0.2217, +0.0578, -0.0257, +0.4239, -0.3047, -0.4532, +0.5791, -0.0018, -0.1231, -0.1040, -0.2046, -0.0637, -0.1778, -0.1759, +0.3343, -0.4349, -0.6581, -0.0120, -0.3806, -0.6405, -0.1362, +0.1076, -0.9756, -1.0895, -0.5013, +0.1329, -1.0341, -0.1451, -0.0347, -0.0139, -0.2391, -0.3683, +0.0724, +0.3231, -0.7633, -0.1369, +0.0740, -0.0623, -0.0497, -0.1996, +0.0780, +0.2873, -0.2659, -0.3895, -0.6219, +0.2696, +0.4523, -0.3043, -0.2205, +0.2472, -1.0548, -0.3012, -0.6168, -0.6512, -0.2143, -0.5998, -0.2672, +0.1359, -0.0752, +0.0370, +0.0627],\n[ -0.1791, -0.2279, -0.0998, +0.1783, +0.1553, +0.0612, -0.2220, +0.1551, -0.2240, -0.0159, +0.0010, -0.0103, +0.1007, -0.3934, -0.5397, -0.2077, +0.0963, -0.1834, -0.2112, -0.0267, -0.3809, -0.2319, +0.2167, +0.0076, +0.0075, -0.0750, +0.0607, +0.1455, +0.0941, -0.0205, -0.2981, -0.0061, +0.4115, -0.4933, +0.2443, -0.9828, -0.3445, -0.2593, -0.3871, +0.0736, -0.8643, -0.0187, +0.1316, -0.2119, +0.3713, +0.0419, -0.5368, +0.1741, -0.1143, +0.0186, -0.2846, -0.4499, -0.2979, -0.0987, -0.1089, -0.1469, +0.1726, -0.4902, +0.3746, -0.1956, -0.6684, -0.1877, -0.4473, +0.3690, +0.1347, -0.1845, -1.4047, -0.1341, -0.0740, -0.0375, -0.1693, -0.3976, -0.6494, -0.0323, -0.2126, +0.3526, -0.2334, -0.3229, +0.4134, +0.0701, +0.2353, +0.4844, +0.0562, +0.3268, -0.1159, -0.1046, -0.0269, -0.0188, -0.1613, -0.2217, -0.2000, -0.0957, -0.2712, +0.3059, +0.1249, -0.1035, +0.0004, +0.3311, +0.1874, +0.3168, +0.0562, +0.1328, +0.1546, -0.1332, -0.0177, +0.2608, -0.1027, -0.4310, -0.5956, +0.0371, +0.0779, -0.1145, +0.0611, +0.1766, +0.2322, +0.3979, -0.1862, +0.3387, -0.6133, +0.0790, +0.0620, -0.1411, -0.0586, -0.0211, -0.1644, -0.4890, -0.0665, +0.2908],\n[ -0.1590, +0.3465, -0.7111, +0.0057, +0.2907, -0.4929, +0.1248, -0.0589, -0.0498, -0.3394, +0.3763, +0.2212, -0.0642, +0.4740, -0.3273, -0.3461, +0.1982, -0.4864, +0.3201, +0.1274, +0.2486, +0.0471, -0.0542, +0.2365, +0.4782, -0.9680, -0.0935, -0.0158, 
-0.1450, -0.3454, -0.2127, -0.2524, +0.1208, -0.0273, +0.3810, -0.0663, +0.0663, -0.2722, -0.6524, -0.3031, +0.1824, -0.5041, +0.0428, +0.3808, -0.4099, +0.1713, -0.2006, +0.3985, -0.1635, -0.0436, -0.8630, +0.0073, -0.0152, +0.2548, +0.3976, +0.2089, -0.0484, +0.0710, +0.0378, -0.0727, -0.3489, -0.2270, +0.2633, +0.0113, +0.3312, +0.0097, -0.5475, +0.1103, +0.0917, -0.2627, -0.5849, +0.1446, -0.8225, -0.3470, -0.0024, -0.2059, -0.4963, +0.0666, -1.5108, -0.2249, +0.1441, -0.2078, -0.2021, -0.2921, -0.3632, +0.0481, +0.0159, -0.3559, -0.3166, +0.2004, -0.4084, -0.3994, +0.1976, +0.0868, +0.1715, +0.0243, -0.0022, +0.3939, +0.0631, -0.5910, -0.0901, +0.0470, -0.1276, +0.0298, +0.0838, +0.2862, -0.7131, -0.1051, -0.4813, +0.1589, -0.1359, +0.1241, -0.0412, +0.1048, +0.1344, -0.3297, -0.0250, +0.1921, +0.0646, +0.1873, -0.3154, +0.1462, -0.1932, +0.4339, -0.4673, +0.1966, -0.2193, +0.1271],\n[ -0.4045, -0.4693, -0.2753, -0.0697, -0.7086, -0.1363, +0.0742, +0.5169, -0.9451, +0.0164, +0.7178, +0.4367, -0.5113, -0.0481, -0.3382, +0.3882, -0.6975, +0.3492, +0.6143, +0.2262, -0.3421, -0.1838, +0.5108, +0.2796, +0.3800, +0.0532, -0.4792, -0.4716, +0.0690, -0.0716, +0.2828, -0.7554, +0.1276, -0.2303, +0.0311, -0.2633, +0.0998, +0.2469, +0.1871, +0.6439, -0.4219, -0.2035, -0.4298, +0.0361, +0.1351, +0.2658, -0.2374, -0.1598, -0.5585, -0.0315, -0.9313, +0.2618, +0.2350, -0.0825, +0.0885, +0.1367, +0.3425, +0.0241, -0.4690, -0.1568, -0.2416, -0.2253, -0.0125, -0.1595, +0.0867, +0.4414, -0.8874, +0.0626, -0.3080, +0.5134, +0.4091, -0.2617, -0.1513, -0.1776, +0.0203, +0.2636, +0.0000, -0.4166, -0.0429, +0.2566, -0.3680, -0.1859, -0.6129, +0.4622, -0.1716, -0.0095, -0.1997, +0.3254, +0.4619, -0.4222, +0.0490, +0.2444, -0.0382, -0.0066, +0.0251, -0.7795, +0.6492, -0.4009, +0.0306, -0.6512, +0.0904, -0.5018, +0.2744, +0.6262, -0.0631, -0.7705, -0.3628, -0.0445, +0.1403, -0.3503, +0.3548, -0.4655, +0.6479, -0.2118, +0.2087, -0.8546, +0.7755, -0.2266, +0.2387, -0.6748, +0.1535, -0.3294, +0.3806, +0.1586, -0.1722, -0.2350, +0.1667, +0.3890],\n[ +0.2363, +0.0062, -0.4151, -0.2195, -0.1890, +0.1727, -0.1118, -0.2844, +0.1809, -0.2691, -0.1235, -0.1263, +0.3011, -0.5891, +0.4425, -0.1007, -0.1312, -0.2862, +0.1523, +0.0050, -0.0170, -0.3431, -0.2285, +0.0159, +0.1520, -0.6582, +0.1684, -0.4045, -0.2205, +0.1423, +0.0978, -0.0692, -0.6432, +0.2055, -0.0115, +0.0345, +0.0718, -0.1850, -0.3148, -0.0285, +0.4103, +0.3684, +0.0221, -0.9018, -0.4790, -0.2507, -0.0904, +0.3063, +0.1512, -0.0405, -0.5723, -0.7270, -0.0956, +0.1412, -0.0119, -0.3245, +0.2918, -0.4344, +0.1103, +0.2323, -0.3513, +0.3947, +0.3163, -0.3286, +0.0400, -0.4976, -0.0799, +0.1862, -0.4608, -0.4698, -0.3919, -0.1890, +0.3467, -0.4846, +0.1217, -0.4737, -0.0228, -0.0274, -0.7486, -0.7364, +0.0778, +0.1966, -0.0731, -0.7374, -0.3736, -0.5404, -0.0662, -0.2419, +0.1435, +0.1261, -0.8224, -0.2903, +0.1371, +0.0677, +0.1414, -0.7651, -0.2402, -0.2509, +0.3953, -1.0583, -0.4834, +0.1641, -0.4187, -0.3944, -0.3237, -0.2496, -0.3212, -0.0233, -0.0451, +0.0084, +0.0396, -0.5585, +0.0647, -0.7654, +0.0435, +0.2010, -0.7568, -0.0416, -0.7364, -0.1277, -0.3532, +0.1159, +0.1172, -0.0711, +0.1222, +0.1614, +0.2695, -0.1347],\n[ +0.2645, +0.1945, -0.1109, -0.0107, +0.4603, -0.0164, -0.6991, +0.0160, -0.2493, -0.0361, +0.0486, +0.1446, +0.1816, -0.3309, -0.5692, +0.3186, +0.0398, +0.1935, +0.0052, +0.1339, -0.6104, -0.0547, -0.0493, +0.1449, -0.0981, -0.4863, -0.0619, -0.0866, -0.2730, -0.0892, -0.5666, +0.0349, -0.1451, -0.0458, +0.0709, -0.0688, -0.3157, 
-0.1346, +0.0223, +0.1968, -0.3147, -0.2957, +0.0356, -0.4077, -0.0574, +0.1900, -0.2019, -0.4952, -0.4156, -0.2254, +0.3388, -0.1667, -0.0230, +0.0165, -0.1358, -0.0144, +0.0272, -0.2540, -0.0612, -0.0324, -0.0505, +0.1923, -0.3608, +0.3274, -0.5640, +0.2636, +0.2367, -0.1310, +0.0460, +0.0487, +0.0208, -0.3474, +0.1411, +0.2065, +0.6770, -0.1130, -0.0721, -0.6301, -0.0159, -0.0925, +0.1345, +0.1874, -0.4512, -0.2718, +0.0344, -0.2044, +0.1453, -0.0164, -0.1500, +0.3967, +0.0505, -0.0252, +0.2409, -0.4191, +0.0563, +0.0028, +0.1797, -0.6223, +0.3218, -0.3501, -0.1397, -0.3705, -0.0733, +0.2525, +0.1673, +0.2652, +0.3524, +0.2129, +0.0589, +0.0883, +0.4070, +0.3489, +0.0569, -0.1190, +0.4708, -0.2421, +0.2813, +0.1846, +0.2897, -0.2035, +0.2092, +0.1079, +0.3770, -0.0807, -0.2196, +0.1587, +0.0665, +0.1521],\n[ -0.0032, -0.5900, +0.3245, +0.2203, -0.4119, +0.0732, -0.1448, -0.2840, -0.5423, +0.4369, +0.1149, -0.1663, -0.0846, -0.4860, -0.7153, +0.2717, +0.3148, +0.0577, +0.0600, +0.1907, +0.0226, -0.8129, -0.0191, -0.1380, -0.3814, -0.2589, +0.1204, -0.1775, -0.4343, -0.3241, -0.4191, +0.2309, -0.1817, +0.1551, +0.1934, -0.6179, -0.3449, -0.3706, -0.1836, -0.0723, +0.1246, -0.1750, +0.0323, +0.0048, +0.1908, -0.0128, -0.2133, +0.2548, +0.2636, +0.2050, -0.5303, +0.2526, +0.0808, -0.5870, -0.2439, +0.2156, -0.4137, -0.7845, -0.0266, -0.1659, -0.0701, -0.0901, +0.4030, -0.2117, -0.3111, +0.2580, -0.3229, +0.2362, +0.5040, -0.0254, +0.1128, +0.0537, +0.0629, +0.0724, -0.0315, +0.3396, +0.0600, +0.2878, +0.1927, +0.0392, -0.4752, -0.1950, -0.1491, -0.4002, +0.2534, +0.0002, -0.0474, +0.3546, +0.3228, +0.4452, -0.0011, +0.0549, -0.0518, +0.2764, +0.2517, +0.2436, -0.4300, +0.2737, +0.3223, +0.0856, +0.1481, -1.0614, -0.0964, -0.6597, -0.1409, -0.1445, -0.1676, -0.5498, +0.7005, +0.3196, +0.2127, -0.0677, -0.2303, -0.2276, -0.1833, -0.2009, -0.0879, -0.1477, +0.2008, -0.1563, +0.0598, -0.0930, +0.1721, -0.2699, -0.2913, +0.0134, -0.2100, -0.2508],\n[ +0.0078, -0.1145, +0.0566, +0.2845, +0.3333, +0.0245, +0.5178, -0.2139, -0.5225, +0.1092, +0.3087, -0.2633, -0.4887, +0.5600, -0.3808, -0.4829, +0.3219, +0.3766, +0.0972, +0.4930, -0.2441, -0.2812, -0.2231, -0.0461, -0.6873, -0.0303, -0.0595, +0.1965, +0.1405, +0.2027, +0.1398, -0.5602, -0.5044, +0.3049, -0.8612, -0.0465, +0.4593, -0.1495, +0.4121, +0.3910, +0.1221, -0.3441, -0.1128, -0.3937, -0.0790, -0.2019, -0.5485, +0.1180, +0.0659, -0.0949, -0.1865, +0.8964, +0.1803, +0.2051, -0.3293, -0.3495, -0.2919, +0.1497, -0.0474, -1.0231, +0.0527, -0.4692, -0.0999, +0.0488, -0.1689, +0.1371, -0.1849, +0.1495, -1.0595, +0.4409, -0.1860, +0.3221, -0.4063, +0.2084, +0.3131, -0.0811, -0.0254, +1.0520, +0.2568, -0.1223, -0.6538, -0.3910, +0.0634, -0.1927, +0.0842, -0.5116, +0.0424, +0.0742, +0.0284, -0.9449, -0.2828, -0.2224, +0.1159, -0.0119, -0.0894, -0.2882, -0.3089, +0.2021, -0.1726, -0.1260, -0.2500, -0.0621, -0.6379, +0.2073, +0.3437, +0.2344, +0.1052, +0.0429, -0.1970, +0.7690, -0.2105, -0.6152, +0.1540, -0.1216, -0.1457, -0.0201, -0.7810, -0.5367, -0.1856, -0.3119, -0.6950, -0.0800, +0.1553, +0.7838, -0.4806, -0.7120, -1.0775, +0.4206],\n[ -0.8158, -0.0751, +0.4568, +0.1936, -0.0115, +0.0016, +0.1775, -0.3664, +0.2353, -0.1059, -0.0433, -0.2364, -0.0069, -0.0294, -0.3629, +0.4730, -0.3616, +0.2660, +0.4088, -0.0482, +0.2876, -0.0399, +0.1708, +0.3116, +0.0624, +0.1124, +0.2188, -0.1548, +0.0379, +0.2865, +0.0004, -0.3470, -0.4660, -0.1543, -0.6684, +0.3219, +0.7255, +0.2310, -0.2165, +0.0654, -0.5981, -0.2871, -0.4359, -0.0235, -0.4874, -0.6068, 
-0.1394, +0.0140, +0.1378, +0.0300, -0.3226, -0.1644, +0.0784, -0.3535, +0.4106, +0.3939, +0.4227, -0.2622, -0.0644, -0.7142, -0.6646, -0.3376, -0.0979, -0.1231, -0.2758, -0.7267, -0.0163, -0.3791, +0.2953, -0.0657, +0.0056, -0.4484, -1.0532, +0.5263, -1.1149, -0.1071, -0.5389, -0.0615, -0.0329, +0.1728, -0.0696, -0.3026, -0.0906, -0.1880, +0.1664, -0.4204, +0.2661, -0.6305, -0.2993, -0.1241, -0.2866, -0.1463, +0.4917, +0.1292, -0.4802, +0.3295, -0.7186, -0.1402, -0.0365, +0.0668, +0.1496, -0.3388, -0.0329, -0.2554, +0.1411, -0.6357, -0.1040, +0.1403, +0.1908, -0.7587, -0.5095, +0.2143, +0.2150, +0.2288, +0.2723, -0.8710, +0.1903, +0.0446, -0.7277, -0.1252, -0.0186, +0.2522, -0.4735, -0.0302, -0.3595, -0.3273, +0.2083, +0.0594],\n[ -0.2945, +0.3638, +0.1284, -0.5290, -0.1601, +0.1974, +0.1350, -0.3181, +0.1355, -0.1144, -0.8185, +0.3295, +0.0454, -0.2519, +0.0312, -0.0144, +0.0092, +0.1483, +0.0112, +0.1281, +0.2530, +0.0103, -0.2943, +0.0510, +0.2044, +0.0298, +0.2082, -0.0052, +0.1255, +0.1062, -0.6600, -0.0876, +0.0886, -0.1130, -0.4472, -0.0706, -0.2610, -0.4739, -0.3537, +0.0546, -0.3391, -0.5774, -0.1172, +0.1768, +0.3232, +0.0907, -0.5683, -0.6098, +0.1459, +0.3442, -0.3454, +0.0279, +0.1736, -0.1270, +0.1902, +0.0658, -0.0248, -0.2342, +0.1925, +0.1986, +0.1576, -0.6727, -0.3078, +0.1383, -0.2497, -0.1600, +0.2205, -0.3507, +0.1797, +0.5265, -0.3245, +0.0958, -0.1797, +0.0096, +0.3309, -0.2368, +0.0322, -0.2754, +0.1351, +0.5305, +0.1616, +0.5841, -0.7230, -0.3630, -0.0726, -0.5375, -0.2105, -0.1580, +0.1014, +0.1227, +0.0446, +0.0279, +0.5773, -0.5343, +0.2156, -0.0660, -0.3511, -0.0986, -0.2209, +0.1967, +0.0479, +0.4054, -0.4945, +0.2346, -0.4003, +0.1255, +0.0379, -0.3276, -0.3174, -0.1166, +0.1421, -0.3541, +0.0644, -0.2061, -0.0139, +0.1682, -0.2627, +0.0623, +0.3165, -0.2602, +0.3458, +0.2270, -0.4983, -0.1965, -0.1811, +0.1990, -0.0536, -0.3008],\n[ +0.2861, +0.2465, +0.5445, -0.6241, -0.3810, -0.2431, +0.0899, +0.0009, +0.0287, -0.2860, +0.4379, +0.1100, -0.0060, -0.0688, +0.3326, +0.4050, -0.0543, -0.3589, -0.4444, -0.1794, +0.0463, -0.4859, +0.5408, +0.1241, -0.1452, +0.1877, +0.3102, -0.5119, -0.4563, +0.2339, +0.0762, -0.0091, -0.2146, +0.0441, -0.8371, +0.1044, +0.3922, +0.0952, +0.2457, -0.1154, +0.2800, +0.0238, -0.1393, +0.1442, -0.6630, +0.3521, -0.2741, -0.0738, -0.0247, -0.0797, -0.2095, +0.5173, -1.1573, +0.4417, +0.2038, -0.2982, -0.2218, -0.0335, -0.0754, +0.0173, -0.1934, -0.7999, -0.7296, -0.1534, +0.0536, -0.5079, +0.3942, +0.1031, +0.0451, +0.3108, +0.0228, -0.5315, -0.5942, +0.1103, -0.1635, -0.0243, -0.3393, -0.2886, +0.1906, +0.0561, -0.1120, -0.2488, -0.2236, -0.3785, -0.0452, +0.2679, +0.2678, +0.4436, -0.8340, +0.0203, +0.6116, -0.3027, +0.1799, -0.2438, +0.1136, -0.2686, -0.0847, +0.0744, +0.0928, +0.2443, +0.0078, -0.0683, -0.4921, -0.0881, -0.1307, +0.3243, -0.4150, +0.0227, -0.5241, -0.5408, +0.2310, +0.2375, -0.0665, +0.2242, -0.2104, +0.2351, -0.1965, -0.3067, +0.0265, -0.1175, -0.3326, -0.3971, -0.4137, -0.0187, -0.4536, -0.0317, -0.2076, +0.1488],\n[ +0.3728, -0.0547, +0.3402, +0.0846, -0.2939, -0.4488, -0.3421, +0.3539, +0.2334, -0.1906, +0.2732, +0.2161, -0.4480, +0.1510, -0.4346, -0.0497, +0.2783, -0.1186, +0.0806, -0.0484, +0.1960, +0.2053, -0.0394, +0.0148, -0.0442, +0.1445, -0.2074, +0.2616, -0.0416, +0.2471, -0.3292, +0.2827, +0.1714, -0.0269, -0.1959, -0.3425, +0.3473, -0.2343, -0.3136, +0.2331, -0.0308, +0.1485, +0.4697, -0.1206, +0.3913, -0.1763, -0.3010, -0.2376, +0.5187, +0.1515, -0.2541, -0.0851, -0.5678, +0.1875, -0.0576, 
[... large embedded matrix of raw floating-point values omitted (dozens of rows, roughly 128 values per row); the blob carries no recoverable code or prose and is elided here ...]
+0.1414, +0.2713, -0.4879, +0.0969, -0.1078, -0.0405, -0.4846, -0.1335, +0.0573, +0.0358, +0.1098, -0.1402],\n[ -0.1117, +0.1986, -0.7761, -0.3040, +0.3357, -0.3474, +0.1743, +0.1902, +0.3739, -0.2314, +0.0653, -0.1791, -0.2269, -0.5653, +0.2996, +0.1259, -0.2694, +0.2773, -0.1439, -0.1760, -0.1470, -0.2581, -0.4101, +0.0351, -0.3246, +0.2160, +0.1913, +0.2079, +0.2030, -0.0556, +0.3580, +0.0021, +0.1484, +0.2643, +0.1736, -0.2415, +0.0955, +0.0201, -0.0134, -0.0322, +0.3084, -0.4807, +0.1081, -0.5020, +0.2811, +0.0917, -0.2092, +0.0043, +0.1168, +0.4111, -0.0706, -0.2049, +0.0966, -0.2413, -0.0175, +0.3128, -0.3217, -0.5673, -0.1846, -0.2971, -0.1817, -0.2797, -0.3498, +0.0397, +0.3490, +0.0587, +0.0955, -0.1578, -0.4743, +0.1826, +0.4050, +0.3482, +0.1023, +0.0652, -0.1641, -0.2173, -0.4869, +0.0762, -0.0708, +0.2776, +0.0441, +0.3667, -0.5101, -0.5287, -0.2622, -0.2233, +0.1004, +0.2117, +0.1264, -0.0336, +0.0162, +0.4209, +0.3222, +0.3265, -0.3533, +0.1657, -0.3463, -0.7502, -0.2849, +0.1964, +0.2663, +0.1315, -0.0235, -0.1738, -0.3701, +0.1235, -0.3071, -0.1384, +0.1270, +0.5121, +0.2657, -0.4361, -0.0806, +0.3179, +0.2078, +0.1773, -0.0158, +0.4612, -0.0097, +0.0344, +0.4431, -0.0293, +0.2263, -0.3492, +0.0373, +0.3111, -0.3341, +0.3314],\n[ -0.2234, +0.3046, +0.0611, -0.2822, +0.0183, +0.0652, -0.4832, +0.2223, -0.1448, +0.0276, +0.1012, +0.3600, -0.1284, -0.6043, -1.2978, -0.8725, -0.0866, +0.1470, +0.4283, +0.0699, +0.0635, +0.5191, -0.0263, +0.0685, -0.0539, +0.3351, -0.1752, +0.3458, -0.4523, -0.2304, -0.1956, -0.0134, -0.1779, -0.1767, -0.1972, -0.1104, -0.0667, -0.2914, -0.9693, +0.2018, +0.0919, -0.3138, +0.0910, +0.2717, +0.1102, +0.0138, -0.5037, +0.0832, +0.0590, -0.3267, -0.0313, +0.1724, -0.3492, +0.2826, -0.0482, +0.0152, +0.1023, -0.7773, -0.0515, -0.2389, +0.0211, -0.0255, -0.0368, +0.5986, +0.0725, -0.2325, -0.5902, -0.1262, -0.0356, +0.1307, -0.4985, -0.1194, -0.1714, +0.0975, -0.3656, +0.3823, +0.0281, +0.0863, -0.1686, -0.2519, +0.2029, -0.0693, +0.0241, +0.4671, -0.0035, +0.1302, -0.6907, -0.2194, +0.1807, -0.7022, -0.1678, -0.0593, -1.0474, -0.3847, -0.3561, +0.0089, +0.3198, +0.1647, +0.1293, +0.0437, +0.2253, -0.0569, +0.3280, -0.3060, +0.3071, -0.0946, +0.2558, +0.1250, -0.3373, -0.7435, +0.0748, -0.1516, -0.1449, +0.2877, -0.1991, +0.0102, +0.2082, +0.3499, -0.3268, +0.1190, +0.3183, +0.0166, +0.3925, +0.3659, -1.3411, +0.3606, -0.0859, +0.1691],\n[ -0.4373, +0.5870, -0.2470, -0.4369, -0.5017, -0.7323, -1.2199, -0.2451, -0.2447, -0.1360, +0.2145, -0.4427, -0.3696, +0.1239, +0.3161, -0.1466, +0.0734, -0.0463, +0.0277, -0.3268, -0.1238, +0.0240, +0.0479, -0.0531, -0.3103, +0.4964, +0.0494, -0.0860, -0.5420, -0.1102, -0.5141, +0.2544, +0.0141, +0.1847, -0.3437, +0.2420, -0.0345, -0.1352, -0.2411, +0.0601, -0.1543, +0.2220, -0.0458, -0.0342, +0.0031, +0.3370, -0.0459, -0.3162, +0.3164, -0.2459, -0.3363, +0.0468, -0.0645, +0.5785, -1.0524, +0.0431, -0.2120, -0.1114, +0.1165, -0.0978, +0.0010, +0.1229, +0.0559, -0.4454, +0.6569, -0.1128, +0.0485, -0.3288, -0.1035, -0.1959, -0.6063, +0.2157, -0.1107, -0.0526, -0.0856, -0.6510, +0.1339, +0.5464, -0.2989, +0.5216, -1.2263, -0.0108, +0.2021, -0.1379, +0.7368, -0.3330, -0.1118, +0.0816, -0.3946, -0.7703, +0.1186, +0.0387, -0.2798, -0.4680, -0.1312, +0.5733, +0.0175, -0.8109, -0.7925, +0.2221, -0.3326, +0.1711, -0.3284, +0.5274, -0.6371, +0.3497, -0.2010, +0.3155, -0.2545, -0.1489, +0.3425, +0.1253, +0.4334, -0.4114, +0.0696, +0.1568, -0.1666, +0.3577, +0.0306, -0.6268, -0.6966, -1.0236, -0.4172, -0.5622, -0.1003, 
-0.5679, +0.1524, -0.3371],\n[ -0.2767, +0.0972, -0.5547, -0.2201, -0.1170, -0.1576, -0.1602, +0.1382, -0.0808, +0.2028, -0.3989, +0.0158, -0.1357, -0.3024, -0.2257, -1.2166, -0.2502, -0.5230, -0.4460, -0.1411, -0.1846, -0.4805, -0.0032, -0.4232, -0.4742, -0.5456, -0.1574, -0.3298, +0.0333, -0.0440, -0.2283, +0.0253, -1.1936, +0.2652, -0.0474, -0.0169, -0.0632, +0.3591, -0.2793, +0.0314, -0.6320, -0.8212, -0.1199, -0.3018, +0.1836, -0.1475, -0.8715, -0.3799, +0.1702, -0.2543, -0.5776, -0.0772, +0.3332, +0.2767, -0.2859, +0.2075, -0.7332, +0.4793, -1.5267, +0.1474, -0.2003, +0.0032, -0.0866, -0.4733, -0.2157, -0.5489, -0.8860, +0.0229, -0.3014, +0.0358, -0.0763, -0.0744, +0.0927, -0.5832, +0.2188, +0.1248, -0.2539, -0.6919, -0.0873, -0.4554, +0.0821, +0.2124, +0.0322, -0.0390, -0.2129, +0.2774, +0.2383, -0.2737, -0.2540, -0.8198, +0.1893, -0.2351, -0.0728, +0.4741, -0.4950, -0.3812, +0.1700, -0.8968, +0.0391, +0.3027, -0.0014, +0.0654, -0.2479, +0.0851, +0.1058, +0.4011, -0.0536, -0.6038, -0.2781, -0.0519, +0.2117, -0.4242, -0.4415, -0.3171, +0.1510, -0.0039, -0.4738, -0.1233, -1.2393, -0.0213, -0.1457, +0.0515, -0.2666, +0.0735, +0.3065, -0.2219, -0.3103, +0.1352],\n[ -0.1894, -0.3178, -0.2146, -0.0917, -0.3281, +0.3759, -0.3827, +0.2162, -0.2736, -0.5178, +0.1408, -0.1238, -0.0214, -0.0124, -0.3949, -0.3133, -0.1027, +0.0348, +0.2293, -0.1680, -0.6537, -0.2278, -0.2112, +0.2586, +0.1426, +0.0794, +0.0601, +0.1368, -0.2424, +0.1392, +0.0391, +0.1776, +0.3126, -0.4101, +0.1359, +0.2845, +0.2675, +0.1771, -0.1543, -0.3432, +0.1683, +0.2882, -0.1131, -0.1933, +0.1945, -0.3527, -0.2665, -0.0031, -0.8195, +0.0226, +0.0603, -0.0813, +0.2884, -0.1688, -0.1183, +0.3030, +0.2942, +0.2613, +0.0640, +0.1961, +0.0728, -0.1439, -0.3157, -0.0657, +0.1147, +0.2127, -0.5807, -0.3566, +0.2828, -0.3506, +0.0159, -0.3167, +0.1337, +0.1766, +0.2469, -0.0833, +0.2147, -0.1919, +0.2639, -0.0558, -0.4039, +0.1532, -0.2222, +0.2294, -0.1847, +0.1543, +0.0267, +0.3783, +0.4722, +0.1427, +0.2415, +0.4090, -0.4329, -0.2182, -0.1861, +0.4997, +0.0788, -0.5247, +0.1604, -0.1814, -0.3566, +0.2227, -0.0276, -0.1325, -0.4941, -0.2409, +0.1634, +0.2456, +0.3919, +0.1564, -0.3376, -0.0926, -0.0169, +0.0950, -0.1314, +0.0283, -0.3400, -0.4327, +0.0396, -0.1139, -0.0001, +0.0763, -0.4060, +0.3790, -0.1646, -0.7353, +0.0369, -0.0515],\n[ -0.2086, -0.1809, -0.0925, -0.3050, -0.2023, -0.3849, +0.0063, +0.3838, -0.0679, -0.4047, +0.1106, -0.2605, -0.1714, -0.0836, +0.3509, -0.1618, -0.6817, -0.5073, +0.0951, +0.0166, -0.0730, -0.3489, +0.4669, +0.0451, +0.0663, +0.3284, -0.0380, -0.3944, -0.4299, -0.1957, -0.6752, +0.1119, -0.4006, +0.0351, +0.0977, +0.3906, +0.3201, -0.3789, +0.4969, +0.0857, -1.0318, -0.0316, +0.0653, +0.1053, -0.4342, -0.5321, +0.2690, +0.2056, +0.2295, -0.0461, -0.1716, -0.9352, +0.0156, +0.0124, -0.4358, -0.0842, -0.1229, +0.5391, -0.3714, -0.0735, -0.2581, -0.2032, -0.3048, -0.4434, -0.3753, +0.4077, +0.4080, +0.0238, +0.0651, +0.1965, +0.5052, -0.2532, +0.1327, -0.5991, -0.3155, +0.1131, -0.3228, -0.1098, -0.4889, -0.3557, +0.0582, -0.8156, +0.0351, +0.0854, +0.1539, -0.0416, +0.0878, +0.5755, -0.1741, +0.3042, +0.3049, -0.1249, -0.0155, -0.0495, -0.1473, -0.0890, +0.0657, -0.4408, -0.2135, +0.1564, +0.1787, -0.3268, -0.1219, +0.1648, +0.3581, -0.0800, -0.4765, +0.2241, -0.3060, +0.1448, -0.6599, +0.1232, -0.5561, +0.2646, -0.1117, -0.0416, -0.2256, -0.0567, +0.2988, -0.1756, -0.2153, -0.2089, -0.1916, -0.1445, +0.4029, +0.5686, -0.8428, -0.3141],\n[ +0.4546, -0.2401, -0.8765, +0.1447, -0.3474, +0.2069, 
+0.0291, -0.3461, -0.5166, -0.2007, +0.2375, +0.0309, +0.1591, +0.2316, -0.3278, +0.0879, -0.1175, +0.1829, -0.3574, -0.0183, +0.0943, -0.8221, -0.0635, +0.1241, +0.2995, -0.4990, -0.1297, -0.0750, +0.0564, +0.3013, -0.3528, -0.0187, -0.6252, -0.0760, -0.0297, -0.3231, +0.2837, +0.0155, -0.1604, -0.0188, -0.0737, +0.1051, +0.2268, -0.8192, +0.0854, -0.4684, -0.0896, -0.1865, -0.2881, +0.2112, +0.1710, +0.1251, -0.1999, -0.2006, +0.0150, +0.2386, -0.2525, -0.2776, +0.0791, -0.1655, +0.2221, -0.6359, -0.4413, +0.0507, +0.0245, -0.1480, +0.0307, -0.4909, -0.2057, -0.5612, -0.2967, +0.0754, +0.0988, -0.4456, +0.4071, -0.3079, -0.5466, +0.1572, -0.0346, -0.5972, -0.1862, +0.0957, -0.8404, +0.3361, -0.0397, +0.1259, -0.0430, +0.4487, -0.1305, -0.0816, -0.4018, +0.0301, +0.0845, -0.3938, +0.0034, -0.1890, +0.1008, -0.2038, -0.0580, +0.0037, -0.3044, -0.2627, +0.2022, +0.0591, -0.0465, -0.3806, +0.5580, +0.0273, +0.3072, -0.0389, -0.3435, -0.2277, +0.1378, -0.0317, -0.0693, +0.2321, +0.4751, -0.1210, +0.2986, +0.1641, +0.0845, +0.1088, -0.0719, +0.0558, -0.0499, +0.1624, +0.3028, -0.0444],\n[ +0.3694, -0.0619, -0.0750, -0.4331, -0.4578, -0.2562, -0.2689, +0.2755, -0.2460, -0.0051, -0.7781, -1.0708, +0.2076, +0.3512, +0.2633, -0.0265, -0.3566, -0.2862, -0.6983, -0.1114, +0.1958, +0.0863, -0.6004, +0.1575, +0.3171, -0.1933, +0.0496, +0.6272, +0.0932, +0.2021, +0.1328, -0.5198, -0.6713, -0.0838, -0.3921, -0.2568, -0.3613, -0.1204, +0.0601, -0.6830, +0.4960, -0.2160, -0.0535, -0.3319, -0.3293, +0.2971, +0.3285, -0.0228, +0.0649, -0.4740, +0.7016, -0.4892, -0.2840, +0.2251, -0.2327, -0.8171, +0.4519, -0.1815, -0.1825, -0.7546, -0.5589, -0.1238, -0.1216, +0.0837, -0.2976, -1.7269, -1.0251, +0.1344, +0.4895, +0.1124, +0.0124, -0.6756, -0.1620, +0.1031, -0.2471, -0.6356, +0.5671, +0.4402, -0.5158, +0.0154, -0.2029, +0.3559, +0.4752, +0.4763, -0.0111, -0.0330, -0.1440, -0.5386, -0.0265, -0.8968, +0.1154, -0.2805, -0.3890, -0.5239, -0.0655, -0.3710, -0.1921, -0.2713, +0.3029, -0.0201, +0.0902, +0.0504, -0.2419, +0.3148, +0.7004, +0.0841, +0.5304, -0.1880, +0.2689, +0.0060, -0.7380, +0.2010, -0.0502, -0.4943, -0.0261, +0.1489, -0.6708, -0.1913, -0.7632, +0.0502, -0.3367, +0.1165, +0.0408, +0.1340, -0.4637, -0.4536, -0.9485, -0.3013],\n[ -0.1725, -0.1858, +0.0331, +0.3456, -0.1368, +0.2509, +0.2902, -0.2707, +0.3620, -0.4504, +0.2074, -0.3254, -0.3116, +0.1972, -0.2880, -1.5750, +0.1065, +0.2296, -0.3981, -0.6005, -0.0860, -0.1587, +0.1138, -0.0057, +0.1169, +0.1022, +0.0771, -0.5353, +0.2160, +0.2135, +0.0991, -0.0715, -0.2361, -0.5277, -0.3601, +0.1271, -0.8232, -0.9594, -0.1746, +0.2128, -1.2460, +0.0310, -0.3488, +0.0794, -0.8006, -0.0880, +0.2133, +0.1811, -0.1658, -0.1150, -0.9967, +0.0063, +0.0494, -0.8586, -0.2293, -0.2535, +0.0954, +0.3826, -0.7305, -0.5031, -0.1241, -0.4392, -0.9065, -0.0479, -0.2143, -0.5246, -0.2355, -0.4192, -0.0020, -0.1630, +0.0590, -0.1096, -0.0354, +0.1383, -0.4181, -0.1156, -0.9471, -0.5348, -0.0057, -0.9101, -0.0550, -0.8399, +0.2349, +0.0682, +0.1741, +0.3622, -0.2623, -0.3513, +0.1216, -0.1540, +0.0167, +0.0928, +0.2289, -0.5268, -0.5032, -0.5759, -0.2395, +0.1103, -0.2771, -0.3320, -0.1327, +0.0540, -0.1549, +0.5291, -0.0706, +0.0489, -0.3762, +0.0153, -0.0167, -0.4286, -0.6423, +0.8254, +0.0893, -0.5137, -0.0482, -0.1103, +0.0620, -0.0734, -0.2348, -0.2156, -0.4396, +0.8179, -0.1603, -0.0299, -0.6343, -0.8946, -0.3538, -0.3519],\n[ -0.2354, -0.0994, -0.0970, -0.1210, -0.5758, +0.2825, +0.0853, -0.0817, +0.3374, +0.3253, -0.1718, +0.2518, -0.1108, -0.4627, +0.1909, 
+0.0746, -0.1913, -0.4916, -0.1558, -0.2477, +0.2525, -0.0559, +0.0598, +0.0704, -0.1778, +0.2072, -0.1712, -0.0083, -0.2989, -0.1084, +0.0435, -0.2145, +0.0225, +0.6034, +0.1502, +0.1591, -0.3021, -1.0728, -0.5351, +0.0799, +0.1981, -0.0034, +0.6635, -0.2042, -0.0471, +0.2838, +0.1760, -0.4701, +0.4796, +0.1516, +0.0430, +0.6459, +0.0252, -0.3035, -0.6315, -0.1916, -0.2847, -0.6922, +0.2426, -0.2787, +0.1770, +0.1493, -0.4729, +0.3312, -0.8047, -0.3841, +0.4028, -0.1849, +0.0628, +0.1960, +0.1757, -0.1554, -0.1836, -0.2858, +0.2676, +0.2611, +0.0566, -0.9536, -0.0094, +0.0276, -0.0257, -0.1097, +0.0644, +0.1793, +0.0124, +0.0730, -0.1807, +0.0420, -0.2505, +0.2622, +0.1184, -0.5243, +0.2392, -0.2308, +0.1152, -0.2640, -0.3274, -0.0795, +0.3790, +0.3166, +0.0136, -0.4956, -0.1574, +0.2242, -0.0071, +0.1597, -0.1925, -0.3563, -0.0431, +0.4689, -0.5352, +0.1372, +0.0079, -0.4182, +0.0542, +0.0660, +0.1484, -0.0518, +0.3963, -0.1329, -0.9245, -0.2170, +0.8141, -0.2753, +0.1995, -0.8927, +0.2048, -0.9033],\n[ +0.1385, +0.2524, +0.2341, -0.0525, +0.0286, -0.1349, +0.1474, +0.1847, -0.0895, -0.2732, -0.0785, -0.3748, +0.4309, -0.1217, -0.1987, +0.1887, -0.1932, +0.0054, +0.0628, +0.3407, +0.3116, -0.1444, +0.0419, -0.1252, +0.2750, +0.1297, +0.0635, -0.2700, +0.3252, -0.4535, +0.1030, +0.0394, -0.4224, -0.1120, -0.3679, +0.0162, -0.2485, -0.2971, -0.0216, +0.1285, -0.2827, +0.0020, -0.0438, +0.0292, +0.1771, +0.1373, -0.0630, -0.4031, -0.1995, -0.0976, +0.1109, +0.2382, -0.1350, -0.0708, -0.3023, +0.0379, +0.2145, -0.2867, -0.5431, -0.0735, -0.3522, -0.4424, -0.4257, -0.7687, -0.1674, +0.3218, -0.2861, -0.5267, -0.1729, -0.4813, -0.4816, -0.4595, -0.2712, -0.2190, +0.2677, -0.1222, -0.3066, -0.1060, +0.2256, -0.0821, +0.0364, -0.5062, +0.0821, +0.1147, +0.1934, -0.4142, -0.1493, +0.2036, -0.3093, -0.0189, -0.2693, +0.2322, +0.1105, -0.3197, +0.2832, -0.1993, -0.2103, -0.6401, -0.2555, +0.1762, +0.1250, -0.1426, +0.0233, -0.0284, -0.0694, +0.2206, -0.0664, +0.3228, +0.1130, -0.2081, +0.1334, +0.0279, -0.2022, +0.3889, +0.1545, -0.3145, -0.6417, -0.2156, -0.0909, +0.1342, -0.0219, -0.5034, -0.4547, +0.0030, +0.2268, -0.6860, +0.0075, +0.1278],\n[ -0.2374, +0.0805, +0.2663, +0.1124, +0.0258, -0.3193, +0.3090, -0.0598, +0.3276, +0.0092, -0.5305, -0.0264, -0.1383, -0.3224, -0.0795, -1.0162, -0.1734, +0.0706, +0.3049, -0.4068, +0.1737, -0.7058, +0.2571, +0.1217, -0.2815, -0.5398, +0.1680, -0.3644, -0.5849, +0.1535, -0.2410, -0.0299, +0.4319, -0.4040, +0.1864, -0.4186, +0.1141, -0.0188, -0.8473, -0.4871, +0.0764, -0.6763, -0.4378, -0.3592, +0.1187, +0.4060, -0.2282, -0.0395, +0.1306, -0.0833, -0.3435, +0.0764, -0.5312, -0.0791, +0.1757, +0.0578, -0.5238, -0.0140, -0.5122, -0.2480, -0.7816, -0.6827, -0.5802, +0.1869, +0.2833, +0.2551, -0.6782, -0.1467, -0.3754, -0.0426, +0.5844, -0.3302, -0.5216, -0.1696, +0.3585, +0.1013, -0.0017, +0.1690, -0.0158, +0.1443, -0.0728, -0.2636, +0.0779, -0.3160, +0.0778, -0.0808, +0.0080, -0.0362, -0.2650, +0.0755, -0.0979, +0.3609, -0.3260, +0.0995, -0.3250, -0.4137, -0.2061, -0.5269, -0.1452, +0.2401, -0.2095, +0.0808, -0.2633, +0.0889, +0.2723, -0.3161, +0.1089, +0.2698, -0.3279, +0.2626, -0.0076, -0.2605, +0.2754, +0.1855, +0.4763, -0.1059, +0.0752, +0.0249, +0.4125, -0.4099, -0.5385, -0.0468, +0.1375, +0.1461, -0.1498, +0.0242, -0.6608, +0.2718],\n[ +0.3181, -0.0578, +0.2270, +0.2960, +0.3759, -0.0994, -0.0301, +0.0580, +0.0580, -0.1176, -0.0211, -0.1902, +0.0512, -0.2255, -0.8401, +0.0308, -0.1803, +0.0669, -0.0963, +0.4547, -0.1982, -0.0926, +0.4024, +0.3177, 
-0.3376, -0.2049, +0.0444, -0.3649, +0.2239, -0.0188, -0.1576, +0.0524, +0.3666, -0.1009, +0.0668, +0.0819, -0.1313, +0.4450, -0.6085, -0.3848, -0.6414, +0.2856, -0.1182, +0.0912, +0.0397, -0.5215, +0.3353, -0.2102, -0.5109, +0.0882, +0.1674, +0.0885, -0.3224, +0.0901, +0.3159, +0.4244, -0.4800, +0.1526, +0.0061, +0.1857, -0.0979, -0.2406, +0.0730, +0.1405, -0.0050, +0.1824, -0.0160, -0.2087, -0.1628, -0.2413, +0.2396, +0.6054, -0.6268, -0.0734, -0.1073, -0.1242, +0.2214, -0.4174, -0.5380, +0.4581, +0.1509, -0.7234, -0.9114, -0.0332, -0.2386, -0.0100, -0.2586, -0.2640, +0.0461, +0.1833, +0.1354, -0.3236, +0.1699, +0.1157, -0.0966, -0.5738, +0.1736, -0.4470, -0.5721, +0.3542, +0.4103, -0.3574, +0.4307, +0.0229, -0.1325, +0.3490, -0.0651, +0.1874, -0.2291, +0.0530, +0.3738, +0.1955, +0.1027, +0.1591, +0.2077, +0.2210, +0.1001, -0.3243, +0.2351, -0.3068, -0.1417, -0.0853, -0.3813, -0.3430, +0.1111, -0.3707, -0.4524, +0.3608],\n[ +0.0381, -0.3280, -1.5348, -0.2310, +0.1609, +0.2576, -0.2199, +0.1194, +0.1368, +0.5134, -0.6136, +0.5420, -0.0032, +0.0184, +0.0213, -0.6279, -0.6801, +0.2122, -0.1931, +0.0708, -0.4318, -0.1370, -0.2482, -0.0169, -0.4338, -0.1085, +0.6117, -0.1325, -0.1302, +0.4041, +0.3929, -0.0002, +0.1998, +0.0515, -0.3162, +0.4465, -0.1745, -0.3913, -0.5087, -0.1100, -0.2249, -0.2461, -0.2671, +0.1915, +0.0139, +0.0838, -0.3395, -0.1856, +0.2210, -0.0315, +0.2527, -0.0433, -0.1347, +0.2930, -0.2082, +0.2286, +0.0255, -0.2495, -0.3158, -0.3498, +0.3091, -0.0525, -0.1256, -0.3423, -0.0553, -0.0529, +0.1197, -0.7197, +0.7501, +0.0290, -0.0370, -0.6256, +0.0000, +0.0983, -0.4345, +0.0663, +0.1609, +0.0146, +0.2122, +0.1216, +0.1759, +0.0309, -0.4094, +0.2527, +0.0841, +0.4982, -0.1519, -0.1223, +0.0607, -0.1248, +0.2532, +0.1901, +0.1263, -0.2375, -0.1312, -0.2293, -0.2532, +0.5176, -0.2599, -0.0855, -0.1362, -0.5210, +0.0224, +0.3870, +0.2114, +0.3244, +0.4654, -0.7012, -0.0922, -0.9771, -0.2505, +0.5230, -0.5313, -0.5063, -0.0315, -0.5956, +0.0800, -0.0199, -0.1976, -0.9358, -0.4071, -0.1917, +0.1467, -0.2294, -0.1170, +0.0695, +0.1403, -0.4278],\n[ +0.1029, +0.1445, +0.3256, -0.0877, +0.5028, -0.2229, -0.0061, +0.0184, -0.3173, -0.1645, +0.6963, -0.0568, -0.6695, -0.3263, -0.1246, -0.6652, -0.0307, -0.2701, -0.3961, +0.3665, -1.0864, +0.1994, +0.3494, +0.1447, +0.2856, +0.1528, +0.2521, +0.0676, -0.0895, -0.1963, +0.1029, -0.0675, +0.0657, -0.0773, -0.4179, +0.1854, -0.9043, -0.3480, +0.0186, -0.0853, -0.3580, -0.6232, +0.1994, -0.3973, +0.1558, -0.2424, -0.6349, +0.2492, +0.0654, -0.4719, -0.6174, +0.2362, -0.4908, -0.0194, -0.0402, -0.0680, -0.0284, -0.0382, -0.3183, +0.0208, -0.7145, +0.0223, +0.0710, -0.5593, +0.3360, -0.0591, -0.3271, -0.2059, +0.4912, +0.3903, -0.0288, +0.5235, +0.3245, +0.0820, -0.1104, +0.0020, -0.2973, -0.7333, +0.1925, -0.1251, +0.1177, +0.4494, +0.1400, -0.5209, -0.1851, +0.0925, +0.1926, +0.3427, -0.2087, +0.1852, -0.5303, -0.7136, -0.2598, -0.3725, +0.3738, +0.6966, -0.7630, -0.4744, +0.1341, -0.0536, -0.2411, +0.4594, +0.1060, +0.2383, -0.1521, -0.0214, -0.0980, +0.0557, +0.4497, +0.1257, +0.0278, -0.0656, -0.5980, -0.0110, +0.0955, +0.4506, -0.4515, +0.1015, +0.1491, +0.0256, +0.0409, -0.2116, -0.1528, +0.1721, +0.4331, -0.5740, -0.1334, -0.1490],\n[ +0.2384, +0.1767, +0.1947, +0.0587, +0.0436, -0.4647, +0.0460, -0.3978, +0.3851, -0.6429, -0.0757, +0.0771, +0.3342, -0.5142, -0.0504, -0.0614, +0.1761, +0.2849, -0.0273, -0.2299, -0.0279, -0.1199, -0.2008, +0.3238, +0.1584, +0.2584, +0.0005, -0.0247, +0.1486, -0.0961, -0.2141, +0.0457, +0.2077, 
-0.4971, +0.2886, -0.0251, +0.2316, -0.0343, +0.2933, +0.2489, +0.1717, +0.3198, +0.1717, -0.0490, +0.0506, -0.7074, -0.2308, -0.2186, +0.1620, +0.0328, +0.1614, +0.1332, -0.0487, -0.3712, -0.2751, -0.1583, +0.2474, +0.0419, -0.4341, -0.2045, +0.1139, -0.0188, -0.3290, -0.0657, +0.0096, -0.2058, -0.4276, -0.1128, -0.2121, +0.3103, -0.1906, +0.0304, +0.1177, -0.0511, +0.1057, -0.7063, +0.3458, -0.2067, -0.0213, +0.1076, +0.0151, -0.1120, -0.2189, +0.1475, -0.0077, -0.3843, -0.2418, -0.6991, +0.1953, -0.4116, -0.1400, +0.1521, -0.5215, -0.1150, -0.0251, +0.0277, -0.0684, -0.0070, +0.2457, +0.2435, -0.0386, -0.0929, -0.0177, -0.2300, +0.0347, +0.1269, -0.1599, +0.2427, -0.3234, +0.0156, -0.2406, -0.1611, -0.2369, +0.1289, -0.1268, -0.1186, -0.2554, +0.1911, +0.3922, -0.7103, -0.2174, -0.3823, +0.1010, +0.1203, +0.4065, -0.1545, +0.3333, -0.1270],\n[ +0.0320, +0.3374, -0.2328, -0.5468, +0.3057, +0.0867, +0.1920, +0.3238, +0.0958, +0.0865, -0.0978, -0.9149, +0.3992, -0.7304, -0.7107, +0.1157, +0.3527, +0.1362, -0.1631, -0.1291, -0.1038, +0.0851, +0.2969, -0.1787, -0.1327, -0.0016, -0.3977, -0.2473, -0.1113, -0.3001, +0.5036, -0.0856, +0.1597, -1.0896, +0.3766, +0.1266, +0.0147, -0.7126, +0.2683, -0.3971, -0.2376, +0.1728, +0.0654, +0.0889, -0.2988, +0.0767, +0.1215, -0.4817, -0.0649, -0.0350, -0.6930, -0.2363, -1.2543, -0.0226, -0.2209, +0.0390, +0.2717, -0.1385, -0.3073, -0.1550, -0.5586, -0.4812, +0.2852, -0.0278, -0.2483, +0.0254, +0.4223, -0.1026, -0.3841, -0.1561, +0.2930, +0.3772, -0.0625, -0.0882, +0.2467, +0.1206, -0.0632, +0.2479, -0.1358, +0.0352, -0.0730, +0.1995, +0.1430, +0.0082, -0.1685, -1.1753, -0.7206, -0.2929, -0.0186, -0.2444, +0.4176, -0.7420, -0.0811, +0.1876, -0.1151, +0.1599, -0.1971, -0.5971, -0.1488, +0.1225, +0.0460, -0.0519, -0.1219, -0.1250, -0.1093, -0.2462, -0.2120, +0.1604, +0.1361, +0.2755, +0.0633, +0.2230, +0.6084, +0.2955, +0.1139, +0.0836, -0.5077, +0.2327, +0.0441, -0.7536, -0.2307, +0.0111, +0.2659, -0.3169, -0.3589, -0.0047, -0.0293, -0.1917],\n[ +0.1849, +0.4786, +0.1204, -0.3298, +0.2282, +0.3278, -0.1501, -0.0854, -0.0439, -0.2816, -0.5327, +0.3156, +0.3149, -0.0344, -0.1886, -0.4860, -0.1034, +0.0903, +0.0569, +0.1075, +0.0721, +0.1677, -0.0501, +0.3348, -0.2958, -0.5643, -0.5416, -0.0616, +0.0486, -0.2416, +0.0556, -0.1499, -0.1884, +0.0063, -0.0879, +0.2615, +0.2102, -0.0322, +0.5728, -0.7324, -0.6729, -0.1240, -0.0435, -0.2956, +0.3191, -0.2177, -0.0368, -0.0261, -0.0666, -1.2822, -0.0419, -0.2362, -0.0318, +0.3002, +0.0324, -0.0798, +0.2130, +0.1350, +0.0346, +0.2781, +0.3518, +0.3837, -0.2617, -0.9864, -0.1951, +0.0896, +0.1452, -0.0149, -0.4673, -0.2097, +0.2540, -0.3603, -0.2436, -0.5239, +0.0675, -0.2038, -0.4511, -0.9040, +0.2373, +0.2649, -0.7154, -0.0777, -0.2655, +0.0074, -0.6301, +0.3651, -0.2757, +0.0316, -0.1454, -0.0407, -0.5047, -0.4833, -0.3325, +0.1745, +0.2487, +0.3029, +0.5754, +0.1331, +0.2339, -0.5040, -0.7949, +0.3252, -0.3651, -0.0125, -0.1580, +0.0285, -0.0523, -0.2012, -0.0764, -0.1872, +0.4235, -0.7683, +0.5005, +0.1516, -0.1920, +0.3039, -0.3789, -0.1149, -0.4491, +0.1532, -0.0276, -0.5430, -0.1058, +0.0728, -0.0873, -0.0693, -0.2627, -0.4874],\n[ +0.1766, +0.2027, +0.2184, -0.2155, +0.2002, -0.0767, +0.0247, +0.0769, +0.1097, -0.5324, +0.0716, -0.6547, +0.1549, -0.3553, -0.1583, -0.6442, +0.2656, -0.0242, -0.2430, +0.2241, +0.1981, +0.1088, +0.0831, -0.5510, +0.1084, -0.6368, +0.1111, -0.5365, -0.3738, +0.3743, -0.1497, +0.1148, -0.3921, +0.0518, -0.0321, -0.2311, +0.2755, -0.7492, -0.4376, -0.2182, -0.6939, -0.8483, 
-0.2843, -0.1963, -0.2650, -0.1096, -0.1861, +0.0298, +0.0037, +0.1153, -0.3358, -0.3163, +0.0478, -0.3209, -0.0710, +0.3083, -0.2630, -0.1924, -0.1803, -0.1369, -0.3061, -0.1886, -0.0521, -0.0039, -0.4548, -0.3571, +0.1289, +0.0565, -0.3850, -0.1660, +0.3697, +0.0093, +0.1096, -0.7151, -0.5122, -0.1048, -0.0407, +0.1807, -0.2780, -1.1252, -0.0171, -0.0800, -0.1387, -0.5066, -0.1341, -0.4523, +0.2285, +0.1949, +0.2985, -0.3676, -0.0402, +0.3106, -0.1804, -0.1138, -0.1518, +0.0386, -0.1846, +0.4102, +0.1630, -0.3054, +0.1310, -0.0050, +0.1931, +0.2023, -0.4601, -0.4869, -0.1454, -0.7915, -0.8167, -0.1141, -0.0829, -0.5105, -0.2998, +0.1506, +0.1659, -0.0161, +0.1262, -0.2265, -0.0906, -0.1643, -0.0997, -0.7213, -0.4537, -0.1466, -0.3031, -0.1776, -0.3410, +0.3465],\n[ -0.0680, -0.5316, -0.5061, -0.0773, -0.3060, -0.2216, -0.6029, -0.1825, -0.1359, +0.2300, +0.0623, +0.0920, +0.0022, -0.1120, -0.1106, -1.1256, +0.0279, -0.3801, +0.0112, +0.1688, +0.1709, -0.5387, -0.0696, -0.3568, -0.4497, +0.4913, -0.0399, -1.3545, -0.2932, +0.1045, +0.3458, -0.4757, -0.0703, -0.1544, -0.6656, +0.3997, -0.0279, +0.2120, -1.2224, -0.6230, -0.4677, -0.1049, +0.2119, -0.1797, -0.4644, -0.1679, +0.2749, -0.1451, -0.4456, +0.3104, -0.4102, +0.3043, +0.5440, -0.1511, +0.1171, +0.3151, -0.0670, -0.1808, -0.5695, -0.5017, +0.0862, -0.3984, +0.0258, -0.5704, +0.1754, -0.0489, +0.0302, +0.4248, -0.1910, -0.5007, -0.2559, -0.0414, -0.1863, +0.1481, -0.0454, -0.3999, +0.0606, +0.1871, +0.1180, -0.0014, -0.4116, -0.8299, +0.6789, +0.0242, -0.2057, -0.3620, +0.0898, +0.4069, +0.0505, +0.1478, -0.2953, -0.1294, +0.3536, -0.2027, -0.1965, +0.2124, -0.0305, -0.1242, +0.3511, +0.0112, -0.1523, -0.2483, +0.1767, +0.0946, -0.0883, -0.2642, -0.1009, -0.1412, -0.1936, -0.0337, -0.0854, +0.1496, +0.1209, +0.1613, -0.2123, -0.5184, -0.5626, -0.1087, -0.1012, -0.6158, -0.7041, -0.4685, -0.8154, +0.2760, -0.2103, -0.1968, +0.0849, -0.3954],\n[ +0.2532, +0.4208, -0.3291, -0.5551, -0.1859, -0.1518, -0.2786, +0.0658, -0.6929, -0.2819, -0.2395, +0.1718, +0.0343, +0.0182, +0.0734, -0.8299, -0.2525, +0.1285, +0.0562, -0.1593, -0.2454, -0.1303, -0.2009, -0.2577, +0.2121, -0.4224, -0.4803, -0.2205, +0.4951, -0.0603, -0.0738, -0.0561, +0.2528, -0.7239, +0.2362, -0.1281, -0.2817, -0.0915, -0.3098, -0.3482, -0.1657, +0.1685, +0.0365, -0.1210, -0.4351, +0.1768, -0.2815, -0.2552, -0.8653, +0.2226, +0.4609, -0.5362, -0.1000, -0.5827, -0.6283, -0.1828, -0.5187, -0.4818, -0.2862, -0.3698, -0.4264, +0.5897, +0.3011, +0.1451, +0.1300, -0.6738, +0.1766, +0.4159, -0.1141, +0.4150, -0.2877, -0.4758, +0.0208, +0.0290, -0.1260, -0.2702, -0.7108, -0.0338, +0.2733, +0.0843, -0.0556, -0.0368, +0.0105, -0.3436, +0.0248, -0.1892, -0.2123, -0.0928, +0.2068, -0.3971, +0.1089, +0.3620, -0.1929, -0.5377, -0.1463, -0.1753, -0.1031, -0.2935, +0.4052, +0.0367, +0.0097, -0.5875, +0.1436, -0.4216, -0.0872, -0.2351, -0.3716, -0.1434, -1.0513, +0.2828, -0.1845, +0.1664, -0.1173, +0.2712, +0.0878, -0.7084, -0.6339, -0.4937, +0.1035, -0.2160, -0.4060, -0.2531, -0.6691, +0.1202, +0.2531, -0.0490, +0.5874, +0.0633],\n[ -0.6739, +0.0277, -0.9564, -0.5067, -0.5287, +0.0684, -0.5332, -0.2934, +0.2182, +0.1192, -0.4239, -0.3156, -0.0208, +0.4953, -0.3716, -0.3657, -0.4834, -0.1870, -0.1509, -0.3200, -0.0222, +0.0546, +0.3764, +0.5946, -0.4095, +0.2046, -0.2208, +0.1072, -0.4266, -0.7344, -0.1398, -0.3482, +0.1835, +0.0850, -0.2169, -0.1673, +0.1917, -0.0598, -0.0835, +0.3213, -0.4629, +0.0055, -0.0855, -0.0968, -0.6663, -0.4341, -0.3010, -0.5688, +0.2849, +0.1352, -0.1461, 
+0.0679, +0.1243, +0.0837, +0.2299, +0.3890, +0.3232, +0.8568, -0.3093, +0.1023, -0.1423, -0.0950, +0.3483, -0.0398, -0.8729, +0.0589, -0.3755, -0.1726, -0.2203, -0.3867, -0.0227, -1.0963, -0.0057, +0.0516, -0.2886, +0.1596, +0.2880, +0.1165, +0.0612, +0.2892, -0.6804, +0.2979, +0.1742, -0.0116, +0.2316, -0.0595, +0.0568, -0.5258, -0.2719, -0.4404, +0.1292, +0.1016, +0.7038, -0.0262, +0.5274, -0.0229, +0.0430, +0.2201, +0.1085, -0.0375, +0.1192, -0.0778, +0.0535, -0.5996, +0.3843, -0.5089, +0.0677, -0.2134, -0.5909, +0.0015, -0.7516, +0.0565, +0.0375, -0.5614, -0.1333, +0.2093, -0.0427, -0.4205, -1.4405, +0.3511, -0.1710, -0.5402, -0.2186, -0.1229, -0.5740, -0.1826, -0.0947, -0.2646],\n[ -0.4790, -0.0459, -0.7397, -0.1887, -0.0117, -0.0881, -0.2339, +0.4489, -0.2226, -0.2678, -0.4339, -0.5233, -0.4704, -0.2454, -0.6791, +0.5416, +0.1632, +0.0939, +0.2976, +0.4227, -0.4494, -0.5834, -0.0442, +0.0945, -0.4829, -0.1668, +0.1462, -0.0841, +0.0265, -0.5589, +0.1268, +0.6885, +0.1927, -0.3751, +0.1673, +0.2637, +0.1409, -0.2660, +0.0257, -0.0269, -0.2051, +0.4255, -0.1496, +0.1084, -0.0217, -0.2359, -0.1428, -0.1115, -0.7085, -0.6233, -0.0985, -0.1075, -0.1553, -0.2027, +0.1923, +0.0471, +0.2331, -0.7869, -0.1220, +0.1538, +0.1171, -0.1364, +0.2306, -0.3217, -1.2153, -0.2076, -0.1103, -0.3231, -0.6678, -0.4851, -0.1963, -0.6715, -0.6025, +0.2298, -0.4343, -0.1760, -0.4037, -0.2442, +0.2234, -0.1880, +0.2596, -0.1924, -1.0538, +0.1892, -0.1999, -0.0033, -0.0755, +0.0571, -0.3725, +0.0816, -0.3538, -0.3673, -0.1607, -0.7918, +0.1611, +0.4097, -0.2851, +0.3155, +0.2837, -0.7129, +0.2556, -0.7155, -0.2264, -0.0045, +0.2039, -0.0958, -0.3221, +0.1626, -0.3246, -0.3097, -0.1555, -0.0424, -0.0175, +0.2186, -0.0899, -0.3513, -0.3811, -0.4246, -0.1244, -0.1209, +0.1675, -0.5267, -0.0279, +0.4418, +0.0478, +0.1246, +0.0393, -0.4219],\n[ -0.3571, +0.0904, -0.8551, +0.4132, -0.0919, +0.3857, +0.2689, +0.2051, +0.1931, -0.3377, -0.3208, -0.0723, -0.1699, +0.0462, -0.1263, +0.0483, +0.1950, -0.1277, +0.0313, +0.2242, +0.0103, -0.2061, +0.0631, +0.1930, -0.0647, -0.4123, +0.1690, +0.1474, +0.1182, -0.5403, +0.3640, +0.0169, +0.3996, +0.2360, +0.2264, +0.0977, -0.4711, +0.3267, -0.2800, -0.4045, -0.0261, -0.0814, -0.1084, +0.0173, +0.0070, -0.1826, +0.1416, -0.3700, +0.1904, +0.3461, -0.8709, +0.2058, -0.0601, -0.1735, -0.1307, -0.0786, -0.0667, +0.0989, -0.1120, +0.5435, +0.2793, +0.3009, -0.1373, +0.1923, +0.3372, -0.0255, -0.4204, +0.0504, +0.2350, +0.1833, -0.0786, -0.2449, -0.0600, -0.1826, -0.6286, -0.0817, -0.2410, +0.1011, +0.2879, -0.7720, +0.0685, -0.5338, -1.0387, -0.2374, +0.0941, +0.2234, +0.1887, -0.6712, +0.0789, +0.0920, -0.1733, +0.2348, +0.3533, +0.5173, -0.2887, +0.1298, -1.0297, -0.0811, +0.3376, +0.4740, -0.3197, -0.1907, -0.1645, -0.5704, -0.1747, +0.1463, +0.3165, +0.3489, -0.0581, -0.2501, +0.1131, -0.1146, -0.2154, -0.1804, -0.2339, -0.2814, -0.5182, -0.3433, +0.3677, +0.2729, -0.1904, -0.0585, -0.7208, +0.3472, -0.0146, +0.2520, -0.2723, -0.5874],\n[ -0.5168, -0.1916, +0.0044, -0.2001, -0.4914, -0.2367, +0.1037, -0.3593, -0.2739, +0.0966, -0.4070, -0.6904, -0.6564, +0.0903, -0.2040, +0.4127, -0.4820, +0.1590, +0.1689, -0.6444, +0.2427, +0.0580, -0.2541, -0.0060, +0.1797, -0.7206, +0.0552, -0.2466, +0.0206, +0.4403, +0.2936, -0.4458, +0.1578, -0.2735, +0.1138, +0.3035, +0.3536, -0.0996, -0.1689, -0.0803, +0.0302, -0.0041, +0.2768, +0.0295, -0.5365, -0.1148, +0.1867, +0.4169, +0.1963, -0.1607, +0.4705, -1.0241, +0.4417, +0.1275, -0.7696, -0.2660, +0.0380, -0.2972, +0.3949, +0.7762, 
-0.3281, +0.0599, +0.3288, -0.4384, -0.1356, -0.1655, +0.0742, -0.2041, -0.1239, -0.1804, +0.2637, +0.0942, -0.1011, -0.2973, -0.4499, +0.0090, +0.1412, -0.2019, +0.0945, +0.1347, -0.1332, -0.3886, +0.1739, +0.1864, +0.0391, -0.0350, +0.3963, -0.2367, +0.0131, -0.5565, -0.2354, +0.3302, +0.1141, -0.2861, -0.4460, +0.4304, -0.1393, -0.4753, -0.1158, -1.3481, +0.0292, -0.1015, -0.1453, +0.2802, +0.0700, -0.3325, +0.0110, +0.2497, +0.0056, +0.3542, -0.2658, +0.2565, +0.1598, -0.0339, -0.3039, -0.2208, -0.2119, -0.0196, +0.2157, -0.3472, -0.3380, -0.0314, -0.3920, -0.3210, +0.3201, -0.8747, +0.1812, +0.0732],\n[ -0.5735, -0.1147, -0.1845, -0.0670, -0.3959, +0.0605, +0.0751, -1.3236, -0.2729, -0.0563, +0.0870, -0.2487, -0.2976, -0.1418, -0.0036, -0.3945, +0.2133, +0.1156, +0.0328, -0.2110, -0.0954, +0.0828, +0.0342, +0.2200, -0.0155, -0.2676, +0.1296, +0.2238, +0.3469, -0.4331, +0.3707, -0.0087, +0.2342, -0.1123, -0.2393, +0.0042, -0.7079, -0.5531, -0.2589, -0.1329, -0.2473, -0.0557, -0.1092, -0.1055, +0.3648, +0.0103, -0.2566, +0.0428, +0.3451, -0.1363, -0.5322, -0.7636, -0.3877, +0.2084, +0.0500, -0.9000, +0.0186, -0.6299, -0.6176, +0.2841, -0.6676, +0.1776, -0.3414, -0.3621, -0.4323, -0.8091, +0.1711, -0.5524, -0.1845, -0.1440, +0.2521, +0.3597, -0.2215, -0.0636, -0.1948, -0.0981, -0.0632, +0.3646, -0.7443, -0.2697, +0.0858, -0.1100, -0.2158, +0.2193, -0.1480, -0.7157, +0.0280, +0.1051, -0.2165, -0.0942, -0.0988, -0.0529, +0.1711, +0.5135, -0.4368, +0.2158, -0.1203, +0.2175, +0.0274, -0.1588, +0.0862, -0.2315, +0.2650, -0.4179, +0.0053, +0.2025, +0.1570, -0.4194, +0.3459, -0.6148, -0.0766, -0.5666, +0.1655, -0.1656, +0.0047, +0.1534, +0.0364, -0.1854, -0.0359, -0.1716, -0.3132, -0.1275, +0.3301, +0.0841, +0.0211, -0.1663, -0.1611, +0.3612],\n[ -0.3046, +0.1990, +0.3593, -0.0441, +0.0111, -0.3952, +0.0887, +0.2970, +0.0432, -0.3783, -0.1590, -0.1455, +0.0672, +0.3143, -0.4494, -0.2105, -0.2141, -0.5621, -0.5350, +0.1795, +0.0950, -0.6389, -0.1119, -0.3228, -0.1351, -0.0998, +0.1916, -0.6883, -0.0802, -0.2211, -0.4179, -0.4322, -0.1921, +0.2114, -0.3894, +0.3510, -0.8963, -0.2446, -0.3355, -0.2554, +0.3715, -0.0832, +0.3089, +0.1687, -0.4003, +0.2255, +0.0172, -0.0353, -0.3352, +0.1071, -1.2214, -0.2593, +0.1820, -0.3545, +0.0520, +0.0625, -0.3255, +0.0246, -0.9975, +0.5751, -0.1415, +0.1819, -0.1992, +0.0300, -0.1407, -0.6492, +0.0380, -0.1231, -0.0581, +0.2151, +0.2173, +0.0064, -0.1583, +0.0302, +0.0198, -0.1276, -0.3126, -0.2499, -0.7244, -0.5974, -0.0122, -0.3023, -0.3659, +0.0236, -0.8076, -0.0052, -0.4533, +0.1279, +0.1329, -0.4658, -0.5219, -0.0994, -0.3155, -0.2044, -1.4585, -0.4960, +0.6144, -0.2893, +0.0656, -0.3623, +0.0771, -0.0557, -0.3057, -0.0149, -0.1504, +0.1180, -0.0191, -0.2747, +0.1425, -0.5355, -0.2589, -1.0663, -0.0238, +0.2357, -0.0646, -0.8185, -0.0544, +0.0328, -0.0684, -0.0288, -0.6393, +0.0760, -0.3438, -0.4502, -0.1043, +0.2516, +0.2664, -0.2895],\n[ +0.0107, -0.3197, +0.2582, +0.1588, -0.7646, -0.7069, -0.4099, +0.1225, +0.2743, -0.3283, -0.0027, +0.1298, -0.1093, +0.4504, -0.1028, -0.5416, -0.2146, +0.2392, -0.0145, +0.1768, +0.0453, +0.1953, -0.8660, -0.5489, +0.0746, +0.3120, +0.1501, +0.0138, -0.7171, +0.1558, -0.8054, +0.0637, -0.3851, -0.4554, -0.0707, +0.0844, +0.1226, +0.0906, -0.6054, +0.5808, +0.0931, -0.4798, -0.1459, -0.3292, +0.1951, -0.2652, -0.2279, -0.5004, +0.7729, -0.2178, -0.6389, -0.2515, +0.1850, -0.6675, -0.0282, -0.3199, +0.1368, +0.3026, -0.4689, -0.0329, +0.2913, +0.0254, -0.3636, -0.1038, +0.1523, +0.1738, -0.1826, +0.0448, -0.0613, 
-0.0619, +0.0614, -0.1873, -0.3742, +0.0553, -0.1018, +0.0197, +0.1035, -0.2413, -0.4748, +0.2443, -0.1595, +0.1139, -0.2503, +0.3971, -0.5707, -0.6110, +0.3676, -0.6585, -0.0983, -0.8527, +0.5715, -0.5112, +0.0883, -0.1990, -0.1421, +0.5473, -0.0631, -0.3381, -0.0705, +0.2045, -0.3428, -0.3719, +0.1965, -0.4464, -0.2206, -0.0407, -0.3212, -0.0742, -0.0010, -0.0920, +0.0179, -0.1233, -0.4241, +0.0697, +0.0483, -0.4507, -0.7664, -0.4521, -0.2175, -0.3721, -0.3304, -0.2537, +0.1789, -0.2410, -0.0701, +0.3168, -0.2336, -0.2128],\n[ +0.1155, -0.0102, +0.4249, -0.0738, -0.5340, +0.0663, -0.0927, +0.1452, -0.3015, -0.2405, -0.6384, +0.0373, -0.0334, +0.1594, -0.0984, -0.3201, +0.3151, -0.3141, -0.0241, -0.2851, +0.0472, +0.5239, +0.2989, +0.0661, +0.0884, +0.1384, +0.1193, +0.3296, -0.3185, +0.2506, +0.1458, +0.1626, +0.2309, -0.3974, -0.0698, -0.1044, +0.0031, -0.5501, +0.0439, -0.7990, +0.2289, -0.1844, +0.3783, -0.4319, +0.1479, -0.1089, -0.0206, +0.0524, +0.2799, +0.1135, -0.2630, -0.3034, -0.0279, -0.3430, -0.3194, +0.2728, -0.2198, +0.0995, -0.5879, -0.0968, -0.6351, -0.2687, +0.2761, -0.1994, +0.1186, +0.0766, -0.0157, -0.4284, +0.4550, +0.0266, +0.1227, +0.1949, +0.0795, -0.1303, -0.0641, -0.2911, -0.2093, +0.1755, +0.3525, +0.2074, +0.0776, +0.0871, -0.4309, +0.0466, +0.2179, -0.7288, -0.3884, +0.0433, +0.0226, -0.5845, -0.3762, +0.1555, -0.1571, +0.1242, -0.0361, +0.0694, -0.0607, -1.1456, -0.2432, +0.2734, -0.0147, +0.0134, -0.1083, -0.1578, +0.2299, +0.1616, +0.3512, -0.1246, +0.1847, -0.1015, +0.3420, +0.1070, +0.0237, +0.1442, +0.0974, -0.0842, -0.0235, +0.2302, -0.1871, +0.0759, -0.7443, +0.0672, -0.9173, -0.2906, +0.2102, -0.0130, -0.7445, +0.2416],\n[ +0.0570, -0.1107, -0.1294, +0.2502, -0.2984, +0.0134, +0.2178, -0.2031, -0.0498, -0.9746, -0.1293, +0.3008, -0.2820, +0.2322, +0.0366, +0.0613, -0.2685, +0.0761, +0.0324, +0.6167, -0.0038, -0.3098, +0.1486, -0.2141, -0.1495, -0.0758, -0.0287, -0.1225, -0.0489, -0.2928, -0.0031, -0.2655, -0.2361, +0.0487, +0.1637, -0.0362, -0.6581, -0.2809, -0.2362, -0.3010, -0.2808, +0.1002, +0.3643, +0.1101, -0.5130, -0.1977, -0.7063, +0.0310, -0.1288, -0.0684, -0.2172, -0.9830, -0.9522, +0.2543, -0.2441, -0.2201, -0.3622, -0.6192, -0.0511, -0.3572, -0.2073, -0.3067, -0.5459, -0.8507, +0.1938, -0.8735, -0.1393, +0.1941, -0.5221, -0.2935, -0.1622, +0.2692, -0.6518, -0.0784, -0.0032, +0.0453, +0.0612, +0.0740, +0.0828, -1.5962, -0.0105, -0.2185, +0.1251, +0.2271, -0.0830, -0.3206, +0.2678, -0.3253, +0.1302, -0.2226, -0.3965, +0.0605, +0.2157, +0.0511, +0.1486, -0.0471, -0.7112, +0.1349, +0.0479, -0.3281, +0.0545, -0.1693, +0.1596, +0.0366, +0.3659, +0.4518, -0.5676, +0.0444, -0.4920, -0.4001, -0.3816, -0.3445, +0.3152, +0.2686, +0.0873, -0.6441, -0.1246, +0.0525, -0.0302, -0.6167, -0.3785, -1.3627, -0.6195, -0.2583, -0.0883, +0.0941, -0.0932, +0.1709],\n[ -0.0902, -0.0489, -0.5621, +0.1108, -0.0253, +0.2008, -0.0230, -0.1481, -0.1891, -0.0729, -0.0272, +0.0692, +0.0149, +0.4408, +0.5853, +0.1148, -0.5399, +0.1653, -0.0507, -0.0729, +0.0799, -0.5047, +0.0211, +0.0545, -0.1527, -0.0146, +0.1207, +0.2242, +0.5105, -0.1659, -0.3076, +0.0075, -0.0780, -0.0728, -0.9238, +0.0010, -0.2337, +0.1043, +0.3628, -0.3925, +0.0371, -0.0933, -0.1642, -0.4299, +0.1116, +0.3309, +0.1078, -0.1654, +0.1247, -0.0723, +0.1428, -0.0070, +0.2646, -0.2366, -0.2596, +0.0486, +0.1331, -0.3141, +0.2621, +0.1173, +0.1017, +0.3161, -0.3979, +0.0467, -0.0058, -0.1483, -0.4923, +0.3230, +0.3847, -0.8560, -0.1140, -0.2858, +0.0235, +0.0051, +0.2581, -0.1739, +0.0148, -0.0270, 
-0.1098, -0.4381, +0.2964, +0.0043, -0.2012, +0.3487, +0.0805, -0.3836, +0.3054, -0.2178, -0.1003, -0.8272, -0.8094, -0.1763, -0.4160, -0.0718, +0.0883, -0.5837, -0.0250, -0.2355, +0.2095, -0.3145, -0.4140, -0.1567, +0.1556, -0.1557, -0.0385, +0.4225, -0.4201, +0.4154, -0.1397, +0.0064, -0.4551, +0.0587, -0.3060, +0.2072, -0.1990, -0.6681, +0.4071, +0.1429, +0.1864, +0.0583, +0.3094, +0.1376, -0.0151, +0.2482, +0.0534, +0.3590, -0.0524, +0.0072],\n[ +0.2165, -0.0784, +0.4527, -0.2131, -0.0842, -0.2151, +0.2852, +0.3085, -0.0545, +0.0476, -0.3126, -0.1004, -0.3292, -0.3253, -0.0299, -0.3426, -0.6258, +0.2322, +0.1995, -0.7055, -0.4208, -0.2351, +0.5525, -0.0116, -0.1168, +0.3035, -0.4497, -0.0070, -0.6314, +0.2021, +0.2401, -0.6943, +0.6166, +0.0522, -0.1277, +0.0814, +0.1048, -0.1549, +0.0236, -0.0543, +0.1134, -0.1267, +0.0161, +0.0846, -0.3257, +0.3672, +0.4620, -0.0554, -0.6860, -0.5655, -0.0291, -0.0336, -0.0591, -0.0668, +0.4952, -0.6696, -0.6313, -0.4199, +0.1644, -0.1448, +0.3275, +0.0785, +0.1629, -0.3694, -0.0415, +0.3981, -0.3228, -0.4313, -0.2086, -0.3006, +0.0319, -0.1707, -0.6463, +0.4033, -0.2931, -0.2915, -0.0374, -0.5228, -0.0761, -0.0541, -0.0648, -0.4858, -0.3321, +0.0406, +0.0690, +0.1793, +0.3161, +0.3441, -0.1787, +0.1655, +0.1323, +0.0511, -0.5903, -0.4039, -0.5803, -0.0993, +0.0511, -0.4996, -0.2091, -0.8687, +0.3873, -0.8987, -0.7894, +0.4019, +0.2001, -0.0819, -0.2723, -0.1041, -0.0835, +0.3914, -0.1363, -0.0651, +0.1017, +0.1881, -0.1245, +0.1765, +0.1574, -0.4187, -0.3413, -0.4192, +0.0699, +0.2365, -0.2552, -0.6480, -0.2559, -0.0381, -0.2464, -0.2568],\n[ +0.1548, +0.0221, +0.3021, +0.0748, -0.0730, -0.3777, -0.0878, +0.1624, +0.0181, +0.0029, -1.0661, -0.2232, -0.0371, +0.2084, +0.0641, +0.3700, +0.2076, -0.1308, -0.6938, -0.0521, +0.3410, -0.1052, +0.0010, +0.1400, -0.3497, -0.0732, +0.0856, +0.1817, -0.3963, -0.3211, +0.1612, +0.0391, +0.1267, +0.1567, -0.8114, +0.0540, -0.3109, -0.1081, +0.1441, +0.1741, -0.1098, +0.2501, +0.1449, -0.0120, -0.1075, -0.2483, -0.7078, -0.2693, -0.0336, +0.0171, -0.0570, +0.1460, +0.1243, -0.1844, +0.1645, -0.4268, +0.0438, -0.2597, +0.0547, -0.1758, +0.0263, -0.8264, +0.0728, -0.2166, -0.1181, +0.2654, -0.2549, +0.0277, +0.1403, +0.4358, -0.0468, +0.0663, -0.5248, +0.2055, -0.2036, -0.2702, -0.7653, +0.1719, +0.0243, +0.0066, +0.0388, -0.1156, -0.2506, -0.1971, +0.1007, -0.0745, +0.0724, -0.1242, +0.0428, +0.0739, +0.1192, +0.1407, -0.0524, +0.0955, -0.0253, -0.0342, -0.5003, +0.0988, +0.4093, +0.0309, -0.1069, -0.4025, +0.0368, -0.0331, -0.0844, +0.1159, -0.0163, +0.1731, +0.3613, -0.0142, -0.0990, +0.0854, +0.0900, -0.1528, -0.1269, -0.3659, -0.0846, +0.1304, +0.0177, +0.2548, +0.1939, -0.0656, -0.1266, +0.0675, +0.0898, -0.0352, -0.0179, -0.0786],\n[ -0.1477, -0.3154, +0.0705, +0.0553, -0.0120, -0.2545, -0.1162, -0.0357, -0.0614, -0.1063, -0.1024, -0.2153, -0.2259, -0.0393, -0.6030, -0.7395, +0.2510, +0.6092, -0.1453, +0.1808, +0.0889, +0.0641, -0.1695, -0.1308, -0.7268, +0.1051, -0.3328, -0.5555, -0.1323, -0.0470, +0.4817, -0.2015, -0.1558, -0.2925, -0.3102, -0.1149, -0.1001, -0.2164, -0.1036, +0.3618, +0.3640, +0.0790, +0.1367, +0.1737, -0.3290, -0.2891, +0.3472, +0.0828, +0.2550, +0.0813, -0.2269, -0.5814, +0.4380, -0.3854, -0.1073, -0.3434, -0.2344, -0.3333, -0.9712, +0.3074, -0.0863, +0.0101, +0.4935, -0.4539, +0.2815, +0.4703, -0.1730, -0.4645, -0.0182, -0.6704, +0.4409, +0.0128, +0.3440, +0.0742, -0.0970, +0.0381, -0.9197, +0.1764, -0.3166, -0.5216, +0.1401, +0.1343, -1.0300, +0.0069, +0.3072, +0.0852, -0.2241, 
-0.0990, -0.1649, -0.0966, -0.0429, -0.0274, +0.0635, +0.3952, -0.1940, +0.1375, -0.3230, +0.6582, +0.1195, +0.0523, +0.0047, -0.4729, +0.2348, -0.1884, +0.3227, +0.0247, +0.1520, -0.4772, -0.0548, +0.1463, +0.2491, -0.0230, +0.2104, -0.0490, -0.4887, -0.7157, +0.4019, +0.1893, +0.1334, -0.3488, -0.0239, -0.6403, -0.5436, -0.3965, +0.0472, +0.0951, +0.3826, -0.0119],\n[ +0.2345, +0.0402, +0.3096, +0.1290, -0.0478, +0.1970, +0.0213, -0.4571, +0.3814, -0.0410, -0.5314, -0.2755, -0.3864, -0.9906, -0.6441, -0.0119, +0.5029, +0.0528, +0.1426, +0.1558, -0.3429, +0.3553, -0.0140, -0.1257, +0.5183, -0.1416, +0.1306, -0.1761, +0.4427, +0.1535, +0.0231, +0.2305, -0.1755, -0.5971, +0.3964, +0.4374, -0.4033, -0.1754, +0.0906, -0.0307, -0.6401, +0.3662, -0.0488, -0.0305, -0.1799, -0.9114, -0.6223, -0.6197, +0.1350, -0.2145, +0.0602, +0.4021, -0.5092, -0.3139, +0.2932, +0.0838, -0.2362, -0.0395, -0.5941, +0.0945, -0.0716, +0.2605, -0.4417, +0.3200, +0.2494, +0.0426, -0.2447, -0.5307, +0.0455, -0.1078, +0.2131, -0.2504, -0.3621, +0.1624, -0.1116, +0.1532, +0.1341, -0.2960, -0.0485, +0.2547, -0.1497, +0.5129, -0.7462, -0.2898, -0.2437, -0.3651, +0.2329, -0.0681, -0.0266, -0.5809, -0.3785, +0.3757, +0.1572, -0.0039, -1.0997, -0.3780, -0.0220, -0.6294, -0.5320, -0.0276, -0.1373, +0.4767, +0.0286, -0.6791, -0.1094, +0.2245, +0.7109, +0.2029, -0.2408, +0.1082, +0.3360, +0.3084, +0.0412, -0.0498, -0.0024, +0.0016, -1.4137, -0.2534, -0.1986, -0.4153, +0.7093, -0.2851, -0.3148, +0.4090, -0.2257, -0.3811, +0.0754, +0.1079],\n[ +0.1737, -0.1012, +0.3843, -0.3052, +0.0454, -0.0999, +0.0607, +0.5805, -0.0379, -0.1394, +0.1873, +0.2086, -0.0369, +0.2190, -0.9182, -0.1614, +0.0378, +0.0651, -0.3026, +0.1249, -0.0495, +0.7594, +0.2221, -0.1335, +0.4775, -0.3486, -0.3901, -0.2565, -0.2424, -0.2014, -0.4993, -0.3226, -0.4516, -0.0422, -0.1739, +0.2270, +0.2250, +0.2292, +0.5843, +0.3938, -0.8007, -0.1657, -0.2410, +0.0172, -0.1222, +0.3231, -0.2751, -0.2512, +0.2162, -0.6735, -0.1592, -0.5736, +0.3512, -0.1531, +0.0767, -0.1251, -0.3526, +0.2640, +0.3694, +0.0025, -0.0883, -0.9067, +0.1201, +0.0180, +0.1013, +0.1774, +0.0458, +0.1053, -0.1279, +0.1314, -0.7170, -0.0099, +0.0331, -0.6149, -0.3880, +0.3317, -0.1987, -0.1860, +0.2181, -0.2855, +0.0181, +0.1234, +0.0470, -0.4842, +0.1452, +0.0126, -0.0338, +0.0576, -0.1719, +0.4222, +0.0425, -0.1328, -0.2182, -0.3864, -0.2636, -0.4534, +0.3275, -0.4392, -0.3175, -0.0483, +0.0194, -0.2320, +0.1536, -0.0455, +0.2573, -0.3990, -0.7663, +0.0444, +0.2442, +0.2573, -0.4178, +0.0640, -0.1134, +0.0385, -0.0557, -0.1481, -0.1792, +0.1467, +0.0214, +0.0728, -0.3110, +0.2292, -0.8796, -0.1230, -0.3657, +0.3918, +0.4578, +0.0213],\n[ -0.0058, -0.0501, -0.1675, +0.2440, +0.0982, -0.3081, +0.2197, -0.0078, +0.2634, +0.4613, +0.0454, +0.0953, +0.2481, -0.9030, -0.5912, -0.1769, +0.3100, -0.0666, -0.4307, -0.2750, +0.0457, +0.4329, +0.1131, +0.1623, -0.4575, -0.0464, +0.0587, +0.0037, +0.1761, +0.1243, -0.3456, -0.1771, +0.1526, +0.1889, +0.0453, -0.0169, +0.2543, -0.2468, -0.1749, -0.1054, +0.1356, -0.0753, -0.2053, -0.5136, +0.1298, -0.3350, -0.3511, -0.6631, +0.1640, +0.0080, -0.5760, -0.1730, -0.4837, +0.0082, -0.3955, -0.1565, +0.1675, +0.3669, +0.4185, -0.1695, +0.6183, -0.4073, -0.2162, -0.4735, -0.1205, -0.6054, -0.1156, +0.0477, +0.1337, -0.3585, +0.0539, -0.3033, -1.1556, -0.6334, +0.2199, +0.1659, -0.5817, +0.1128, -0.3548, -0.0180, -0.3140, -0.3119, -0.3624, +0.3207, -0.1126, +0.2188, +0.1764, -0.2787, -0.6278, -0.0367, +0.4690, -0.1529, +0.0098, -0.4824, +0.0185, -0.1893, 
-0.7196, +0.0138, -0.7285, -0.1746, +0.2740, -0.6991, -0.8272, -0.0170, -0.2122, +0.0546, -0.0070, +0.0135, -0.9043, -0.2043, -0.0655, +0.0533, -0.2891, -0.3379, +0.1854, +0.0024, -0.4407, -0.4790, -0.6179, -0.4374, -1.1258, -0.8652, -0.7400, +0.2832, +0.3050, +0.3432, +0.1818, -0.3477],\n[ +0.0421, +0.0760, -0.0937, -0.3289, -0.1264, -0.2004, +0.2442, -0.7426, +0.1581, +0.1091, -0.2079, +0.1762, -0.2847, +0.2595, -0.0436, +0.0309, -0.0391, -0.3279, +0.0869, +0.1888, +0.2382, -0.1285, +0.0044, -0.0093, +0.1621, +0.0545, +0.3107, -0.0062, +0.4497, +0.2697, +0.1220, -0.3746, +0.2347, +0.1905, -0.2530, -0.0934, -0.3307, -0.2066, -0.0438, +0.1751, -0.2122, -0.2497, +0.1197, +0.3855, -0.0690, -0.0119, +0.1714, -0.1429, +0.2048, +0.3442, -0.0357, -0.3776, +0.2283, +0.1940, -0.0545, -0.3471, -0.0467, -0.4649, +0.0441, +0.0288, -0.0065, +0.0178, -0.0776, -0.2727, +0.3306, -0.4464, +0.1174, +0.1001, +0.0399, +0.0524, -0.0003, -0.1412, -0.0648, -0.2413, -0.4105, -0.0880, -0.1160, +0.1251, +0.1382, -0.0681, +0.4072, -0.1640, -0.2284, +0.1872, -0.0280, +0.2319, -0.2080, +0.0299, -0.3826, -0.1097, +0.1318, -0.1643, -0.0302, -0.5179, +0.2203, -0.0162, -0.6722, -0.0071, -0.5593, +0.1680, +0.0923, -0.1145, -0.2368, -0.0590, +0.3251, -0.3777, -0.2606, -0.3314, -0.1094, -0.2820, -0.1278, +0.1683, -0.2518, +0.1133, -0.1676, +0.0311, +0.2705, +0.2880, +0.1390, -0.0344, +0.2985, -0.0291, -0.6771, -0.3216, +0.1436, -0.3997, +0.1744, -0.0892],\n[ +0.2227, -0.2971, -0.3469, +0.4666, -0.0545, +0.1139, +0.1211, -0.3773, +0.0639, +0.3880, -0.3827, +0.1726, -0.3401, -0.2897, -0.4629, +0.1177, +0.0280, -0.3550, -0.5237, +0.0578, +0.0203, -0.4134, -0.2219, -0.1620, +0.1527, -0.3433, -0.0286, +0.1367, -0.4098, -0.0532, +0.0170, -0.1500, -0.2434, -0.1257, +0.1599, +0.3132, -0.4962, +0.0203, -0.1200, -0.3780, -0.0548, -0.1382, +0.0468, -0.2005, -0.2922, +0.0412, -0.3139, -0.2190, -0.1921, -0.3008, -0.1326, +0.1474, -0.0546, -0.3791, +0.2720, -0.4730, +0.2390, -0.2128, -0.1345, -0.3941, +0.2550, -0.3516, +0.0163, +0.2963, +0.0107, -0.0940, -0.5645, -0.3014, -0.0069, +0.2026, -0.0785, +0.4574, +0.1301, +0.2480, -0.0485, -0.2237, -0.0629, -0.2352, +0.0711, +0.2740, -0.1476, +0.3646, +0.3072, -0.2861, -0.1857, -0.0471, +0.0310, -0.3897, +0.0805, +0.0024, +0.0570, +0.3988, -0.0309, -0.0894, +0.2665, -0.3530, +0.1016, +0.2017, +0.4422, -0.0812, -0.0641, -0.4382, -0.0046, -0.3735, -0.1861, +0.2945, +0.0989, +0.1345, +0.0340, +0.0190, -0.2118, +0.0686, -0.3836, +0.1731, +0.1700, -0.2080, +0.2979, -0.5672, +0.0529, +0.1361, -0.1965, -0.1959, -0.1695, +0.0855, -0.2045, -0.1573, -0.3586, -0.1811],\n[ -0.2061, +0.1187, -0.4152, +0.3454, +0.3325, -0.4535, +0.0175, -0.1316, +0.1508, +0.0567, -0.3291, +0.0792, -0.3860, -0.6880, +0.0831, +0.0106, -0.2324, -0.1728, -0.2045, +0.5967, +0.1797, -0.7193, -0.1522, +0.4142, +0.2647, -0.1462, +0.2758, +0.2446, +0.0061, -0.0743, +0.2720, +0.1632, -0.1003, -0.2277, -0.5311, -0.1379, +0.1877, +0.0882, -0.1241, +0.0654, +0.0969, +0.0503, +0.1742, -0.0763, +0.1149, -0.1509, -0.2626, -0.0685, -0.0073, +0.5289, -0.0256, +0.3609, +0.1023, -0.4789, -0.3071, +0.2803, +0.0987, +0.0184, -0.2751, +0.0439, +0.7189, -0.4512, -0.6046, +0.3523, +0.2981, -0.1774, +0.3571, -0.2574, +0.0502, +0.2503, -0.2745, -0.2874, +0.1629, -0.2583, -0.0399, -0.2499, +0.0662, -0.0962, -0.1815, -0.4266, +0.1113, -0.8154, -0.3679, +0.0954, +0.0696, -0.2440, -0.1968, -0.3760, +0.0211, -0.5700, +0.2556, -0.3801, -0.0156, +0.0770, -0.4611, +0.0781, -0.1871, -0.1328, -0.0320, -0.7022, -0.3862, -0.0156, +0.3554, -0.2388, +0.1465, 
[… large hard-coded 2-D array of floating-point values elided — dozens of rows of roughly 128 entries each; no variable name or caption is recoverable from the extraction …]
-0.3594, -1.0594, +0.1320, -0.1490, +0.0831, +0.6580, -0.2022, -0.3214, -0.0493, -0.4918, -0.1414, -0.3596, -1.2959, +0.0334, -0.0096, -0.0637, +0.1090, +0.2714, +0.2888, -0.4400, -0.9489, +0.0081, -0.0942, +0.1463, -0.1739, -0.6975, +0.2816, -0.8516, -0.3003, +0.1234, +0.3339, -0.1227, -0.2146, -0.7996, -0.2116, -0.0534, -0.3831, +0.1300, -0.6559, -0.2496, -0.3875, -0.6497, -0.4172, +0.1083, +0.3429, +0.1460, -0.6197, +0.0890, +0.4016, -0.0499, +0.0776, -0.1099, +0.4072, +0.1516, -1.1238, +0.1015, +0.0881, +0.2689, +0.4516, -0.1882, +0.1791, +0.7761, +0.2232, +0.0939, -0.3682, -0.1288, +0.0489, -0.2183, -0.2398, +0.0578, -0.5142, -0.1569, +0.2860, +0.3591, +0.3800, -0.3455, +0.1576, +0.2144, -0.8416, -0.0603, +0.1336, +0.2087, -0.6990, -0.1888, +0.5619, -0.2315, +0.1561, +0.1132, -0.7656, -0.3181],\n[ -0.1286, +0.1858, -0.2411, +0.0090, -0.3402, -0.2936, +0.1051, +0.1919, -0.2531, +0.4690, -0.6679, +0.3317, +0.2073, -1.1532, -0.7191, +0.0884, -0.0799, +0.2003, -0.1593, -0.1969, -0.2554, -0.2080, -0.2559, +0.3545, -0.8480, +0.4528, -0.2133, -0.4219, -0.4413, +0.1131, -0.0602, -0.1928, +0.0279, +0.1352, -0.2555, +0.2333, -0.2460, +0.0057, -0.8220, -0.0760, +0.0504, -0.9094, -0.6114, +0.1768, +0.0091, -0.0628, -0.1880, -0.2029, +0.2462, +0.1165, +0.2070, -0.1164, -0.1690, +0.1759, -0.1980, -0.2017, -0.2359, -0.5043, -0.7476, -0.3844, -0.1492, -0.5376, +0.2582, +0.3424, -0.1346, -0.1681, -0.4327, +0.0905, -0.2482, +0.1685, +0.3307, +0.1385, -0.2317, +0.2284, -0.6566, +0.2268, -0.1018, -0.2626, -0.0549, -0.3204, -0.1904, -0.0743, -0.8110, +0.2222, -0.0741, -0.0935, +0.1553, +0.1289, -0.1035, -0.0720, +0.4058, -0.3011, +0.0458, +0.2255, -0.6481, +0.0250, +0.1870, +0.1039, +0.1931, +0.1419, -0.7905, +0.0222, +0.2000, -0.1245, +0.2453, -0.4877, +0.2176, +0.2230, -0.5876, -0.0292, +0.0748, -0.3372, -0.0874, +0.0585, +0.4317, -0.0294, +0.3862, +0.3095, -0.2184, -0.5214, +0.3847, +0.0584, +0.3308, +0.0771, -0.0182, +0.1068, +0.0827, -0.3393],\n[ -0.0223, +0.2438, -0.0411, -0.3990, +0.4767, -0.4751, +0.2604, +0.2775, +0.0951, +0.3999, -0.0248, -0.0078, -0.0592, -0.4420, +0.0711, +0.2006, -0.0424, +0.4881, -0.0616, -0.3197, -0.2655, -0.3899, +0.2844, -0.0932, -0.3590, +0.0607, +0.1592, -0.0413, +0.2932, +0.0039, +0.3933, +0.3092, -0.4648, +0.3378, -0.1120, +0.0148, +0.2869, +0.0393, -0.3506, +0.2658, +0.1252, +0.1600, -0.2529, -0.1268, +0.3310, -0.0902, +0.2594, -0.1768, -0.0035, -0.4478, +0.1509, -0.0057, +0.1382, -0.0739, -0.0245, -0.1634, -0.1252, +0.0223, -0.5591, -0.0023, -0.0707, -0.2333, -0.3290, -0.0214, +0.0871, -0.0567, -0.3501, -0.0693, -0.1038, -0.4635, +0.0217, -0.3380, +0.2432, +0.1398, -0.2903, +0.0658, +0.0269, +0.0889, +0.2137, -0.1885, -0.0677, +0.2808, +0.0891, +0.0853, +0.1616, +0.3423, +0.2141, +0.0873, +0.0890, +0.0168, +0.0364, +0.0070, -0.3678, +0.1200, +0.1389, +0.1118, +0.2462, +0.0112, +0.0536, -0.3833, +0.1149, -0.3142, +0.3757, -0.2268, +0.5505, -0.4528, +0.1183, -0.4295, -0.2305, -0.2820, +0.2610, -0.5187, -0.3496, -0.0884, +0.1280, -0.1898, +0.2082, +0.2891, -0.1266, +0.0223, -0.2194, +0.0958, +0.3503, +0.1008, -0.0299, +0.1613, +0.1108, +0.2884],\n[ -0.2112, +0.2139, +0.2353, -1.3735, +0.3379, -0.3306, -0.5791, +0.2142, +0.1661, -0.2046, -0.4202, -0.5208, -0.4747, -0.3524, -0.1866, +0.2010, +0.3635, -0.3221, -0.3282, +0.1993, -0.1151, -0.4721, +0.0627, -0.2138, -0.3382, +0.2179, +0.1286, +0.0916, -0.2135, +0.1114, +0.1641, -0.7235, +0.0271, -0.3165, -0.4372, -0.0611, +0.0824, -0.1537, +0.3468, -0.3766, -0.0373, +0.0722, +0.0230, -0.7669, -0.7521, +0.4481, +0.1352, 
-0.0385, -0.0233, -0.3938, -0.1916, +0.1151, +0.2809, -0.0007, -0.0378, +0.0906, +0.2414, -0.9108, -0.2856, -0.1401, -0.6404, -0.7260, -0.2588, +0.1096, -0.1461, +0.0196, -0.9551, -0.5937, -0.0863, -0.1227, -0.9528, -0.1206, -0.0116, -0.3274, +0.3520, +0.0632, -0.1109, -0.3264, -0.8242, +0.2668, -0.0588, -0.2727, -1.1266, -0.9705, +0.2631, -0.2931, -0.7791, -0.9120, +0.2401, +0.0197, -0.0931, -0.5968, +0.5901, +0.1350, +0.0918, -0.5455, -0.6439, +0.4476, +0.2893, -0.5928, +0.0521, -0.3101, -0.0489, +0.0052, -0.3169, -0.4701, -0.6680, -0.0638, +0.1298, -0.4204, -0.0254, +0.1195, -0.0933, -0.1896, +0.0503, -0.7819, +0.1228, +0.0729, -0.0561, -0.0014, -0.0664, +0.2052, -0.7386, +0.4056, -0.0660, -0.9663, -0.2771, -0.5232],\n[ -0.1246, -0.2805, -0.3576, +0.2033, +0.1179, +0.0137, -0.6560, -0.2292, -0.5633, -0.0186, -0.2827, +0.1622, +0.1316, -0.4703, -0.7199, +0.2989, +0.1364, -0.1050, +0.4184, -0.0721, -0.1984, -0.8043, +0.1757, +0.1660, +0.1198, -0.6675, -0.1686, -0.0080, -0.0535, -0.2755, -0.0087, -0.0772, -0.1946, -0.1625, +0.2803, -0.4290, +0.3188, -0.5116, +0.3810, -0.4627, -0.3474, -0.0296, -0.0126, -0.5452, -0.1713, +0.0726, +0.1517, -0.3349, +0.2804, -0.4211, -0.3698, -0.0776, +0.5157, -0.0116, -0.1578, -0.2274, -0.5434, -0.7078, -0.1557, +0.1799, -1.7247, +0.0598, -0.2763, -0.2002, -0.1513, +0.0315, +0.1172, -0.5970, -0.4829, -0.1757, -0.6510, -0.2016, +0.0722, -0.2784, +0.2477, -0.1084, +0.3574, +0.2216, -0.2019, -0.0929, -0.2401, -0.0009, +0.0100, +0.3262, +0.1323, -0.3393, +0.0441, +0.0237, -0.1123, -0.3906, +0.1925, +0.2541, +0.0079, -0.3655, +0.1809, +0.0290, -0.2198, +0.2628, -0.1835, +0.1061, +0.2996, +0.5976, -0.0171, -0.0882, +0.0488, +0.0209, -0.4426, -0.1461, -0.7127, +0.0345, -0.0908, -0.0087, -0.6902, +0.0474, +0.3433, +0.2376, +0.0280, +0.1350, -0.6906, -0.1005, -0.3456, -0.4074, -0.3707, -0.4556, +0.1230, +0.1421, -0.1536, +0.4549],\n[ +0.4253, -0.1862, -0.2521, +0.0973, +0.2793, -0.0849, -0.1390, +0.2692, +0.2426, +0.1690, -0.0772, -0.0276, +0.1998, +0.0185, -0.1736, -0.0510, -0.1957, +0.2297, +0.2222, -0.0429, -0.1558, -0.9350, +0.1733, +0.2294, +0.2759, -0.1063, -0.6416, -0.2152, +0.0899, -0.4758, -0.0466, +0.0209, +0.0777, -0.6026, +0.1939, -0.1837, +0.1779, +0.1248, -0.1254, -0.2091, -0.1979, +0.4873, +0.1698, +0.0795, -0.1283, +0.0153, -0.4910, +0.3692, -0.0166, +0.0893, -0.3555, -1.2133, -0.3912, -0.3162, +0.1056, +0.3104, -0.0164, +0.4362, -0.1710, -0.5106, -0.0164, +0.3129, -0.7867, +0.1497, +0.0967, -0.0521, +0.1755, -0.1235, +0.1731, +0.2002, +0.2755, -0.0072, +0.2876, +0.2190, +0.0101, +0.1913, -0.3036, +0.3041, +0.2350, +0.3212, +0.1128, +0.1641, -0.5767, +0.1855, +0.0141, +0.2336, -0.1784, -0.2715, +0.0631, -0.1110, +0.2796, +0.1881, +0.2210, +0.4339, +0.1268, -0.1757, +0.1511, +0.0015, -0.0059, +0.0019, +0.2075, +0.1971, +0.1113, +0.0985, +0.0434, -0.2494, +0.2593, +0.0467, -0.5946, -0.2618, -0.1521, -0.2028, +0.2417, -0.6486, -0.1829, +0.0708, -0.1101, +0.1115, +0.0227, +0.1591, -0.4037, -0.0098, +0.0621, -0.4142, -0.1739, +0.4112, +0.3607, -0.5380],\n[ +0.1177, -0.0829, -0.2037, -0.2156, +0.1920, +0.1360, -0.1985, -0.0925, -0.5613, -0.2878, -0.1383, -0.1178, +0.2660, -0.2264, -0.3758, +0.0474, +0.0023, +0.1017, -0.4475, -0.0988, +0.2955, -0.4085, -0.2192, +0.1311, -0.3999, -0.3496, -0.1175, -0.4424, -0.4607, -0.0673, -0.1690, -0.2330, -0.3726, -0.0566, -0.3821, +0.0028, +0.1176, +0.1637, -0.3063, -0.2605, +0.4228, +0.5291, +0.0629, +0.5256, -0.3477, +0.4640, +0.0645, -0.5641, -0.2468, +0.2339, +0.1751, +0.0718, +0.3500, -0.4988, -0.1264, -0.0177, 
+0.1769, -0.0400, -0.2931, -0.0032, +0.3511, +0.2182, -0.4131, -0.6258, -0.2432, -0.0031, -0.4084, -0.1694, -0.1345, -0.0997, -0.2085, -0.1604, +0.3349, +0.2521, +0.1971, -0.1442, +0.2636, -0.1359, -0.0244, -0.5738, +0.1898, -0.0964, +0.3351, +0.1642, +0.1135, -0.3388, -0.1054, -0.0207, +0.0978, +0.0311, +0.0011, -0.0103, -0.3878, -0.1310, +0.1785, +0.0105, +0.0070, -0.2280, +0.1466, +0.1476, -0.0045, -0.4846, +0.3915, -0.2372, -0.0766, -0.4503, -0.1964, +0.1785, +0.1047, +0.8291, +0.1383, +0.1750, +0.2304, +0.1486, +0.1168, -0.0636, +0.0268, -0.0029, +0.1824, -0.4461, +0.2138, +0.0789, +0.2332, +0.3060, -0.2309, -0.1640, -0.2015, +0.0814],\n[ -0.2089, +0.1014, -0.0579, +0.3885, -0.0160, +0.3301, -0.4470, +0.4714, -0.1930, -0.4235, -0.0122, -0.5872, +0.3336, +0.0962, +0.1576, +0.0089, +0.0542, -0.1455, -0.1269, +0.1636, -0.1596, -0.1467, -0.0751, -0.3372, -0.0210, -0.3890, +0.2495, -0.5069, +0.2558, +0.2504, -0.0504, -0.1370, -0.1636, +0.1638, -0.4274, -0.4883, +0.1807, -0.3299, -0.2602, -0.1714, +0.3408, +0.1558, +0.3440, -0.2063, +0.3495, -0.0942, -0.1896, +0.1604, +0.2981, +0.0938, +0.3079, -0.0422, +0.2457, -0.0888, -0.1330, +0.3858, +0.1828, +0.2261, +0.0589, +0.0212, +0.2386, -0.2637, -0.1125, -0.6084, -0.1772, +0.1757, -0.0477, -0.4429, -0.5084, -0.0512, -0.0579, +0.2526, +0.2676, -0.3652, +0.0563, +0.1303, +0.1980, -0.1137, -0.1055, +0.1332, +0.2540, -0.0726, -0.2244, +0.1358, +0.1327, -0.5314, -0.5776, +0.0586, +0.4645, -0.5067, +0.2115, -0.8910, -0.0485, -0.2118, +0.0909, +0.4667, +0.1512, -0.4808, +0.0252, -0.1659, +0.2356, -0.1116, +0.0382, +0.0931, +0.1761, +0.0300, -0.3396, -0.1821, +0.1245, -0.1004, -0.1907, +0.1104, -0.0528, -0.1555, -0.0645, +0.0562, -0.2227, +0.3703, -0.2504, -0.1985, -0.2180, -0.1775, +0.2405, -0.0889, +0.1268, +0.3380, -0.3181, +0.3610],\n[ -0.1952, -0.0578, -1.1057, +0.5134, -0.1378, +0.2639, -0.1665, -0.3419, -0.1661, +0.1025, -0.0394, -0.0055, -0.3502, +0.3829, +0.0351, -0.8268, -0.4556, +0.1120, -0.0705, +0.2972, -0.4725, +0.0838, +0.3422, -0.5389, -0.2642, -0.2428, -0.4464, -0.1878, +0.3799, +0.0833, -0.3188, +0.1659, -0.5127, -0.1637, -0.5105, -0.3803, +0.1553, +0.1436, -0.2048, -0.0348, +0.1922, -0.2446, -0.6350, -0.6326, +0.3759, -0.8518, -0.2066, -0.3527, -0.9225, +0.5922, +0.1130, -0.6997, +0.0371, +0.1281, +0.0358, -0.3420, +0.0422, -0.1585, +0.0060, -0.2306, +0.0093, +0.3616, +0.1565, -0.4825, -0.2306, -0.6262, +0.0261, +0.1737, -0.8466, +0.1037, +0.1481, -0.1724, -0.0477, -0.2328, -0.7668, +0.0539, -0.0205, -0.1615, +0.1709, +0.1200, +0.0062, -0.1006, -0.0631, -0.6605, +0.3073, +0.0534, -0.1759, -0.0048, +0.3530, -0.0488, -0.3439, +0.6192, -1.0127, -0.4404, -0.1655, +0.2329, +0.1701, -0.0102, -0.6998, -0.5205, -0.3661, -0.3588, -0.0991, -0.0539, -0.1724, -0.0598, +0.0298, -0.5078, -0.2480, +0.2300, -0.0248, -0.8744, +0.1165, -0.3571, -0.2669, -0.3494, +0.1781, +0.3223, -0.0251, +0.1327, +0.5895, +0.4107, -0.1365, -0.4952, -0.2869, +0.1122, +0.2460, +0.4473],\n[ +0.1338, -0.0063, -0.2522, -0.1941, +0.2738, -0.2233, -0.2280, +0.2611, -0.0943, -0.3515, -0.0844, +0.2064, -0.0363, -0.3102, -0.3898, +0.0770, +0.2784, -0.1512, -0.1478, -0.0007, -0.1822, -0.2255, -0.5550, +0.2976, +0.2547, +0.0151, +0.0828, -0.0848, -0.2138, -0.0103, -0.8352, +0.1319, -0.2303, -0.2299, -0.0563, -0.3885, +0.0342, -0.4977, -1.2963, +0.2561, -1.1328, +0.0012, +0.2306, -0.5411, -0.3006, +0.0676, -1.9030, -0.3075, -0.0979, +0.0896, -0.0580, -0.0038, +0.1381, +0.0090, +0.1478, -0.3079, +0.2841, -0.1863, -0.6163, -0.1751, -0.5646, +0.4300, +0.0704, -0.0387, -0.2823, 
+0.2259, -0.0953, -0.2159, -0.2969, -0.3502, -0.3141, -0.1517, +0.0782, +0.0999, +0.5229, -0.1531, -0.2047, -0.1471, +0.2247, -0.2625, +0.0019, -0.1073, -0.0022, -0.4420, +0.0517, -0.0580, -0.6427, -0.2424, +0.1411, -0.0678, -0.1120, -0.8540, -0.2692, +0.2238, -0.2368, -0.9063, +0.2976, +0.0414, +0.1095, +0.2244, +0.0210, -0.1006, -0.2540, -0.1564, -0.0956, -0.1224, -0.5763, +0.2323, -0.1224, +0.2074, -0.5607, -0.5861, -0.0902, +0.1296, +0.1034, -0.2310, -0.2814, -0.1924, +0.1452, -0.5430, -0.0619, -0.4562, +0.2069, +0.2684, -0.2071, -0.7354, -0.2258, -0.1395],\n[ -0.1963, -0.0107, -0.0423, +0.0007, +0.3431, +0.3000, -0.2971, +0.3827, -0.2744, -0.6258, -0.7529, -0.2262, -0.0908, +0.7792, -1.4345, -0.7595, -0.0323, -0.4281, -0.0325, +0.0773, -0.4141, +0.1006, +0.5800, +0.4668, +0.3735, -0.3455, -0.2889, -0.2550, +0.2045, -0.2958, -0.0868, -0.0470, -0.2008, +0.1274, +0.0259, +0.1598, -0.2579, -0.5582, -0.0712, +0.2501, -0.4543, +0.1671, -0.0398, -0.3789, -0.6567, +0.3653, -0.5175, -0.0741, -0.1428, -0.9625, -0.1969, +0.3227, -0.7710, -0.2489, +0.0553, +0.1062, +0.2338, -0.8710, -0.2630, +0.1344, +0.2044, -0.1054, -0.3221, -0.7012, -0.1819, -0.1221, -0.0658, -0.0748, -0.3830, +0.3178, +0.6506, -0.3111, -0.4665, +0.2741, -0.2561, +0.2261, -0.4738, -0.0788, +0.0114, -0.5013, -0.6076, -0.4491, -0.4184, +0.6562, +0.3299, -0.1993, +0.3129, -0.6395, -0.0735, -0.5297, +0.0319, +0.4652, +0.6208, +0.0026, -0.1381, -0.5193, +0.0792, +0.5061, -0.5610, +0.5022, -0.1293, -0.0424, -0.2379, -0.2926, -0.4384, -0.4252, -0.6475, +0.4550, -0.1399, -0.3108, -0.6347, +0.2680, +0.6766, +0.5618, +0.0904, -0.0573, -0.3292, +0.1988, -0.2317, +0.3934, -0.3043, -0.1623, -0.4827, -0.1761, -0.1059, +0.0525, -0.0313, -0.1475],\n[ -0.0720, -0.0474, -0.0168, +0.1428, -0.1180, -0.3548, +0.4974, -0.7262, -0.2118, +0.1000, -0.0795, -0.2894, -0.0655, -0.0423, -0.1659, +0.1347, +0.0860, -0.2163, -0.5078, -0.2881, +0.1382, -0.4808, +0.0168, +0.3667, -0.3162, -0.3973, -0.1601, -0.2069, -0.1248, +0.2609, +0.0856, +0.0030, +0.1974, +0.3896, +0.0707, -0.1730, -0.1112, +0.2419, -0.1894, -0.1437, +0.0670, -0.2615, -0.3430, +0.1329, -0.0827, -0.8066, -0.0254, +0.1680, +0.2338, -0.9633, +0.1481, -0.1104, +0.1121, +0.2071, +0.2378, +0.1063, -0.1278, -0.1192, +0.2428, +0.2460, -0.0064, +0.0025, +0.4307, +0.4768, -0.8693, +0.2835, -0.2696, +0.0314, -0.0276, -0.1222, +0.2166, +0.2363, -0.2178, +0.3113, -0.8229, -0.2482, -0.4106, +0.1824, -0.0883, +0.0695, -1.2673, -0.3113, +0.1158, +0.2840, +0.1894, -0.7726, +0.3572, +0.0157, +0.0035, -0.2735, +0.2097, -0.6630, +0.0192, +0.0644, -0.1819, -0.0937, -0.2322, +0.3941, -0.1772, +0.0560, +0.0390, -0.4749, +0.0099, -0.3858, -0.1375, -0.0988, -0.6115, -0.1879, -0.1092, +0.0315, +0.2415, -0.2787, -0.0006, -0.0118, +0.2440, -0.0432, -0.1334, -0.1346, -0.4602, +0.1854, +0.1047, -0.0036, -0.2341, -0.1576, +0.0653, +0.1567, +0.1526, +0.1044],\n[ +0.1769, -0.1034, -0.1948, -0.1274, -0.1648, +0.5893, -0.0946, -0.2004, -0.2721, -0.3387, +0.3898, -0.3109, +0.1371, +0.1680, -0.7257, +0.1351, -0.6586, +0.2851, +0.1728, +0.1130, -0.0131, -0.4239, +0.1692, +0.1324, -0.3443, +0.2481, +0.1447, -0.3541, +0.1374, +0.0410, -0.1387, +0.1571, +0.1503, -0.0262, -0.0774, -0.1869, -0.0378, +0.2104, +0.0172, -0.1452, -0.4186, +0.2158, +0.2101, +0.2539, +0.3134, -0.0346, -0.0244, -0.2532, -0.1317, -0.0540, +0.0459, -0.1373, +0.1366, -0.0571, +0.1385, +0.1867, -0.1449, -0.1944, +0.3466, +0.0981, -0.1643, +0.0219, -0.0246, -0.3430, +0.0084, -0.6418, +0.1014, +0.0666, +0.2488, -0.2650, -0.1532, -0.2495, +0.0428, -0.4240, 
-0.2821, -0.0754, +0.2721, +0.4462, -0.8370, -0.0485, +0.1678, -0.2672, +0.3932, -0.1626, +0.1755, -0.5960, -0.0072, +0.6288, +0.0466, -0.0260, -1.0479, -0.5837, -0.1394, +0.0547, +0.1121, -0.6113, -0.5292, -0.6037, -0.2560, +0.2865, -0.2272, -0.2464, +0.1547, -0.0282, -0.8408, +0.3033, +0.1790, -0.1641, -0.3059, +0.1309, -0.3210, -0.0627, +0.2607, -0.0373, +0.0129, -0.2009, -0.3511, -0.1189, -0.2208, -0.0538, +0.0637, +0.1723, -0.5148, +0.2267, +0.1132, +0.0045, -0.1421, +0.1912],\n[ +0.0906, -0.1130, -0.2077, +0.1502, +0.3574, -0.4087, -0.1618, -0.1510, +0.2428, -0.3561, -0.3971, -0.2274, -0.1077, +0.2071, +0.5784, +0.3420, +0.2530, -0.1951, -0.1346, +0.2660, -0.0259, +0.0771, -0.0849, +0.3132, -0.0404, -0.1513, -0.0255, +0.1137, +0.3354, -0.1369, -0.1115, -0.1645, +0.1585, -0.2722, +0.4510, -0.0847, -0.0087, -0.1398, +0.0353, -0.0701, -0.4510, -0.8897, +0.0337, -0.0026, +0.1252, +0.0948, +0.2133, +0.2421, -0.2473, +0.2771, +0.3732, -0.0731, -0.4412, -0.0749, +0.1336, -0.0273, +0.1484, -0.3783, +0.1017, +0.0390, -0.5518, +0.0037, -0.6220, +0.2184, +0.3016, +0.0485, +0.1901, +0.2104, +0.1166, -0.1032, +0.3395, -0.0177, -0.3512, +0.1229, +0.0400, +0.2339, -0.5922, -0.4052, -0.5183, +0.4653, +0.1410, +0.2020, +0.1406, -0.1232, +0.2124, -0.1376, -0.2617, -0.0459, -0.4286, -0.7037, -0.0695, -0.5891, +0.0045, -0.1987, -0.3906, +0.1380, +0.0949, -0.3112, -0.2688, -0.1340, -0.2209, -0.3564, -0.1459, +0.3078, +0.0459, +0.2045, +0.1436, -0.1081, -0.6286, -0.3076, +0.1346, +0.1734, -0.0212, +0.2784, -0.2703, -0.5880, -0.1379, +0.0307, +0.4657, -0.2370, -0.6191, -0.0113, +0.2450, -0.0045, +0.1112, -0.0822, +0.3822, -0.0562],\n[ +0.1409, -0.0538, -0.1432, +0.1716, +0.1670, +0.1345, +0.3174, +0.0786, +0.3477, -0.5395, -0.3514, +0.2158, +0.0896, -0.1822, +0.2500, +0.4068, +0.3118, +0.3923, -1.1109, -0.2122, +0.0909, +0.1655, +0.1356, +0.4056, -0.3149, +0.1185, +0.1717, +0.0258, -0.1728, +0.2402, -0.0251, -0.0708, -0.0743, -0.1093, +0.5213, +0.0419, -0.1286, +0.1853, +0.4699, -0.5875, -0.2311, +0.4390, -0.3652, +0.2524, -0.0379, -0.1384, -0.1974, -0.3096, +0.0594, +0.0794, +0.0578, +0.1156, -0.9567, -0.3747, +0.2159, +0.1118, -0.0018, -0.4289, -0.0497, +0.5024, +0.0897, -0.5219, -0.4890, -0.1756, -0.1832, +0.1821, +0.3873, +0.0030, -0.7192, +0.0757, +0.4871, -0.8119, -0.0317, +0.3589, -0.1295, -0.0279, -0.1444, -0.8395, -0.3653, +0.1671, -0.0373, +0.0349, -0.0650, +0.2142, -0.1536, -0.2972, +0.0513, -0.2596, +0.1086, -0.5500, +0.5186, -0.1609, -0.2082, -0.9726, -0.0006, -0.2899, +0.1783, -0.1634, -0.5750, -0.0325, -0.2408, -0.0379, +0.1189, +0.0418, +0.3697, +0.2451, +0.1595, -0.2399, -0.1088, -0.1213, -0.4627, +0.2782, +0.0666, -0.0095, -0.4685, -0.5872, +0.1120, +0.1272, +0.0088, -0.4947, +0.3034, +0.0580, -0.2964, +0.0579, -0.1105, -0.1897, +0.0601, +0.0605],\n[ -0.0967, -0.8116, -0.1189, -0.5744, -0.3304, -0.6346, +0.1212, +0.2593, -0.0239, +0.2636, +0.0874, -0.4657, +0.1628, +0.6463, +0.2373, -0.6677, -0.1700, +0.1728, -0.3091, +0.2459, +0.3715, -0.7096, -0.0390, +0.4462, +0.6917, -0.5975, -0.0984, +0.5475, +0.4104, -0.1399, -0.1059, +0.0950, +0.1488, +0.1787, +0.3116, -0.1217, +0.1377, +0.1193, -0.4545, -0.5360, +0.6137, -0.1851, +0.2568, -0.1605, -0.0478, -0.8687, +0.4071, -0.1072, -0.1179, +0.2131, +0.0615, -0.2244, -0.2624, -0.0859, -0.0773, +0.0423, +0.0499, -0.3312, -0.1253, +0.0579, -0.0270, +0.1835, +0.1067, -0.3907, +0.2115, +0.0562, -0.1266, +0.0306, +0.1233, -0.0524, +0.0968, -0.5194, +0.2325, +0.3540, -0.4088, +0.0112, -0.2728, -0.4368, -0.3143, -0.1848, +0.0044, +0.1795, -0.1937, 
-0.4705, +0.1564, -0.4308, +0.2992, +0.1559, +0.1413, -0.5684, -0.5887, -0.4417, +0.1254, +0.6016, +0.3093, +0.2221, +0.2689, +0.2764, +0.2066, +0.3921, -0.0492, +0.5264, +0.1195, +0.0354, +0.0472, -0.7510, +0.4551, -0.3297, +0.2557, +0.0964, -0.2333, -0.0974, +0.0046, -0.1966, -0.1438, +0.2168, -0.0106, +0.0827, +0.1734, -0.1622, -0.7648, +0.0657, +0.4125, -0.0107, +0.5142, -0.1843, -0.0255, +0.0251],\n[ -0.4894, -0.3838, -0.0560, -0.0102, +0.2462, +0.2396, -0.1605, +0.6648, +0.0289, +0.3985, -0.3124, -0.1270, +0.0019, -0.0186, -1.0743, +0.6784, +0.1577, +0.2268, -0.0569, +0.2442, -0.1164, -1.2692, -0.6146, -0.5071, -0.0065, -0.1015, +0.0568, -0.0257, -0.1093, -0.1685, +0.5144, -0.0581, +0.5970, +0.3194, -0.3760, +0.2867, +0.3716, -0.2157, -0.1207, +0.3089, +0.5712, +0.2472, +0.0238, -0.7120, +0.0921, -0.5046, -0.4643, +0.3063, +0.0916, -0.7755, +0.0375, +0.1377, +0.4099, +0.1541, -0.0234, -0.2307, -0.2292, +0.1845, -0.3229, -0.0655, -0.0224, -0.3936, +0.1464, -0.2497, -1.0163, -0.0754, -0.2383, -0.1563, +0.0342, -0.2338, +0.3699, +0.0190, +0.3233, +0.0361, -0.3095, +0.1544, -0.2940, -0.0897, +0.2073, +0.4680, -0.3452, -0.1035, -0.1820, -0.5826, -0.4127, -0.5719, -0.2433, +0.1142, -0.1415, -0.4814, -0.1358, -0.0734, +0.4341, +0.2401, -0.4429, +0.3538, -0.2312, -0.2072, +0.0078, -0.2757, -0.2212, +0.0824, -0.0945, -0.2644, +0.0169, -0.6544, -0.0961, -0.2083, +0.4017, -0.6613, -0.2524, +0.2338, -1.7121, -1.9104, +0.2422, -0.0147, +0.3072, -0.3819, +0.3498, -0.4022, -0.4189, +0.4619, +0.1477, +0.3179, -0.2559, -0.0367, +0.1080, -0.3277],\n[ +0.0885, -0.4506, +0.4188, +0.2334, +0.2903, +0.0490, -0.0631, -0.0504, +0.2215, -0.1555, -0.0389, -0.1621, +0.0310, -0.4083, -0.3338, -0.2568, +0.0147, +0.1238, -0.5747, +0.0482, -0.2570, -0.2541, -0.0272, +0.2152, +0.0003, +0.3637, -0.2845, +0.0474, +0.1485, -0.3750, -0.4793, +0.4647, +0.0522, -0.6269, -0.1822, -0.0465, +0.1702, +0.2424, -0.1810, -0.1115, -0.5531, +0.0817, +0.0351, -0.0986, +0.0474, -0.4087, -0.0984, -0.2288, -0.0353, +0.0090, -0.1881, -0.1815, -0.0516, +0.1652, +0.3100, +0.2649, +0.2842, +0.3319, +0.4483, +0.2002, -0.1618, +0.1024, -0.1409, +0.3892, +0.2560, +0.1263, -0.0126, -0.0984, +0.3725, +0.1183, -0.2909, +0.1879, +0.1049, -0.1430, -0.1976, +0.2291, -0.2020, +0.1399, +0.0848, +0.0876, +0.2030, -0.0233, -0.3914, -0.0174, -0.0214, -0.0481, -0.2875, +0.0232, +0.0775, -0.6476, -0.2746, -0.3848, -0.0085, -0.5357, -0.5196, +0.1634, +0.1043, +0.0855, -0.0452, -0.0374, +0.1830, -0.2180, +0.1506, -0.1684, +0.0160, +0.0860, +0.4804, -0.0316, -0.3141, -0.1559, -0.4767, -0.1185, -0.1897, -0.0881, -0.1665, +0.0357, -0.3111, +0.0422, -0.1488, -0.1632, -0.7307, +0.0462, +0.0164, -0.0160, +0.0143, +0.3344, +0.3005, +0.2044],\n[ -0.1839, -0.1799, -0.5069, +0.3499, -0.0474, +0.3088, -0.3604, -0.3482, +0.0276, -0.4781, -0.1208, +0.1777, -0.2034, +0.0649, -0.2837, +0.0718, +0.1179, -0.5131, +0.0390, -0.2522, -0.0683, -0.0128, -0.1180, +0.3263, -0.4664, -0.2562, -0.1115, +0.2541, -0.1521, -0.2567, -0.2162, -0.8788, +0.1234, +0.0628, -0.4491, +0.0485, +0.1332, +0.4184, -0.1297, +0.2791, -0.3251, -0.3365, +0.1526, +0.2818, -0.1102, -0.2195, -0.3119, -0.0794, -0.0632, +0.0212, -0.5798, -0.3181, +0.1821, +0.5628, +0.2593, +0.0294, +0.1707, +0.3392, -0.4010, -0.3270, -0.0918, -0.1726, -0.1836, -0.9079, +0.1893, +0.2778, -0.0391, +0.0554, +0.1980, +0.2862, -0.4825, +0.1002, -0.8366, +0.1210, +0.0550, +0.1069, -0.3516, +0.1627, -0.3116, -0.0464, +0.0496, -0.1154, -1.9989, +0.0147, +0.1130, -0.1936, -0.1085, -0.1807, +0.2358, +0.3699, -0.1481, -0.1616, 
+0.1135, -0.7175, -0.0026, +0.2132, -0.1823, +0.1340, +0.1992, +0.0517, +0.0738, +0.0116, -0.4166, +0.0767, +0.1451, -0.4632, -0.4240, +0.2534, +0.0618, +0.1764, +0.1874, -0.1760, +0.3762, -0.0860, +0.2035, -0.8606, -0.1828, -0.1658, +0.0730, -0.1324, -0.6475, +0.1506, -1.5427, -0.1539, +0.3581, +0.1685, -0.0010, -0.4744],\n[ -0.0038, +0.2629, -0.0222, -0.4313, +0.2458, -0.0521, +0.2995, +0.1401, -0.1501, +0.2219, -0.0508, -0.6905, +0.2089, -0.1588, -0.1579, +0.3263, +0.1093, +0.1843, -1.6538, -0.1711, +0.0280, +0.0042, +0.1678, +0.3942, -0.1122, -0.7447, +0.1776, +0.0452, +0.0464, +0.1958, -0.5837, -0.2740, -0.8022, -0.0597, -0.1732, -0.3843, -0.1682, +0.4161, +0.5845, +0.0023, -0.0245, -0.0033, -0.0888, -0.0147, -0.0569, -0.7998, +0.1384, -0.1289, +0.3003, -0.0507, -0.4349, +0.5294, +0.0240, +0.1736, -0.1584, -0.1868, +0.2200, +0.0831, -0.6566, -0.1660, +0.1753, -0.0999, -0.5722, -0.1753, -0.4500, -0.0143, -0.2124, -0.0305, -0.8012, +0.2173, -0.1607, +0.3925, -0.3454, -0.6074, -0.1541, -0.4732, -1.1834, -0.0688, -0.0430, -0.5353, -0.1790, -0.4362, -0.2226, +0.0306, -0.0149, -1.2302, -0.6747, -0.3040, +0.1048, -0.6485, -0.4555, +0.0236, -1.1733, +0.5728, -0.1619, +0.1556, +0.1970, -0.2370, -1.8582, -1.0090, +0.0693, -0.1693, -0.1371, +0.3884, -0.1606, -0.2022, -0.2294, +0.5752, -0.0680, +0.2737, -0.1054, -0.5773, -1.0577, +0.0026, +0.1283, -0.2511, -0.0521, +0.1820, -0.6071, +0.1580, -0.1609, -0.1862, -0.1680, -0.1709, -0.1637, -0.2186, -0.2398, -0.2395],\n[ -0.1990, -0.1855, -0.5902, -0.1221, -0.8566, -0.1161, -0.2680, -0.2043, +0.0571, -0.2467, -0.5931, +0.0254, +0.2520, -0.0025, +0.3188, -0.2049, -0.1082, -0.2633, -0.3426, -0.3033, -0.4411, +0.2479, -0.4121, -0.2249, -0.2227, -0.3816, +0.1910, +0.2194, -0.0299, -0.0100, -0.2124, +0.4138, -0.2423, -0.0244, +0.1657, +0.0472, +0.0487, -0.3765, -0.1005, -0.1568, +0.3201, +0.4085, -0.1769, -0.6161, +0.1530, +0.0596, -0.1156, -0.8411, +0.1763, +0.0797, -0.6425, +0.3897, -0.2085, -0.1751, -0.0950, +0.1567, +0.0639, -0.1603, -0.3423, +0.1372, +0.2382, +0.1874, -0.0615, +0.0112, -0.1747, -0.6345, -0.5276, +0.1812, +0.4886, -0.0955, -0.1977, +0.0003, -0.1835, -1.0626, -0.3339, +0.1317, -0.2761, -0.2422, -0.6792, -0.0781, -0.3236, -0.1302, -0.3486, -0.7894, -0.6369, -0.4031, -0.1746, -0.1511, -0.2680, +0.0775, -0.5212, -0.1803, -0.4150, -0.3021, +0.4448, -0.4643, +0.3082, +0.5188, +0.3292, -0.1640, +0.2659, -0.0533, +0.0046, -0.2112, +0.0273, -0.3975, +0.1884, -0.2407, +0.5993, -0.0079, -0.1007, -0.3500, -0.7119, -0.2974, -0.2962, -0.1762, +0.0230, +0.2033, +0.1797, +0.0175, -0.3485, +0.2241, +0.1257, -0.1503, -0.3651, -0.4343, +0.0671, -0.2769],\n[ -0.2600, +0.3728, -0.1667, -0.2300, -0.1080, -0.4825, -0.1574, -0.2525, -0.1608, +0.3153, +0.3603, +0.1668, -0.1195, -0.0257, -0.1009, +0.2942, +0.1748, -0.3680, +0.4175, +0.1282, +0.2824, -0.8205, +0.3949, +0.0005, -0.0312, +0.2784, +0.1849, +0.0828, +0.1771, -0.6393, +0.0286, +0.2044, +0.5371, +0.1309, -0.1426, +0.0577, -0.3925, +0.1035, +0.2724, -0.4162, -0.2550, -0.0506, +0.1085, -0.0336, -0.3512, +0.1353, +0.2077, -0.2278, +0.1395, -0.0786, -0.3421, +0.5657, -0.5820, +0.1765, +0.0473, +0.0195, +0.0008, +0.0445, -0.0457, -0.0523, +0.2640, +0.1286, -0.0769, +0.1583, +0.0020, -1.1252, -0.3394, -0.2469, -0.2057, -0.0182, +0.0225, -0.4020, +0.0443, +0.3486, +0.3001, +0.1069, +0.2708, -0.3042, -0.0407, +0.1189, -0.1185, -0.6579, +0.1502, -0.0180, -0.1439, -1.4340, -0.0236, -0.1200, -0.3519, +0.2684, -0.9712, +0.4391, +0.2379, -0.0626, -0.3674, -0.0832, -0.1493, -0.5778, -0.0028, -0.3108, -0.3896, 
-0.5958, -0.0574, +0.0678, +0.3899, -0.3551, -0.6014, -0.6356, -0.0705, +0.1556, +0.2318, +0.1251, -0.0145, +0.0445, +0.1474, -0.0408, -0.5473, +0.0390, -0.0862, +0.3692, +0.0987, +0.0798, -0.2238, -0.0353, +0.0702, +0.1057, +0.1381, +0.0884],\n[ -0.1550, -0.6553, -0.0098, -0.0513, +0.4681, -0.4697, +0.3507, -0.6614, -0.1393, +0.2851, -0.2353, +0.1117, +0.3554, -0.4386, -0.3174, -0.8774, -0.7019, +0.2315, -0.2981, -0.2346, +0.5748, +0.3798, -0.0704, -0.0219, +0.2735, -0.1494, -0.1263, -1.4348, +0.3184, -0.1334, -0.0568, +0.1348, -0.5653, -1.3195, -0.4193, +0.0257, -0.6081, -0.2396, +0.5183, +0.0537, -0.4824, +0.1120, -0.1459, -0.6086, +0.2357, -0.4124, -0.1445, -0.6436, -0.1888, -0.2544, +0.3426, -0.6475, +0.2406, +0.4602, +0.1544, +0.1915, -0.8082, +0.3168, -0.2569, +0.0707, +0.1043, +0.0112, +0.5536, +0.2491, -0.4264, -0.4461, +0.4385, -0.2794, +0.0615, -0.1656, -0.3143, +0.1733, -0.5887, -0.1564, +0.0588, +0.2128, +0.4712, -0.0338, +0.2120, -0.0114, -0.2567, -0.4390, +0.1800, +0.1231, -0.0566, +0.0131, +0.3387, -0.1946, -0.0597, +0.6877, +0.3091, -0.3047, -0.0256, +0.3494, -0.5286, -0.1652, +0.4785, -0.6145, -0.5849, +0.3488, -0.2858, +0.0411, -0.0751, -0.0106, +0.0322, +0.1082, +0.1757, -1.0412, -0.1001, +0.1272, -0.2649, -0.2981, +0.0066, -0.6085, -0.2164, +0.3369, -0.4131, -0.3749, -0.1556, -0.2250, +0.3331, -0.7518, +0.5474, +0.2815, -0.1135, -0.0068, +0.0842, +0.1357],\n[ -0.2586, +0.1079, -0.0247, +0.2432, +0.2292, -0.1817, +0.0774, -0.5248, -0.7265, +0.0755, -0.2294, -0.1428, -0.3254, +0.0887, -0.2064, -0.2610, +0.4028, -0.3058, -0.6010, +0.1141, -0.3263, -0.0803, -0.0203, +0.0699, -0.0710, -0.6511, -0.1640, +0.4584, -0.2584, +0.2199, +0.3318, -0.4902, +0.0846, -0.0167, +0.2071, -0.3865, +0.0593, +0.4101, -0.1511, -0.4140, +0.1307, +0.0728, -0.1314, +0.2545, -0.1680, +0.1449, +0.2776, +0.1651, -0.1073, -0.0356, -0.2519, +0.6122, -0.0189, -0.6031, +0.2458, +0.2116, -0.3389, +0.1072, +0.1387, +0.2409, -0.2313, +0.1170, -0.0619, +0.1290, -0.4353, +0.0356, -0.2926, +0.0432, -0.7712, -0.3432, -0.0004, -0.2343, -0.0708, +0.0205, -0.0015, +0.1525, +0.0152, +0.0648, +0.3449, -0.4003, +0.0074, +0.1720, -0.0653, +0.1272, +0.2053, +0.1385, -0.0542, +0.1896, +0.3601, -1.3929, +0.1053, -0.1274, -0.0188, -0.0037, +0.3852, -0.4051, -0.4026, +0.0011, +0.1984, +0.4017, -0.2087, -0.1526, +0.0104, -0.3145, -0.1186, -0.2420, -0.8113, -0.1924, -0.0769, +0.0629, +0.0287, -0.0093, +0.1665, +0.4061, -0.1140, -0.1501, +0.4697, -0.1004, -0.0598, -0.9584, +0.1174, +0.1803, -0.4207, +0.2724, +0.1135, -0.3000, -0.1081, -0.2234],\n[ -0.0944, +0.2341, -0.1293, -0.3682, -0.2815, +0.3922, +0.1281, -0.0364, -0.3578, +0.3765, +0.0833, -0.2770, -0.3372, -0.1390, +0.2268, -0.0754, -0.7129, +0.1079, +0.4896, -0.2936, +0.5755, +0.0711, +0.0261, +0.3214, +0.1170, -0.3148, -0.3589, -0.3999, -0.0442, +0.1939, -0.0184, -0.5482, -0.0909, -0.1102, +0.3405, -0.2403, +0.0017, -0.0908, +0.1057, +0.0563, -0.4943, -0.2383, +0.2750, +0.1475, +0.4200, -0.7400, -0.0005, -0.7109, +0.2982, -0.0624, +0.0991, -0.7474, -0.3128, +0.0312, -0.1940, +0.0893, +0.1152, -0.2274, -0.3678, -1.4362, -0.1969, -0.6162, +0.4540, +0.2569, +0.3304, -0.1393, +0.2411, -0.5043, -0.4747, -0.4624, +0.2566, +0.2622, +0.3182, -0.1998, +0.2243, +0.1218, -0.3776, -0.1689, -0.3444, -1.2030, -0.3384, +0.0376, -0.9877, +0.4066, +0.2852, -0.1160, +0.1727, -0.6990, -0.0899, -0.1878, +0.4875, -0.7249, -0.2148, -0.2262, +0.2572, +0.5794, +0.1245, +0.0084, -0.0661, +0.0022, +0.0790, -0.2628, +0.0873, +0.1133, -1.4550, -0.0061, +0.4475, -0.0445, -0.1131, -0.1322, 
-0.1805, -0.4632, -0.4034, -0.0477, +0.0881, -1.3645, +0.7105, -0.8035, -0.0230, -0.2352, -0.6769, -0.0870, -0.1496, -0.7174, -0.3905, +0.5053, +0.1599, +0.0774],\n[ -0.0854, -0.3404, +0.0652, +0.0362, -0.0756, +0.0192, -0.1265, +0.3489, -0.3569, -0.8240, +0.0391, -0.0941, -0.1433, +0.3441, -0.1874, -0.2676, +0.5242, +0.2251, -0.2071, -0.1848, -0.2279, -1.2238, +0.2887, +0.0095, -0.1245, -0.3161, +0.0329, -0.0625, -0.2333, +0.3116, +0.0164, -0.3283, -0.4250, +0.1686, +0.2560, -0.5164, -0.2912, -0.3383, -0.1983, +0.1373, +0.2847, +0.4067, +0.1801, -0.1141, +0.0385, -0.4291, -0.2469, +0.0845, -0.0106, -0.4852, +0.2204, -0.3597, -0.3101, +0.0629, -0.1494, -0.0957, -0.2842, -0.2712, -0.1635, -0.3056, -0.0953, -0.7983, -0.2230, +0.1603, +0.1263, +0.0316, +0.1262, +0.0299, +0.2142, -0.1661, -0.1874, -0.2247, -0.1495, +0.4345, -0.0050, -0.0023, -1.0454, -0.1850, +0.0324, -0.1299, +0.0808, +0.2819, -0.3591, -0.0671, +0.3848, +0.0527, +0.0870, +0.2097, -0.7340, -0.0364, +0.1416, -0.7251, +0.3899, -0.0321, +0.2230, +0.2796, +0.3449, -0.1251, +0.1570, +0.2467, +0.0966, +0.1106, -0.0601, +0.2364, -0.1023, +0.1761, -0.3835, +0.3161, +0.1496, +0.0713, +0.3664, -0.1054, -0.8298, -0.0852, -0.6931, -0.3448, -0.0491, +0.0825, -0.1888, +0.0303, -0.1669, -0.2420, +0.1384, -0.1588, -0.0640, +0.0044, +0.0920, +0.5491],\n[ -0.2530, -0.0550, +0.1767, -0.0197, +0.0392, +0.1533, -0.1908, +0.2095, -0.4558, -0.3182, -0.1207, +0.1121, +0.1108, +0.0632, +0.0251, +0.0800, -0.1490, -0.5128, +0.3625, +0.2499, -0.2294, +0.1496, -0.0599, +0.3519, -1.1459, +0.4171, -0.4250, -0.8933, -0.3909, -0.2782, +0.1234, +0.1028, +0.1640, -0.3572, -0.8056, -0.0328, +0.0441, -0.2858, +0.4340, +0.3755, -0.1979, +0.2009, -0.4347, -0.0842, -0.6567, -0.5243, -0.0483, +0.1214, -0.2512, +0.3272, -1.3854, +0.1747, -0.4386, +0.1441, -0.3902, -0.4389, +0.0880, -1.0031, -1.5420, -0.2146, -0.2588, -0.7700, -0.1053, -0.4492, -1.1340, -0.3955, -0.0062, +0.1858, -0.4721, +0.5749, -0.7658, -0.4602, -0.0370, -0.1331, +0.1250, -0.4708, +0.0186, -0.0472, +0.6910, -0.1052, -1.0263, +0.3958, +0.1160, -0.0658, -0.0263, +0.1706, -0.1598, +0.3593, -0.7746, -0.1430, -0.2939, -0.5315, +0.1469, +0.0629, -0.2287, -0.0982, -0.9673, +0.3858, -0.1839, -0.3926, +0.1103, +0.5221, -0.2097, -0.0538, -0.3374, -0.2589, -0.1362, -0.8023, +0.5058, -0.1019, -0.1020, -0.2040, +0.2014, -0.2802, -0.0912, +0.0152, -0.6996, +0.2708, -0.5717, -0.3458, +0.1621, -0.1512, +0.2791, -0.0776, -0.1128, +0.0089, +0.1515, +0.5123],\n[ -0.4655, -0.0379, +0.0643, +0.1431, -0.0937, +0.1572, +0.2089, +0.0227, +0.4811, +0.2738, -0.4770, -0.0302, -0.2714, -0.0353, -0.4082, +0.4073, -0.2526, +0.0575, -0.2309, +0.4160, -0.2468, +0.2285, -1.0454, -0.3023, +0.0094, -0.0101, +0.3390, +0.0338, -0.0278, +0.1941, -0.0570, +0.2868, +0.0429, -0.8105, -0.0188, -0.1246, +0.0159, -0.4999, -0.2032, +0.0914, +0.6455, -0.1245, +0.0020, +0.0418, -0.6123, +0.0697, +0.1981, -0.4423, -0.1076, +0.2690, +0.1470, +0.3055, -0.1343, -0.0624, +0.2082, -0.1393, +0.0894, +0.2845, +0.2606, +0.3553, -0.0025, +0.4361, +0.6263, -0.0265, -0.0731, +0.2340, -0.1738, +0.1374, -0.0836, -0.0444, -0.7722, -0.3558, -0.3378, -0.5021, +0.1452, +0.1734, -0.0282, -0.3774, +0.1987, +0.1507, +0.0245, -0.0719, -0.0083, +0.2448, -0.1184, +0.1152, -0.6909, +0.1571, -0.6083, +0.0978, -0.2012, +0.3425, -0.0069, +0.2474, -0.5482, +0.0814, -0.1556, -0.1506, +0.1668, +0.2776, +0.3391, +0.2342, -0.2065, +0.0879, -0.6159, +0.2643, -0.2567, -0.2676, +0.2018, -0.4286, -0.4639, -0.4193, +0.1816, +0.2604, -0.0906, +0.0827, -0.0005, +0.2018, -0.3132, 
-0.3025, +0.1614, -0.1068, +0.0919, +0.2894, -0.3590, -0.3032, -0.4354, +0.0154],\n[ -0.0429, -0.3707, -0.3739, +0.3704, -0.0150, +0.2189, -0.4955, -0.7602, -0.6327, +0.1189, +0.3060, -0.5486, +0.1754, -0.5086, -0.4190, -0.6469, +0.0826, -0.0344, +0.1115, -0.4931, +0.2178, -0.6064, -0.0868, +0.2074, -0.1581, +0.4950, -0.4560, +0.1868, -0.2302, -1.1003, -0.9498, -0.3075, -0.1099, -0.7696, +0.2225, +0.3048, +0.0237, +0.1738, -0.1000, -0.0539, +0.2746, -0.6315, -0.7834, +0.5839, -0.7714, -0.1029, -0.3709, -0.2092, +0.5387, +0.0590, +0.3252, -0.1812, -0.0796, +0.0109, +0.1797, -0.1807, -0.6953, +0.0922, +0.2157, -0.0296, +0.0137, +0.0309, -0.0322, +0.1320, -1.3996, -0.5238, +0.0258, +0.1411, -0.3324, -0.0493, -0.8150, -0.2201, -0.6262, -0.1943, -0.2466, -0.3587, -0.5532, +0.3025, -0.9596, +0.2320, +0.0081, -0.3283, +0.0479, -0.5118, -0.1686, +0.5102, -0.2949, +0.2405, -0.0371, +0.3475, -0.2653, -0.1511, -0.3839, -0.0687, +0.0342, -0.5996, -0.4391, -0.5633, +0.3001, -0.8634, +0.0928, +0.1859, +0.1312, -1.4476, +0.4269, +0.6876, -0.3687, +0.0898, -0.2377, -0.3089, +0.2988, -0.4810, -0.0220, -0.2208, -0.3043, -0.2693, -0.5514, -0.1327, +0.0981, +0.1090, +0.0979, +0.0774, +0.1311, -0.0520, +0.5947, +0.1165, +0.0255, +0.0646],\n[ -0.4015, -0.7856, -0.0138, +0.2379, +0.4219, -0.0133, -0.1120, -0.0367, +0.1862, -0.5656, -0.0802, +0.3317, -0.1842, +0.0494, -0.0962, +0.1796, +0.3449, +0.3633, -0.5248, +0.3595, -0.6307, -0.1464, -0.2156, -0.1198, +0.6068, -2.0541, +0.0744, +0.1036, -0.4800, -0.9317, +0.3650, +0.0608, +0.2096, +0.0011, -0.1670, +0.2135, +0.5401, -0.6114, -1.1094, -0.1530, -1.1694, -0.0469, -0.0043, -0.3584, -0.0760, -0.4375, -0.3118, +0.2781, -0.8857, -0.1821, -0.2038, +0.5715, -0.3865, -0.4510, +0.4034, -0.0904, -0.0075, -0.1263, -0.3763, -0.6469, +0.0981, +0.2835, -1.0551, -0.2261, -0.1222, +0.0047, +0.5585, -0.3472, -0.2015, -0.7965, -0.1860, -0.5664, -0.2167, -0.2244, -0.2145, +0.3671, -0.6399, -0.0906, +0.2896, -0.0949, -0.7272, -0.1690, -0.8668, +0.1197, +0.2210, -0.1381, -0.0362, -0.2772, +0.3834, +0.4729, +0.1046, -0.8879, +0.2266, -0.0665, +0.2083, +0.1939, +0.3427, -0.0931, +0.0226, -0.2263, +0.1235, +0.4096, +0.0778, +0.4601, -0.1834, -0.1526, +0.3648, +0.2721, +0.1836, +0.1039, +0.1252, -0.0670, -0.4777, +0.4181, +0.0461, -1.0144, -0.2770, -0.2780, +0.4376, +0.2788, -0.7011, -0.3962, -0.4795, -0.2742, -0.9814, -0.0447, -0.0447, -0.4444],\n[ +0.3501, +0.3281, -0.1430, +0.3228, -0.0807, +0.2136, -0.0195, -0.1002, -0.2381, +0.4731, -0.0458, +0.0137, +0.2722, +0.0900, -0.0668, +0.0659, -0.3536, +0.1688, -0.2339, -0.1300, -0.2204, +0.1206, -0.0453, -0.2130, -0.0140, +0.0041, -0.2013, -1.2579, +0.0436, -0.2726, -0.2897, -0.5455, +0.1721, -0.4969, -0.3442, +0.2220, +0.0569, -0.1374, -0.4693, -0.5210, -0.7792, -0.1477, -0.1435, -0.1889, +0.1739, -0.1954, +0.1126, +0.1769, -0.0294, -0.2095, -0.4485, -0.1938, +0.3559, -0.1803, +0.1457, -0.3859, +0.2793, -0.7072, +0.2029, -0.0043, -0.1302, -0.4323, -0.1481, +0.0405, +0.1824, +0.2051, +0.2035, -0.2542, -0.0847, +0.0311, +0.0031, -0.1944, +0.0411, -0.1625, -0.2449, -0.0437, +0.1181, +0.1734, +0.1622, +0.2482, -1.3020, -0.2676, -0.1668, -0.4255, -0.3623, +0.0653, -0.2027, +0.2574, +0.3004, +0.0192, +0.0700, +0.2328, -1.2587, -0.2362, -0.3811, +0.0033, +0.3652, -0.2675, -0.1837, -0.1855, -0.0299, +0.1968, -0.2170, -0.0384, -0.2551, +0.0783, +0.0645, +0.0703, -0.0835, +0.1902, -0.4658, +0.5849, -0.0440, -0.6254, +0.4145, +0.0154, -0.0337, -0.2226, -0.0193, -0.3050, -0.2739, +0.0791, -0.0331, -0.2853, -0.3587, -0.0245, +0.3906, -0.1312],\n[ 
-0.3729, -0.3580, +0.1994, +0.3125, -0.1575, +0.0351, +0.1712, +0.0479, -0.5090, -0.1524, -0.0229, -0.3794, -0.2660, -0.2205, -0.3291, +0.5219, -0.3152, -0.3720, +0.0702, -0.4275, +0.0918, +0.0985, +0.2214, +0.2948, +0.0067, +0.1727, +0.3573, -0.0733, -0.3958, -0.5830, -0.0946, -0.2411, +0.1241, -0.5995, +0.1575, +0.2158, -0.5639, -0.1019, -0.1161, -0.0025, +0.3288, -0.3254, +0.0209, +0.2869, +0.0349, -0.0032, +0.2493, -0.1807, +0.2012, +0.0285, -0.9862, -0.3932, +0.0355, -0.6267, -0.2420, -1.1322, +0.3270, -0.4070, +0.0856, +0.1377, -0.4038, -0.2331, -0.0207, -0.5493, -0.2026, -0.7153, +0.1385, +0.1107, +0.0477, +0.0602, -0.3468, -0.0456, -0.3055, -0.5692, +0.0869, +0.0364, +0.2018, +0.0797, +0.2311, +0.4669, -0.2796, -0.5679, +0.0042, +0.0015, -0.0524, -0.2371, -0.3690, -0.3491, +0.1767, -0.2053, -0.2630, -0.6185, +0.0131, -0.2657, -0.5073, -0.2655, +0.0691, -0.0768, -0.4844, -0.6951, -0.3339, +0.3054, -0.8780, -0.0191, -0.6555, -0.3514, -0.3141, -0.3230, -0.0244, +0.0889, +0.1160, +0.3249, +0.2260, -0.1515, -0.2689, -0.3664, -0.1808, +0.0221, +0.5215, +0.0880, -0.1395, -0.5289, -0.7631, +0.0567, -0.4045, -0.2250, +0.0902, -0.3769],\n[ -0.5947, +0.2274, -0.9496, +0.3703, +0.0841, -0.1836, +0.0044, +0.0021, +0.1787, -0.0429, +0.1353, -0.4052, +0.0083, +0.1235, +0.6274, +0.2087, +0.3484, +0.1119, -0.1528, +0.0003, -0.3680, -0.5990, -0.5782, -0.0053, -0.0038, +0.2493, +0.0427, -0.1297, -0.0332, +0.0291, -0.1151, -0.0122, +0.2482, -0.1746, +0.6397, +0.1691, +0.0541, -0.1390, -0.0668, -0.1904, +0.2275, -0.0548, -0.3013, -0.3059, +0.0162, -0.3792, +0.2507, -1.5725, -0.0162, +0.4433, -0.2386, -0.3115, +0.2838, -0.2357, -0.2010, +0.3173, -0.0459, -0.0212, +0.3470, +0.1231, -0.2594, +0.0194, +0.0015, +0.5905, -0.0855, +0.0124, +0.3250, +0.0103, -0.2701, +0.0159, +0.1874, -0.0726, +0.1616, +0.0483, -0.2104, +0.0683, +0.0354, +0.0604, +0.0738, -0.6140, -0.1024, +0.1922, -0.6209, -0.4914, -0.1139, -0.3441, +0.2979, -0.0118, +0.0571, +0.2113, -0.4013, -0.2839, -0.2274, -0.1234, -0.2224, +0.5136, -0.5031, -0.2097, +0.1413, +0.4381, -0.5172, -0.2161, +0.1183, +0.1782, +0.0394, -0.9201, +0.1413, +0.1968, -0.8099, -0.2413, -0.0397, +0.0048, -0.9168, +0.0216, +0.2725, -0.6455, +0.0540, -0.1368, -0.8279, -0.2034, -0.1583, +0.2583, -0.4102, +0.2368, -0.1897, +0.0892, +0.2035, -0.6145],\n[ -0.4914, -0.0175, -0.1029, -1.1875, +0.0158, +0.1054, -0.4501, +0.1822, +0.1384, +0.0073, +0.1010, +0.2072, -0.0566, +0.0374, -0.5182, +0.2300, -0.0543, +0.0234, +0.0719, -0.3345, +0.1585, -0.2427, -0.3074, -0.5413, +0.0213, +0.2899, +0.1372, -0.2435, -0.1829, +0.5233, -0.1391, +0.0271, -0.0862, +0.1832, +0.7547, -0.0167, -0.1029, -0.4332, +0.3798, +0.0694, -0.6046, +0.2171, +0.0931, -0.4275, +0.0547, +0.1686, +0.0674, -0.3921, +0.5958, +0.2580, -0.7376, +0.4033, +0.3357, +0.5261, -0.2143, -0.2368, -0.1381, -0.7014, -0.2709, +0.3299, -0.6330, +0.0553, +0.1474, +0.5485, -0.4183, -0.5408, -0.4226, +0.1773, -0.0814, -0.0224, +0.1349, -0.1298, -0.7348, +0.1840, -0.1371, +0.2017, +0.0927, -0.2807, +0.1679, -0.3693, +0.1331, +0.1271, +0.0445, -0.1609, -0.3406, -0.5669, -0.1359, +0.3603, -0.2292, -0.1502, -0.2860, +0.3773, -0.0089, +0.2764, +0.4845, -0.1967, -0.8141, -0.0563, +0.1718, -0.0717, +0.4198, +0.0029, -0.3978, +0.2698, +0.0651, +0.2271, +0.1170, -0.3438, -0.2717, +0.6219, -0.0172, +0.2207, -0.5174, +0.1052, -0.2792, +0.1195, +0.3255, +0.2822, -0.1286, -0.6566, +0.0267, +0.3203, -0.5382, -0.0283, -0.9872, -1.0424, -1.0301, -0.0555],\n[ -0.0453, -0.2395, +0.2406, +0.0631, +0.0629, +0.0833, +0.1781, +0.3761, -0.2013, 
-0.5130, -0.5778, -0.0650, -0.1589, -0.3232, -0.3333, -0.8319, +0.2013, +0.2267, +0.2610, -0.5927, -0.0493, +0.2577, +0.4523, +0.1532, -0.6685, -0.0539, +0.2305, +0.2816, -0.1894, +0.1239, +0.0723, -0.3620, +0.0898, -0.0181, -0.0459, +0.1914, -0.6516, +0.1430, -0.5466, -0.3008, -0.0715, -0.6103, +0.0118, +0.0141, +0.1248, -0.1234, +0.0846, +0.2255, -0.5190, -0.2559, -0.1613, +0.4473, +0.1540, -0.5639, -0.8685, +0.1487, -0.2612, +0.3390, -0.4948, -0.0352, +0.0908, +0.1311, +0.0625, -0.1194, -0.2128, +0.0454, +0.2579, +0.3957, +0.5040, +0.1017, -0.3063, -0.4509, -0.2063, -0.2673, -0.3279, +0.0295, -0.1485, +0.2786, +0.1512, +0.1158, +0.1659, -0.1235, -0.8287, -0.2932, +0.0973, -0.0585, -0.3973, +0.2949, +0.2267, -0.1506, +0.2104, -0.1853, -0.4127, -0.8102, -0.1861, -0.2241, -0.0969, -0.1213, +0.2320, -0.8217, +0.0387, +0.1559, +0.0937, +0.0199, +0.2130, +0.5811, +0.1031, +0.0254, +0.1938, -0.1855, +0.2375, -0.6722, -0.1418, +0.6080, -0.6757, -0.2532, +0.0382, +0.2149, -0.2714, +0.3754, -0.2763, +0.0971, -0.3987, +0.5819, -0.0490, -0.6208, -0.0094, -0.0335],\n[ -0.0520, +0.1143, +0.3346, +0.3328, -0.7457, -0.4004, +0.4402, +0.1565, +0.2192, -0.7181, +0.4356, +0.0638, +0.1050, +0.3447, -0.9636, -0.2623, +0.5175, -0.0571, +0.0749, -0.2558, -0.1522, +0.1987, +0.3003, -0.0340, -0.1250, +0.1917, +0.0105, +0.1603, +0.3017, +0.1785, +0.3443, -1.5343, -0.1129, -0.3967, -0.0861, -0.1839, +0.3447, -0.0320, -0.1140, -0.7012, -0.5374, -0.1204, -0.1188, -0.9007, +0.1548, -0.1386, -0.0337, +0.6399, -0.4455, +0.0039, +0.2296, +0.0017, -0.3522, -0.4161, +0.1904, -0.4585, -0.0504, -0.4775, -0.1963, -0.0786, -0.0918, -0.0926, -0.4992, -0.4981, +0.1395, -0.0966, -0.1520, +0.1250, -0.3255, +0.1306, +0.1884, +0.2478, -0.1758, -0.0991, -0.3168, +0.2045, +0.1035, +0.1926, +0.1510, -0.9593, +0.2111, -0.5241, -0.6604, -0.0298, -0.4766, -0.1225, -0.2626, -0.2218, +0.3571, -0.8226, +0.0405, -0.8556, -0.8859, -0.3557, -0.7047, +0.2757, -0.0380, +0.1300, -0.2333, -0.4366, -0.6659, +0.1032, +0.0568, +0.0892, -0.5488, -0.5828, +0.2135, +0.5198, +0.2927, -0.1756, +0.4095, -0.5759, -0.5219, +0.0122, +0.1435, -0.3798, +0.1782, +0.0249, -0.7436, -0.4398, -1.0002, -0.4882, +0.3656, +0.4042, +0.3798, -0.3411, -0.0478, -0.4250],\n[ +0.0293, -0.1663, -0.3768, -0.2223, -0.0055, -0.3266, +0.1328, +0.2718, +0.3462, +0.0200, -0.5919, -0.0576, -0.5609, -0.3056, +0.0931, +0.3790, +0.0302, +0.0964, +0.0000, -0.1535, -0.2970, +0.7108, +0.7230, -0.1885, +0.3083, -0.2583, -0.1817, -0.2954, -0.4350, +0.2642, +0.0138, -0.2058, -0.5751, +0.2576, +0.2225, -0.3665, -0.9525, -0.1351, -0.3914, +0.0191, -0.0508, -0.6731, +0.1697, -0.3946, -0.0264, -0.0259, +0.0276, -0.5611, -0.4430, +0.2832, -0.6663, -0.0660, +0.2421, +0.0772, -0.0405, +0.2755, +0.0031, -0.6514, +0.0150, +0.3053, +0.4018, +0.5021, -0.2157, +0.1647, -0.8754, +0.1060, +0.1213, +0.0049, -0.5501, +0.3529, +0.3156, -0.1689, -0.3875, -0.1849, -0.1724, +0.1727, +0.1591, -0.0960, +0.1350, -0.5237, -0.3937, +0.0273, +0.3581, +0.1818, -0.2577, +0.4555, +0.3635, +0.4512, -0.2311, -0.1382, -0.8001, -0.4946, +0.0842, +0.3760, +0.1495, +0.6040, -0.4504, -0.4973, +0.0428, +0.0742, -0.1994, -0.0175, -0.0597, -0.2138, -0.3823, -0.1439, +0.1747, -0.8329, -0.2617, +0.2151, +0.1795, -0.4310, +0.0498, +0.0994, +0.5801, +0.2664, +0.2355, -0.6505, -0.2277, -0.4698, +0.4264, -0.0938, -0.3382, -0.3953, +0.0303, +0.4967, +0.1012, -0.1518],\n[ -0.2188, +0.1021, -0.2240, -0.4672, +0.1532, -0.3045, +0.1718, +0.1418, -0.5078, -0.3792, +0.0666, +0.0985, -0.3817, -0.1581, -0.3091, -0.4972, -0.4725, +0.1204, 
-0.6940, -0.1842, +0.0045, -0.9836, +0.1029, -0.1811, +0.0756, +0.0428, +0.0777, -0.3013, -0.4269, -0.0528, +0.0722, -0.0670, -0.7216, +0.1244, +0.2999, -0.0958, +0.0789, -0.1816, -0.4453, -0.6704, -0.0310, -0.2875, +0.3572, -0.0051, -0.4087, +0.0230, +0.3466, -0.1092, +0.3518, -0.3754, -0.1125, -0.1751, -0.1190, +0.0514, +0.2108, +0.0382, -0.2385, -1.1745, -0.6549, +0.1466, +0.1003, -0.1397, +0.1939, -0.1007, -0.5671, -0.2141, -0.3080, -0.3285, -0.5235, -0.5764, -0.3409, -0.5457, +0.1450, +0.2169, -0.0775, -0.3134, -0.2641, +0.0260, -0.1224, -0.2079, -1.1028, -0.0962, +0.3657, +0.2675, +0.2761, -0.1438, -0.0040, +0.3894, -0.1598, -0.9228, -0.4203, +0.3786, -0.6578, -0.2065, +0.4389, -0.6512, +0.1878, +0.1302, -0.1187, +0.0349, -0.2018, +0.2299, -0.1649, -0.5971, -0.0187, -1.1908, -0.2499, -0.4757, +0.0489, -0.0761, +0.6595, +0.0984, -0.2936, -0.1527, +0.3843, +0.4120, +0.2702, +0.0496, -0.1293, +0.6906, +0.2244, -0.1256, -0.4838, +0.6558, -0.0440, -0.2431, -0.5317, +0.1463],\n[ +0.1007, -0.2044, +0.0915, -0.1110, +0.2309, +0.0957, +0.0353, -0.2517, -0.1242, -0.1429, -0.3406, +0.0926, +0.0606, +0.0489, +0.0082, +0.1055, -1.3637, +0.2863, -0.8374, +0.0397, -0.0966, +0.1015, +0.0416, +0.2654, -0.1580, +0.0091, +0.2479, -0.1647, +0.0940, +0.1184, -0.1846, +0.2333, -0.1408, -0.0981, +0.1680, +0.0911, +0.4148, +0.2737, +0.0429, -0.3616, +0.0468, +0.2632, -0.2784, +0.4318, +0.0774, -0.4683, +0.0070, -0.0744, -0.0775, +0.0305, +0.0484, -0.1177, +0.0596, -0.1406, +0.0670, -0.0642, +0.2147, -0.3049, -0.0589, +0.2003, -0.0902, -0.1474, -0.3707, +0.1232, -0.3934, +0.1415, -0.2993, +0.0076, -0.1834, +0.0297, -0.1118, +0.1966, +0.1434, +0.0009, +0.0595, -0.2880, +0.0926, -0.3560, -0.5908, +0.2940, -0.0431, -0.0223, +0.4797, +0.1149, -0.0131, -0.0821, +0.0943, -0.2290, +0.1531, +0.2856, -0.3395, -0.1174, -0.5446, +0.0501, -0.0462, -0.1134, -0.1696, +0.0725, -0.0470, -0.0893, +0.0332, -0.0180, +0.5846, -0.2333, +0.1019, -0.0331, +0.0771, +0.2611, -0.1336, -0.1301, -0.9212, -0.3052, +0.0114, +0.0305, -0.2702, -0.1209, +0.2616, +0.4937, -0.0724, +0.2716, -0.0188, +0.2153, +0.3038, +0.1207, +0.1832, -0.1724, +0.2292, +0.2457],\n[ -0.1804, -0.6413, -0.0744, +0.1991, -0.0737, -0.2933, +0.1017, -0.1491, +0.4600, +0.2833, -0.2731, -0.1631, -0.5416, -0.4341, -0.2894, -0.1315, -0.2177, +0.0498, -0.0359, +0.4271, -0.1389, -0.0952, -0.0668, -0.2945, +0.0423, -0.4391, +0.1829, -0.0940, +0.2085, +0.4466, -0.0549, -0.4789, +0.1381, +0.3271, -0.1952, -0.5656, -0.4272, -0.6747, -0.1363, -0.4104, +0.3835, +0.1417, +0.3708, +0.0614, -1.0061, -0.3433, +0.5277, -0.0979, +0.2403, -0.3140, -0.1091, -0.0942, -0.7187, -0.1379, -0.0743, -0.0831, -0.0651, -1.4077, -0.2030, +0.4326, +0.1291, +0.0197, +0.0664, +0.0799, -0.0921, -0.1370, -0.4199, +0.2537, -0.1733, -0.5453, -0.3406, +0.2641, -0.2840, -0.2217, -0.1717, -0.2596, -0.6149, +0.2036, -0.1179, -0.4542, -0.2821, -0.0771, -0.3978, -0.0618, -0.0831, -0.2506, -0.1301, +0.0824, -0.5279, -0.0014, -0.0490, -0.5102, -0.3509, -0.0945, +0.3115, +0.4273, -0.6854, -0.7175, -0.3040, +0.1343, -0.3622, -0.2034, +0.4940, -0.5965, +0.0041, -0.6200, +0.0382, +0.2886, -0.6852, +0.1837, -0.1224, -0.3253, -0.2527, -0.8300, -0.0729, +0.0226, -0.4047, +0.1606, +0.2364, -0.4973, -0.6442, +0.1035, +0.3565, +0.0052, +0.3142, +0.3418, +0.0403, -0.5119],\n[ -0.1017, -0.0053, +0.0267, -1.0741, -0.1520, -0.1370, -0.3307, -0.0732, -0.4366, -0.7079, -0.2262, -0.2293, -0.1644, +0.1885, +0.6407, -0.2647, +0.0997, +0.1705, +0.3492, -0.1606, -0.0041, -1.6792, +0.0112, -0.0425, -0.4714, -0.0951, +0.1999, 
+0.1248, +0.2741, +0.2132, +0.3065, +0.2279, -0.1889, +0.0610, -0.3630, +0.1476, -0.0402, -0.1753, -0.7225, -0.0120, +0.0444, +0.1587, +0.1624, -0.3233, +0.0357, +0.1818, +0.2471, -0.5049, -0.9300, -0.1819, -0.0472, -0.1533, +0.2635, +0.2933, -0.1783, -0.2531, -0.1683, -0.0108, +0.0977, +0.2713, -0.0255, -0.2324, +0.1616, -0.3653, -0.1300, -0.0267, -0.0338, +0.1684, +0.0473, -0.0590, -0.1669, +0.0260, -0.2941, +0.1953, -0.7179, -0.1900, +0.1382, +0.3187, +0.1247, -0.5942, -0.5205, -0.0947, -0.0335, -0.0588, -0.0189, +0.3053, -0.0854, -0.3207, +0.1741, +0.0546, +0.0115, -0.2876, +0.1458, +0.3574, -0.2910, +0.0976, +0.1731, -0.0817, -0.0826, +0.2886, -0.1298, +0.2386, +0.2121, -0.1873, +0.3193, +0.3598, +0.3700, -0.4408, +0.0477, -0.2259, -0.8351, -0.3033, -0.1020, -0.3804, -0.0147, -0.0782, -0.0253, +0.2604, +0.0669, -0.0886, -0.2918, +0.0786, -0.0955, +0.1704, -0.2301, -1.1527, -0.0295, +0.4580],\n[ -0.6923, -0.0835, -0.2932, +0.1860, -0.0988, +0.0671, +0.0189, +0.2117, +0.1516, +0.1037, -0.1780, +0.5206, -0.2900, -0.4395, +0.0723, -0.1160, +0.1709, -0.1442, +0.3346, -0.7235, -0.4740, +0.0875, -0.0777, -0.1537, +0.4708, +0.3247, +0.1284, -0.2747, +0.4688, +0.1906, -0.5578, -0.2271, -0.6142, -0.2636, -0.1089, -0.4988, -0.1197, -0.1757, +0.0829, -0.0075, +0.2441, -0.6189, -0.1981, +0.1928, +0.1158, -0.3283, -0.5588, +0.0071, +0.6097, +0.3238, -0.3600, +0.4527, -0.0510, +0.7691, -0.5023, -0.2895, +0.2136, +0.1607, +0.3858, -0.0098, +0.3589, -0.1146, -0.0712, -0.0400, -0.1925, -0.4676, +0.2759, +0.1378, -0.4339, -0.5202, -0.4195, +0.0838, -0.2441, -0.3500, +0.0759, -1.0871, -0.1415, -0.2753, -1.1007, -0.0186, +0.1863, -0.5634, +0.0043, +0.1119, +0.2769, -0.1073, -0.2010, +0.4234, -1.3143, +0.3216, +0.1197, -0.0692, -0.4731, -0.6585, +0.1252, +0.1992, -0.2571, +0.1463, -1.2326, +0.1175, +0.0214, -0.3330, -0.2817, -0.4959, -0.0610, -0.0143, -0.7349, +0.2925, -0.2817, -0.2231, +0.1759, -0.9774, +0.2390, -0.1225, -0.9171, -0.1901, -0.7895, -0.4615, +0.0353, -0.5406, -0.4352, +0.3894, +0.1673, -0.5319, -0.4118, -0.2283, -0.5668, +0.2083],\n[ -0.2980, -0.3592, -0.0770, -0.3542, +0.1863, +0.2256, -0.1919, -0.1789, -0.4016, +0.1436, -0.2752, +0.0135, +0.2265, -0.3872, +0.0598, +0.0476, -0.2551, -0.1743, -0.7473, -0.2520, -0.2357, -0.3911, +0.0377, +0.1986, -0.0369, -0.3049, +0.1196, +0.2373, +0.1458, +0.2655, +0.1632, -0.1340, -0.2975, +0.4237, +0.2112, +0.4525, -0.0604, -0.0543, -0.4549, +0.0763, -0.6850, -0.4227, -0.7335, -0.1524, -0.2647, -0.1532, +0.3511, -0.2706, +0.1650, -0.4522, -0.0460, +0.6378, -0.1204, +0.0676, -0.0465, -0.0767, +0.2404, -0.8729, +0.2271, +0.4106, -0.1021, +0.0290, -0.5288, -0.1131, -0.3274, +0.0984, -0.4574, +0.5551, -0.1834, +0.0660, -0.1157, +0.1353, -0.2033, -0.3010, +0.5301, +0.0713, -0.2118, -0.1965, +0.0318, +0.5520, -1.3944, +0.3160, -0.5554, -0.3506, +0.0334, +0.1534, +0.1675, -0.2854, +0.1113, -0.2012, +0.3805, -0.0233, -0.2185, +0.1111, +0.0239, -0.4577, +0.1438, +0.1930, -0.1968, +0.1310, +0.6313, -0.3919, -0.4534, +0.2406, -0.5656, +0.1666, +0.0888, +0.0411, +0.0157, +0.3176, -0.1931, +0.2688, -0.3309, +0.0924, -0.0175, +0.3059, +0.0998, -0.1257, +0.2527, -0.5143, +0.0128, +0.0456, -0.3505, -0.3927, -0.0699, -0.4737, -0.5614, -0.4470],\n[ -0.4806, +0.3155, +0.0923, -0.0874, -0.3705, +0.0612, +0.2649, -0.5107, -0.7034, +0.1830, -0.2687, -0.0724, -0.0851, -1.4003, +0.0659, -0.5941, +0.1984, +0.1414, -0.1043, +0.0107, -0.6484, -0.0740, +0.2101, +0.0407, -0.2219, +0.0094, -0.1241, -0.3421, -0.0127, -0.1056, +0.2103, +0.2309, -0.2926, -0.7421, -0.5438, +0.0586, 
+0.0710, +0.3101, -0.6930, +0.0095, -0.5734, +0.1109, -0.3719, +0.1490, +0.1394, -1.1226, -0.0579, +0.3137, -0.1024, +0.3899, +0.0721, -0.4998, +0.0433, +0.3800, +0.2369, +0.2631, -0.4126, +0.4422, -0.0038, +0.1540, -0.2970, +0.2507, +0.0484, -0.0013, -0.1378, +0.3255, -0.6364, +0.1867, +0.3550, +0.1193, -0.1194, -0.5268, -0.0703, +0.0644, -0.1363, +0.0177, +0.0322, +0.2742, +0.0973, -0.1359, -1.5205, +0.3452, +0.1356, -0.0290, -0.1225, -0.5191, -0.1403, -0.1737, -0.3189, +0.1529, +0.1788, +0.5082, -0.2529, -0.1178, +0.1159, -0.2308, +0.0791, +0.0944, +0.2157, -0.1984, -0.6510, -0.2869, -0.0004, +0.2280, +0.0338, +0.1443, -0.5017, +0.3413, -0.0812, -0.3725, -0.0953, -0.1753, -0.6761, -0.7394, -0.2595, -0.5947, -0.1616, -0.1802, +0.1509, -0.3197, -0.0278, +0.2917, +0.2329, -0.0423, +0.1683, -0.2103, +0.1495, +0.0141],\n[ -0.0667, -0.0992, -0.1614, +0.0497, +0.1847, +0.0645, +0.1867, +0.1900, +0.0920, -0.4507, +0.0406, -0.5628, +0.1646, +0.0726, -0.0777, -0.0642, -0.7265, +0.1354, +0.2330, -0.3079, +0.1769, -0.1255, +0.2995, -0.1625, -0.6023, -0.1636, +0.2149, +0.0838, +0.1872, -0.0040, -0.2513, +0.1646, +0.4241, +0.3852, -0.2071, +0.2552, +0.5452, +0.2415, +0.1584, -0.3388, +0.0944, -0.0345, +0.1485, -0.2260, +0.1944, -0.3549, -0.0954, -0.3895, +0.0756, -0.1107, +0.3953, -0.2296, -0.6125, +0.2883, +0.5243, -0.1802, +0.1412, +0.0637, +0.2653, +0.1138, +0.0330, -0.8522, -0.4973, -0.1516, -0.1257, +0.0568, -0.1561, -0.9619, -0.3596, +0.0625, +0.0618, -0.3619, +0.0190, -0.0134, +0.0080, -0.1078, +0.5391, +0.0050, +0.0016, -0.2223, -0.0290, -0.2086, +0.1544, -0.4426, +0.2384, -1.1034, -0.0437, +0.1262, +0.2907, +0.1822, +0.2386, -0.3384, -0.0796, -0.0364, -0.2243, -0.3665, +0.2532, -0.0050, -0.5482, -0.1726, -0.0011, +0.2308, -0.1187, +0.4308, +0.3380, -0.5357, -0.3053, -0.1135, -0.4833, +0.2517, -0.3469, -0.1541, +0.2791, -0.6348, +0.3385, +0.0960, +0.0710, -0.0633, -0.0994, +0.3203, -0.7650, -0.3229, -0.2339, -0.1777, +0.1286, +0.3831, +0.2287, +0.2214],\n[ -0.2278, +0.0599, +0.1749, +0.0846, +0.2653, -0.2085, +0.2674, +0.0839, +0.1695, +0.0025, +0.1746, -0.6353, -1.5946, -0.0767, +0.1051, -1.0178, -0.4124, -0.8966, -0.4066, -0.4909, +0.1857, -0.3586, -0.0416, -0.0515, -0.5398, -0.4615, +0.0544, -0.2038, -0.6872, -0.1224, +0.1132, -0.0118, +0.5187, -0.3769, -0.3153, -0.6752, -0.0928, +0.5779, -0.3849, -0.2489, -0.2858, +0.0034, -1.1811, -0.5856, +0.0593, -0.7577, -0.4646, -0.0720, -0.0858, -0.6801, -0.3070, -0.2493, -0.3826, +0.1238, +0.0142, +0.1338, -0.2912, +0.1265, +0.1413, -0.0790, -0.4042, -0.1121, +0.0566, -0.5050, -0.1847, -0.0788, -0.7086, +0.4693, +0.0922, -0.5241, +0.0480, -0.6937, -0.3177, +0.3113, -0.2466, -0.0568, -0.3864, +0.0785, +0.1268, +0.3052, -0.4641, +0.0233, +0.3174, +0.4796, +0.2136, +0.2071, -0.3378, +0.1770, -0.3073, -0.4837, +0.0268, +0.3070, -0.7598, +0.1185, -0.9079, -0.0655, -0.8789, +0.1163, +0.0533, -0.4104, -1.1246, +0.3531, -0.2305, -0.0769, +0.2587, -0.2744, -0.1765, +0.1128, -0.3661, -0.2759, -0.3635, -0.0870, +0.2840, -0.1848, -0.7447, -0.1136, -0.5474, +0.1823, +0.3944, -0.8700, +0.2521, +0.2599, -0.1910, -0.1516, -0.0022, +0.1349, +0.0809, -0.0219],\n[ +0.1997, -0.1686, +0.1633, +0.3972, -0.2555, -0.4707, +0.0986, +0.2124, +0.2609, -0.0477, +0.0647, +0.2468, -0.4802, +0.2162, +0.1008, -0.3933, -0.7887, +0.1305, -0.3189, +0.1509, +0.1744, +0.0297, +0.1503, +0.3479, +0.4152, -0.0781, -0.1543, -0.3510, -0.4484, +0.2827, -0.3342, +0.2767, -0.1857, -0.0755, +0.0698, -0.0198, +0.3059, +0.1141, -0.1964, +0.0740, +0.1471, +0.0524, +0.3166, +0.4728, +0.2753, 
-0.2491, -0.3577, -0.0727, +0.1008, +0.2395, +0.1954, -0.2640, +0.1138, +0.2663, -0.0429, -0.1869, -0.1945, +0.2696, -0.0298, -0.4269, +0.1007, -0.0709, +0.1780, -0.0386, +0.1516, -0.2490, +0.0642, -0.1984, +0.1382, +0.2252, +0.2989, -0.0884, +0.5567, -0.1373, -0.0234, -0.1539, +0.2249, +0.0235, +0.0363, -0.0671, +0.5934, -0.3499, -0.4031, -0.0343, +0.0282, +0.2338, -0.1531, -0.0376, -0.3333, -0.0082, +0.3619, +0.0112, -0.2805, +0.1392, -0.2005, -0.4171, -0.0823, -0.1410, -0.0805, -0.1926, +0.4222, -0.0171, +0.5127, +0.2822, +0.2713, -0.5033, +0.1302, -0.1223, -0.2492, +0.0080, +0.3015, +0.2210, -0.1282, -0.2839, -0.2694, +0.0342, +0.0786, -0.0651, +0.4300, -0.3366, +0.1258, +0.0659, +0.1648, -0.3529, -0.1900, -0.0573, -0.1043, +0.3084],\n[ -0.3196, -0.3881, -0.2989, +0.0906, -0.1275, -0.0363, +0.1150, -0.0005, -0.3683, +0.2742, -0.0017, +0.0236, -0.4375, +0.2102, +0.3765, -0.4295, -0.1413, -0.1046, -0.4363, -0.2343, -1.3066, -1.2125, +0.2322, +0.0435, -0.2443, +0.2904, +0.3639, +0.0835, -0.0235, +0.1044, +0.0270, -0.2595, -0.1545, +0.4291, -0.6825, -0.1580, -0.3518, -0.4174, -0.3360, +0.1452, -0.3446, -0.0335, -0.2831, -0.0344, +0.0732, +0.0012, -0.7452, -0.2454, +0.0288, +0.0435, +0.2431, -0.1675, +0.3184, -0.0510, +0.2899, -0.5552, +0.0075, -0.7057, +0.0556, -0.1258, -0.9960, -0.9879, +0.2819, +0.0506, -0.4218, +0.0495, +0.1051, +0.0847, +0.1042, +0.1964, +0.0236, -0.4007, -0.2263, +0.1160, -0.4721, +0.0103, -0.1899, -0.0715, +0.2563, +0.4472, -0.0286, +0.0117, +0.1024, -0.0833, +0.2239, +0.0298, +0.0470, +0.0317, -0.2442, +0.1281, -0.1152, +0.2104, +0.1516, -0.3350, +0.1161, +0.0015, -0.2331, -0.1021, +0.1318, +0.0211, -0.0406, -0.3460, -0.0693, +0.3281, +0.2538, +0.0049, -1.5453, -0.5795, +0.1501, -0.0246, -0.1320, +0.1567, +0.1448, -0.5247, +0.0656, -0.1189, +0.2296, -0.2399, +0.1571, +0.1269, -0.2336, -0.2560, -0.2979, -0.0549, +0.2824, -0.0339, -0.4543, +0.0071],\n[ +0.2987, -0.2200, +0.0036, -0.7426, -0.0864, +0.0387, -0.2558, -0.3795, -0.2955, -0.2933, +0.2701, +0.1779, -0.0576, -0.4442, +0.1014, -0.1645, -0.1616, +0.0985, +0.4104, +0.5039, -0.1322, -0.2139, +0.2462, -0.3159, -0.2811, -0.2837, +0.4436, +0.3231, +0.1408, +0.1549, +0.1745, +0.1944, -0.2509, +0.4857, -0.9717, +0.0911, -0.1165, -0.1745, -0.2154, +0.5759, -0.3839, +0.4048, +0.1516, -0.1299, -0.2511, +0.1025, +0.0566, -0.2283, -0.3304, -0.2317, +0.4664, +0.1280, -0.2645, -0.3396, +0.2112, +0.1603, +0.3225, +0.3721, +0.3877, +0.2450, -0.0419, +0.0659, -0.3701, -0.7026, -0.3082, +0.0884, +0.1561, +0.3241, -0.1554, -0.3413, -0.1884, -0.9844, -0.5072, -0.0303, +0.1457, +0.3460, -0.5375, -0.2408, -0.1810, -0.5850, +0.0492, +0.1837, -0.1011, +0.4130, +0.0559, -0.0881, +0.3454, +0.2754, -0.7812, +0.1154, -0.3114, -0.2542, +0.0460, -0.0661, +0.1960, +0.1341, -0.1157, +0.1198, -0.2963, -0.3547, +0.3525, -0.3400, -0.1312, +0.0590, -0.0392, +0.4954, +0.0648, +0.3201, -0.2454, -0.2929, +0.0651, +0.5381, -0.3279, +0.2092, -0.1961, -0.1749, -0.5174, -0.1017, -0.1551, +0.1715, -0.0994, +0.1894, +0.0272, +0.2676, +0.1321, -0.0582, -0.1677, +0.2883],\n[ -0.0146, -0.0709, -0.0518, -0.0176, -0.2959, -0.1746, -0.0633, +0.6862, +0.2387, +0.0482, -0.2509, +0.1436, +0.1807, -0.3079, +0.0494, -0.3246, +0.3055, -0.0408, +0.0555, -0.3382, -0.0025, -0.6834, +0.3476, -0.0030, +0.1579, +0.0862, -0.1791, +0.2797, -0.1817, -0.5095, +0.1178, -0.0809, +0.0146, +0.0715, +0.2182, +0.1804, -0.0511, +0.2319, +0.3272, +0.1131, -0.8983, -0.2011, +0.1689, +0.1074, -0.2583, +0.1779, -0.3014, -0.6327, -0.6665, -0.1555, -0.4645, +0.3818, -0.1123, -0.0006, 
+0.0179, -0.2749, -0.2487, +0.2292, -0.4893, +0.0555, +0.0468, -0.6136, +0.5404, +0.0466, +0.4146, +0.1579, -0.2530, -0.1750, +0.1629, +0.3238, +0.5659, -0.0503, +0.0016, +0.2809, +0.2784, +0.1475, -0.6414, +0.0670, +0.2265, -0.1811, -0.3040, -0.0546, -0.2173, -0.2576, +0.0802, +0.2631, +0.0972, -0.4235, -0.2117, +0.1895, -0.0280, -0.5000, +0.0190, -0.4688, -0.1146, -0.1668, -0.3957, -0.4922, -0.4706, +0.0001, +0.0590, +0.2283, +0.2287, +0.1000, +0.0845, -0.0947, -0.0734, +0.0261, -0.6553, +0.0896, +0.2446, -0.0348, +0.0433, +0.4232, -0.0218, -0.3960, -0.5046, -0.2987, +0.0280, +0.1634, +0.2381, +0.3501, -0.3088, -0.1006, -0.1233, +0.2142, +0.2062, -0.1283],\n[ +0.4739, -0.1162, -0.1366, +0.3816, -0.0417, +0.1261, -0.2335, +0.3669, -0.2401, -0.9522, +0.2109, -0.0132, -0.1865, +0.4836, +0.1055, -0.2218, -0.1196, +0.0196, -0.0025, -0.1330, +0.2417, +0.1743, -0.1366, -0.0336, -0.6017, -0.1232, -0.0231, -0.5966, -0.2600, +0.2885, +0.1231, -0.4905, +0.1792, +0.3717, -0.3098, -0.1309, +0.0815, +0.2542, +0.4068, +0.0271, +0.5056, -0.1061, +0.0995, +0.1470, +0.0906, +0.0087, -0.0164, -0.0068, +0.1400, +0.0244, +0.1745, -0.5422, +0.1526, +0.2037, +0.3617, +0.0946, -0.0461, +0.3309, +0.1855, +0.1393, +0.1067, -0.1586, +0.4164, -0.3496, -0.0448, -0.1618, +0.1439, +0.1217, -0.1955, +0.1186, -0.4702, -0.0768, +0.1980, +0.0206, -0.1104, +0.1408, +0.2186, +0.2172, -0.0080, -0.5046, -0.2846, +0.2130, +0.3136, +0.1544, -0.1141, -0.2175, +0.1584, +0.3487, +0.0853, -0.3997, +0.1498, -0.7731, -0.3221, -0.1360, +0.1550, -0.3316, +0.2193, +0.1954, +0.1046, -0.4732, +0.0718, -0.0856, +0.1472, +0.1259, +0.4658, -0.9460, -0.3982, -0.2009, +0.0725, +0.0534, -0.1006, +0.1511, +0.1744, -0.0355, +0.0462, +0.0833, +0.1941, +0.2094, -0.0421, -0.3453, +0.0553, -0.3758, +0.4053, +0.1095, +0.3244, +0.2891, +0.0966, +0.1166],\n[ -0.0724, +0.1085, -0.4972, -0.0028, -0.0067, +0.0227, -0.0290, -0.2167, -0.6043, +0.0365, +0.4909, -0.1674, +0.2678, +0.0223, -0.3653, -0.0437, -0.1391, +0.0805, -0.1396, +0.2644, -0.1395, +0.0312, -0.3119, -0.0578, -0.3483, -0.0024, +0.0221, -0.0269, +0.0651, -0.4800, +0.3787, -0.7678, -0.2529, +0.1472, +0.2696, +0.0188, +0.0343, +0.1721, -0.2451, -0.1120, -0.2019, -0.2884, -0.6638, -0.1714, +0.2885, +0.2285, +0.0318, -0.3222, -0.3335, +0.2194, -0.3422, -0.4953, +0.2280, -0.5251, -0.1215, -0.6155, -0.1824, -0.5457, -0.2367, -0.3223, -0.0156, +0.1921, -0.3315, +0.0449, -0.4019, -0.2108, -0.1561, +0.4559, +0.0354, +0.1694, +0.1857, -0.1764, -0.1723, +0.2748, -0.4499, -0.2516, -0.2862, -0.7654, +0.1760, -0.3138, +0.1232, +0.1452, -0.4399, -0.4372, -0.5577, +0.4384, +0.1098, +0.2407, +0.3873, +0.2635, -0.4921, +0.0278, +0.0434, -1.1753, -0.5458, -0.7957, -0.5668, +0.1613, -0.4191, +0.1752, -0.0424, +0.0031, -0.1225, +0.3289, -0.2935, -0.1039, -0.1187, +0.0134, -0.1290, -0.0443, -0.4428, +0.1887, +0.3621, -0.5257, -0.1781, -0.4115, -0.2030, -0.0053, -0.3871, +0.1769, +0.0771, +0.1107, -0.5442, +0.1559, -0.2440, +0.0977, +0.0242, -0.3956],\n[ -0.0798, -0.1869, -0.4505, +0.2138, +0.1049, +0.3944, +0.1550, -0.0989, -0.2967, +0.0611, +0.3077, +0.1048, +0.0462, -0.2095, -0.1263, +0.3146, -0.2458, -0.2668, -0.2381, +0.0026, +0.1260, +0.4875, +0.0092, -0.0631, +0.1149, +0.0102, -0.2396, +0.0068, -0.0141, -0.3464, +0.1864, -0.2542, -0.2427, +0.2244, -0.8746, +0.2318, -0.0816, +0.0589, -0.9249, -0.0227, -0.4448, +0.0442, +0.2586, -0.0237, +0.0434, +0.3148, +0.1130, -0.1757, -0.2727, -0.0835, -0.8797, -0.1123, -0.2536, +0.0793, -0.0724, +0.0759, -0.8041, +0.0838, +0.0876, +0.2154, +0.0597, -0.4181, +0.5480, 
-0.0952, +0.3059, -0.1575, -0.0610, -0.1407, -0.1976, -0.1626, -0.5145, +0.4673, +0.1151, +0.2068, -0.0202, -0.5073, -0.2761, -0.0609, -0.0292, +0.2907, -0.0149, -0.2919, -0.0782, -0.0149, +0.2524, +0.0138, +0.5132, -0.0625, -0.0648, -0.1619, +0.0246, -0.4964, +0.4155, -0.1614, -0.0334, +0.2199, -0.4551, -1.1411, +0.4183, -0.5659, +0.4333, -0.1543, -0.3226, +0.1156, +0.2702, +0.1286, -0.1775, -0.1968, -0.0402, +0.4610, -1.0954, +0.0405, +0.2891, -0.0159, -0.1103, +0.1736, +0.0364, -0.3440, -0.4703, -0.0790, +0.2281, -0.0608, -0.0070, +0.1701, +0.2180, +0.2921, -0.1660, -0.0240],\n[ -0.1531, -0.0762, +0.2434, +0.2059, +0.0006, -0.0483, -0.0835, -0.1988, +0.1489, -0.3940, -0.0165, +0.2051, -0.3440, -0.4387, +0.1805, -0.0351, +0.0964, -0.2094, +0.0173, -0.1027, +0.0215, -0.5647, -0.2739, -0.1528, +0.4392, +0.0523, +0.1529, +0.0308, +0.0211, +0.1656, -0.0450, -0.1568, +0.0969, +0.1154, -0.3587, +0.2964, -0.0999, -0.3697, +0.1148, -0.0733, -0.5812, +0.1674, +0.0484, +0.2600, -0.0143, +0.0457, -0.0709, -0.0018, +0.0157, -0.0339, -0.2562, -0.0362, -0.8209, -0.1456, -0.2533, +0.1077, -0.0559, -0.4240, -0.0907, -0.3365, -1.1521, -0.1654, -0.1469, -0.3055, -0.2623, +0.2880, +0.1382, +0.1370, +0.2796, -0.0196, +0.4249, -1.0353, -0.1271, -0.4022, +0.0980, -0.0444, +0.1223, -0.2851, -0.2339, +0.6282, +0.2245, -0.2938, +0.1645, -0.0032, +0.2605, -0.5217, -0.1395, -0.3899, -0.1549, -0.0148, -0.3275, +0.3340, -0.0451, -0.4814, -0.8078, +0.0940, -1.2753, +0.1369, -0.2777, +0.1625, +0.1247, -0.1658, -0.3757, +0.3097, +0.1633, -0.3481, -0.1820, +0.2435, -0.3022, -0.3967, -0.5671, -0.2281, +0.0564, -0.0929, +0.0627, -0.2076, +0.1221, -0.0419, -0.0861, +0.0797, -0.0192, -0.2822, -0.1680, -0.3009, -0.3804, -0.4238, +0.0706, +0.1520],\n[ +0.0304, -0.2949, -0.1197, -0.2498, -0.1144, +0.2134, +0.0354, +0.4345, -0.1183, +0.2633, -0.0164, +0.0046, -0.2470, -0.0179, -0.0765, -1.0519, +0.4042, +0.2414, -0.3601, +0.2761, -0.2936, +0.2004, +0.0272, -0.3135, +0.5484, -0.2961, +0.1134, +0.1317, +0.2315, -0.5105, +0.2052, +0.3595, +0.1048, -0.6679, +0.3744, -0.0238, -0.3705, -0.5327, -0.4238, -0.3695, -0.3441, +0.1102, -0.2174, -0.2318, +0.3524, -0.8398, +0.3412, +0.6612, -0.2882, -0.1543, -0.4695, -0.5044, +0.0114, -0.2963, -0.1065, -0.4779, -0.0599, -0.0249, +0.2302, +0.2725, -0.2193, -0.4488, -0.0592, +0.3570, +0.3488, -0.3927, -0.8498, -0.5230, -0.2438, -0.3574, +0.1329, -0.0425, -0.1903, +0.0357, +0.0999, +0.2359, -0.0484, -0.1651, +0.0435, +0.3874, -0.1912, +0.3354, +0.0358, +0.0585, +0.1967, +0.0366, +0.0187, -0.0461, +0.0070, +0.1871, -0.5516, -0.0042, -1.0255, -0.3259, -0.0243, -0.5298, +0.2294, -0.5062, -0.1477, +0.1261, -0.0510, +0.1436, +0.0660, -0.5151, +0.2660, -0.5662, -0.1677, -0.8036, +0.2069, +0.1128, -1.1921, +0.2165, -0.2782, -0.0133, +0.3023, +0.4483, +0.3006, +0.1258, -0.3541, +0.4909, -0.5832, -0.2644, -0.5679, -0.0143, +0.4790, -0.5303, -0.0335, +0.1827],\n[ +0.1815, -0.0401, +0.5799, -0.1930, -0.1426, +0.2715, -0.2027, -0.4024, +0.1903, -0.2936, -0.1695, +0.1541, +0.0735, +0.1619, +0.2111, -0.2971, +0.1040, -0.2205, +0.0246, +0.3006, +0.0952, -0.0275, -0.0447, +0.1255, -0.0167, -0.3038, +0.0067, +0.0918, +0.4534, +0.3729, +0.2850, +0.3374, -0.4727, -0.1795, -0.1843, -0.7887, -0.1064, +0.3617, +0.4538, -0.4319, +0.5354, +0.2161, +0.0345, -0.0433, +0.3310, -0.3595, +0.2615, -0.0568, +0.0303, +0.3004, +0.0770, -0.2452, -0.6659, -0.5601, -0.2301, -0.0998, +0.0636, -0.1085, +0.1295, -0.0602, +0.0307, -0.0612, +0.1105, -0.0609, +0.1408, -0.2544, -0.6422, -0.0406, +0.2392, +0.0856, -0.2494, -0.0096, 
-0.0540, +0.0659, -0.1719, -0.1073, -0.2555, -0.2760, -0.4249, -0.0601, +0.2513, +0.1583, -0.1241, +0.1113, -0.5371, -0.0592, -0.3068, -0.5263, +0.1182, -0.1418, -0.6853, -0.5592, -0.8657, -0.0064, +0.1229, -0.6746, +0.0328, -0.5248, +0.0427, +0.0597, +0.0565, +0.1423, +0.0662, -0.1197, +0.0264, +0.4936, -1.0516, -0.4423, -0.1124, +0.0092, -0.2064, -0.8563, -0.3046, -0.4646, -0.0318, +0.1897, +0.3785, +0.4474, +0.1881, -0.2840, -0.1618, +0.0267, -0.0722, -0.1734, -0.2372, -1.1021, -0.7337, +0.1509],\n[ -0.6447, +0.3449, -0.9011, -0.2403, -0.0744, -0.1438, +0.2014, -0.3920, -0.2266, +0.0825, -0.3074, -0.4425, -0.0174, -0.3714, -1.7478, -0.1300, +0.1309, -0.2163, -0.6222, +0.5497, -0.1621, -0.6566, +0.0829, -0.3938, +0.2074, -0.0513, +0.2233, -0.6168, -0.0091, -0.1450, +0.3245, +0.1645, -0.2045, -0.1519, +0.8124, +0.1053, -0.2874, -0.5231, -0.2111, -0.2046, -0.3640, -1.6102, -0.0478, +0.2372, -0.3473, -0.2914, -0.2621, -0.0857, -0.4149, -0.5113, -1.0806, +0.1913, +0.0824, -0.1712, -0.3389, -0.9758, -0.2708, +0.1427, -1.3125, -0.7610, -1.1084, +0.0959, -0.3895, -0.0990, +0.2630, -0.2732, +0.5443, -0.6141, +0.0143, -0.2621, +0.5494, +0.5633, -0.0835, -1.4201, -0.0531, -0.0703, -0.5871, -0.1776, -0.6265, -0.1685, -0.5413, -1.0861, +0.2029, -0.2093, +0.5018, +0.3682, +0.0130, -0.4049, -0.2864, -0.5180, -0.3193, -0.2385, +0.1110, -0.1328, -0.9537, -0.4612, -0.8338, +0.2002, +0.5781, -0.5692, +0.0020, +0.4453, -0.5529, +0.0276, -0.3348, +0.6694, -0.0618, -0.0625, +0.1663, +0.3807, -0.7775, +0.0974, +0.1139, +0.1578, -0.4627, -0.4537, +0.2381, -0.0858, +0.0478, -0.2309, -0.3844, +0.1591, -0.2309, -0.0848, -0.4875, -0.2816, -0.1464, +0.3074],\n[ +0.0608, -0.1224, -0.6330, -0.1259, +0.4083, -0.1106, -0.0339, -0.3438, +0.4411, -0.1641, -0.0177, +0.1939, -0.3302, -0.3217, +0.1455, -0.0684, +0.0289, -0.3108, -0.3304, -0.3683, -0.4343, -0.2011, +0.0423, +0.2278, -0.4396, +0.0042, -0.4049, +0.4562, -0.2658, +0.5984, -0.1613, -0.0498, -0.1887, -0.1891, +0.2460, -0.0120, +0.3477, +0.3723, +0.1210, +0.9048, -0.6516, -0.4915, +0.5168, +0.0967, +0.0134, -0.4125, -0.1386, -0.4876, +0.0348, +0.2497, -0.2787, +0.0328, +0.2342, +0.4503, +0.4376, +0.3408, -0.2701, -0.0179, -0.3997, -0.4566, -0.6290, +0.0888, -0.0180, -0.4430, -0.2944, -0.1129, +0.1297, -0.2416, -0.1129, -0.0031, -0.1250, -0.0975, -0.3855, -0.2048, +0.3220, -0.2710, -0.3520, -0.3234, +0.0636, +0.4856, -0.1456, -0.0888, -1.0173, -0.3701, +0.0300, -0.4591, +0.1676, -0.1109, -0.0447, -0.0134, -0.1801, -0.2803, -0.5276, +0.0692, -0.3301, -0.5713, -0.4566, -0.0351, +0.3655, +0.0301, -0.3156, -0.0812, -0.3689, -0.1990, -0.3871, +0.6426, -0.0287, -0.1836, +0.3070, +0.2296, +0.0925, -0.5309, -0.2213, -0.1056, +0.2159, -0.0555, +0.5778, -0.1754, +0.2780, -0.2772, +0.0251, +0.3355, -0.2899, -0.0700, +0.1763, +0.2300, +0.1414, -0.4682],\n[ -0.2827, +0.2157, +0.1368, +0.0241, -0.1796, -0.2885, -0.3154, +0.2077, -0.0781, -0.3434, +0.1631, +0.2726, -0.0359, -0.1775, -1.2394, -0.2690, -0.5155, +0.0391, -0.0500, +0.1368, -0.0134, -0.1563, +0.1046, -0.1969, -0.2891, +0.0316, -0.2012, +0.3028, -0.0851, -0.3458, -0.7299, -0.2510, +0.4461, +0.2600, +0.3267, +0.0790, -0.1682, -0.1931, -0.0799, +0.2881, -0.5884, +0.0632, +0.3180, -0.3941, +0.1914, +0.5623, +0.3005, +0.0005, -0.0287, -0.2098, +0.0567, -0.6083, -0.5947, -0.5355, -0.4706, +0.4109, +0.1546, +0.0628, -0.5879, -0.5762, -0.4205, -0.1880, -0.6307, +0.3561, +0.3985, -0.5811, +0.3369, +0.2780, +0.0391, -0.3813, +0.0994, +0.3831, +0.1480, +0.0737, -0.2445, +0.0227, -0.0794, -0.0114, -0.5846, -0.3964, -0.0213, 
-0.4718, -0.8218, +0.0227, -0.1789, +0.1025, -0.2074, -0.0989, +0.4805, +0.1240, -0.2949, +0.2676, -0.0070, -0.1780, -0.1916, -0.1816, -0.3574, +0.0061, +0.1938, -0.0771, -0.1268, -0.3721, +0.0112, -0.4341, +0.0241, -0.1961, -0.0148, -0.1895, +0.2493, -0.1105, -0.0807, -0.0370, +0.4592, +0.6299, +0.1287, +0.3238, +0.1459, +0.3146, -0.0258, +0.4083, -0.1598, -0.1403, -0.4425, -0.0246, +0.1522, -0.1340, -0.0754, +0.0788],\n[ +0.1139, -0.1473, +0.0625, -0.2686, -0.1878, +0.1209, +0.1643, -0.6718, -0.2045, -0.2222, -0.8136, -0.0653, -0.2586, +0.0177, -0.6514, -0.4516, +0.0916, -0.3649, -0.2173, +0.2538, -0.5378, +0.2946, +0.3534, +0.3872, +0.1299, -0.0175, +0.3286, +0.0112, +0.1354, +0.4912, +0.4418, +0.3883, -0.0243, -0.0846, -0.3096, +0.1244, -0.4983, +0.0288, -0.8007, -0.1267, +0.0108, +0.1520, -0.2865, +0.4799, -0.3412, -0.2733, -0.4696, +0.6912, +0.0983, +0.0998, +0.2374, -0.1982, +0.2253, +0.1817, -0.2972, -1.1327, -0.0503, +0.0064, +0.3198, -0.3973, -1.0132, +0.0144, +0.1239, -0.3439, +0.3481, -0.1054, -0.1642, -0.3204, +0.2265, -0.6322, -0.6049, +0.0105, +0.2317, +0.0861, -0.5469, +0.3185, +0.3799, +0.2897, -0.1284, +0.1114, +0.0065, -0.2713, -0.1795, +0.2296, +0.2176, +0.0355, -0.1739, -0.2801, +0.1071, -0.3920, +0.1264, +0.1638, -0.6773, -0.2767, -0.4543, -0.1426, -0.0279, -0.0029, -0.2080, -0.3354, +0.3200, -0.2031, -0.0694, +0.2044, +0.1851, -0.0066, +0.3423, +0.3267, +0.2141, -0.1742, +0.3827, -0.0116, +0.2795, +0.3505, -0.6727, +0.0667, -0.1962, +0.1275, -0.3322, +0.1561, +0.2273, -0.7180, -0.4079, -0.6132, -0.8328, -0.0139, -0.0531, +0.0699],\n[ -0.2162, +0.3650, -0.0775, +0.1286, +0.2833, -0.2686, +0.2269, +0.1891, +0.5902, +0.0766, -0.5377, -0.4029, +0.3113, -1.0276, -0.5917, -0.0088, -0.0177, +0.3982, -0.8924, +0.0721, +0.0778, +0.4037, -0.1173, -0.1329, -0.2413, +0.1995, -0.2764, +0.2759, -0.0765, -0.0474, +0.4479, +0.0464, +0.0024, +0.2887, +0.2408, -0.2897, -0.5199, -0.3479, +0.0651, -0.0846, -0.5294, -0.3773, -0.3503, +0.0261, -0.0676, -0.6216, +0.3592, +0.1187, +0.1490, -0.1143, +0.1841, -0.0096, -0.5919, +0.3378, -0.8474, -0.9084, +0.0227, -0.7069, -0.0190, -0.9554, -0.5866, +0.2025, -0.2729, +0.1888, -0.4473, -0.1897, -0.1489, -0.0217, +0.1219, -0.3593, +0.0485, +0.2373, +0.2032, +0.5618, -0.2778, -0.4513, +0.1367, +0.2175, -0.0878, -0.2160, -0.0099, -0.1469, -0.2830, +0.3326, -0.3476, +0.0601, -0.2457, +0.2154, -0.4680, -0.3028, -0.0916, +0.1164, -0.0634, -0.4959, -0.2114, +0.2425, +0.0475, +0.2571, -0.0876, +0.0062, -0.4116, +0.4813, -0.1849, -0.0425, +0.0471, -0.4631, -0.7976, +0.2002, -0.1408, +0.2919, +0.0119, -0.5573, +0.3390, +0.4550, -0.3039, -0.1137, +0.2137, -0.0528, -0.6604, -0.0976, +0.3270, -0.0922, -0.2643, -0.4038, -0.3013, -0.1558, -0.1576, -0.3229],\n[ +0.1884, +0.1581, -0.1678, -0.1605, -0.0788, -0.2348, -0.2711, +0.0234, +0.1463, -0.4431, -0.4578, -0.1051, +0.1719, +0.0025, +0.2520, -0.1683, -0.5398, -0.5436, -0.5340, -0.3022, +0.0653, +0.2823, +0.3645, -0.2714, -0.0326, +0.2143, +0.1475, +0.3053, -0.4027, +0.1658, +0.2754, -0.0655, -0.5440, -0.1419, +0.0892, -0.2852, -0.4614, +0.0123, +0.2066, +0.4549, -0.3081, -0.7535, -0.2124, -0.4547, -0.3026, +0.3357, -0.6742, +0.1423, -0.3012, +0.0458, -0.9311, -0.7505, -0.1701, +0.2159, -0.1285, +0.2552, -0.4995, -0.0120, -0.1738, +0.1413, +0.0009, -0.6367, -0.2474, +0.3341, -0.3521, +0.1533, +0.2673, +0.0852, -0.2343, -0.0740, +0.1619, -0.0412, -0.1895, -0.1293, +0.0769, +0.0521, -0.1532, -0.2426, +0.1172, -1.5164, +0.0624, -0.3847, +0.1773, -0.1819, -0.6927, -0.3207, +0.1571, +0.1593, -0.1427, +0.3353, 
+0.5158, +0.0365, +0.0112, +0.3568, +0.1530, +0.1749, -0.7890, -0.1237, -0.3425, -0.5343, -0.1073, -0.0057, +0.4587, +0.0027, -0.0265, -0.3313, +0.0943, -0.2787, -0.3268, -0.3629, -0.1698, -0.0133, -0.0167, -0.5999, -1.1855, +0.2669, -0.1889, +0.2187, +0.3723, -0.5664, +0.0426, +0.0538, +0.6252, +0.0166, +0.2159, +0.1125, +0.4596, +0.0911]\n])\n\nweights_dense2_b = np.array([ +0.1576, -0.1010, +0.0138, +0.3265, +0.2288, +0.0764, +0.0671, -0.1334, +0.1474, +0.1138, +0.0768, +0.1304, +0.2356, +0.0043, +0.0230, +0.1405, -0.0490, +0.2157, +0.1432, +0.1021, +0.0087, +0.1419, +0.0887, +0.1315, +0.1993, +0.1391, +0.2385, +0.0697, +0.1521, +0.1446, +0.1235, +0.2313, +0.1099, +0.1952, +0.1449, +0.2652, +0.0128, +0.2281, +0.1987, +0.1900, +0.0668, +0.1516, +0.0543, +0.0443, +0.1702, -0.0012, +0.1350, -0.0182, +0.1091, +0.1445, +0.1419, -0.0253, -0.0059, +0.0150, +0.1739, +0.0979, +0.1677, +0.0557, +0.1365, +0.1161, +0.2681, +0.1524, +0.1655, +0.1234, +0.1366, +0.2115, +0.1751, +0.0743, +0.1009, +0.3255, +0.0978, +0.3483, +0.3591, -0.0855, +0.2378, -0.0121, +0.2396, +0.0827, +0.1282, +0.2056, +0.2524, +0.1829, +0.0006, +0.0432, +0.1458, +0.1960, +0.2966, +0.1491, +0.0097, +0.0552, +0.1618, +0.0341, +0.1177, +0.0545, +0.2253, +0.1174, -0.0110, +0.0619, +0.0641, +0.1169, +0.1377, -0.0361, +0.3361, +0.0248, -0.0144, +0.2120, +0.2515, +0.1977, +0.1501, +0.1138, +0.1061, +0.1561, -0.0356, +0.1378, +0.1289, +0.0579, +0.1030, +0.1400, +0.1058, +0.1320, -0.0224, +0.0156, +0.0664, -0.0583, +0.1267, +0.3404, +0.2669, +0.0034])\n\nweights_final_w = np.array([\n[ -0.0500, +0.0520, -0.1401, +0.2030, -0.3231, +0.0144, -0.1040, +0.2029, -0.1153, -0.1412, -0.0326, +0.0192, +0.0153, +0.1157, -0.2278, +0.0663, -0.0588],\n[ -0.1934, +0.1167, +0.0131, +0.0005, -0.0056, +0.2003, +0.0647, +0.0407, +0.1506, +0.0205, -0.2231, -0.1559, -0.2919, +0.0398, +0.0346, +0.0151, -0.0333],\n[ -0.5131, +0.0469, -0.0589, -0.2056, -0.0316, +0.1800, -0.1217, -0.1480, +0.1785, +0.0261, -0.1138, -0.1974, +0.2588, -0.0282, -0.0632, +0.2111, -0.2802],\n[ +0.0465, -0.2210, +0.1704, -0.4249, -0.0303, -0.1091, +0.0872, -0.1461, -0.0034, +0.2490, -0.2206, -0.0736, +0.0300, -0.0700, -0.1995, +0.3273, -0.1255],\n[ +0.0426, +0.3676, -0.1660, -0.0894, +0.0383, -0.0723, +0.0043, -0.0986, -0.0594, -0.0239, -0.0395, +0.0166, +0.1111, -0.0135, +0.0101, -0.1226, +0.0468],\n[ -0.1512, -0.0761, +0.2163, +0.0025, +0.1200, +0.1947, -0.2594, -0.1264, +0.0708, -0.0100, +0.2345, +0.1272, +0.0181, +0.0598, +0.0196, +0.0544, +0.0077],\n[ -0.0479, +0.0877, -0.1959, +0.2171, +0.1391, -0.0882, +0.1955, -0.0043, +0.0778, -0.0865, +0.0036, +0.0395, -0.0664, -0.0722, -0.0999, +0.0403, +0.0459],\n[ -0.1554, -0.0621, +0.0832, +0.0046, -0.1666, -0.0623, -0.0754, -0.1921, -0.0802, +0.1742, -0.3199, +0.2225, -0.0935, -0.0165, -0.1373, -0.1331, +0.1116],\n[ +0.1641, +0.0611, -0.0527, -0.0480, +0.1270, -0.0116, -0.1430, -0.2651, +0.0744, +0.1591, -0.0271, -0.1597, +0.0145, -0.0970, -0.0396, +0.0642, -0.1355],\n[ -0.1786, +0.1061, -0.0170, +0.2049, -0.0101, -0.0016, +0.1681, +0.2846, +0.0429, +0.0779, -0.0644, +0.2227, +0.0480, -0.0613, -0.1086, +0.0909, +0.0695],\n[ -0.1132, -0.0676, +0.0076, +0.1158, -0.0424, -0.0585, +0.0658, -0.1111, -0.0105, +0.1243, -0.1272, -0.1166, -0.1396, -0.0158, +0.2221, -0.5519, +0.0056],\n[ -0.2191, +0.0914, -0.2542, -0.0066, -0.1819, -0.0521, +0.0304, +0.0955, +0.0114, +0.1190, +0.1209, -0.1486, -0.2386, +0.0012, +0.0523, -0.0173, -0.0888],\n[ +0.1867, +0.1779, +0.1244, +0.0109, +0.0969, +0.0768, -0.3619, -0.0457, +0.1208, -0.0982, +0.0409, 
-0.1705, -0.0508, -0.0835, +0.1529, -0.0571, -0.1034],\n[ -0.1137, -0.2575, +0.0101, -0.4039, -0.1796, -0.2636, +0.1259, +0.0388, +0.1942, -0.0287, +0.2606, -0.0823, -0.0069, +0.1304, +0.4204, +0.0883, -0.1248],\n[ -0.2387, +0.1436, -0.1701, +0.2502, +0.0021, +0.0202, +0.1469, -0.1469, +0.1600, -0.0124, +0.0876, -0.1535, +0.4238, -0.0717, +0.3870, +0.4299, -0.1141],\n[ +0.2502, +0.1630, +0.0301, -0.1261, +0.1486, -0.1277, -0.0184, -0.1144, -0.0368, +0.1179, -0.1200, -0.1448, -0.1146, -0.0880, +0.3904, -0.0074, +0.1483],\n[ -0.2122, +0.1499, -0.0626, -0.0072, +0.0174, -0.1730, +0.0620, -0.1867, +0.0511, -0.0842, -0.2439, -0.0841, -0.0805, +0.1795, -0.0691, -0.2461, +0.1777],\n[ +0.0065, +0.2415, +0.1155, -0.2030, +0.0613, +0.2977, -0.0146, +0.0042, +0.1047, -0.1538, -0.0089, -0.1895, +0.0561, -0.0879, +0.0143, -0.0559, -0.0890],\n[ -0.1464, -0.0184, +0.1731, +0.0010, +0.0459, -0.0302, -0.0838, -0.1482, +0.1891, -0.0848, +0.0385, -0.0858, -0.3817, -0.0695, -0.2484, -0.3315, -0.0779],\n[ -0.1571, -0.1686, -0.0109, -0.0342, -0.0847, -0.1510, -0.0678, -0.0963, +0.0833, +0.1933, +0.3233, -0.0954, +0.0170, -0.1014, +0.0558, -0.1237, -0.0068],\n[ +0.1069, -0.2466, -0.1261, +0.2319, -0.0456, +0.0744, -0.0796, +0.0739, +0.1059, +0.0330, -0.1405, +0.1594, -0.2326, -0.1312, +0.1011, +0.0532, -0.0480],\n[ -0.3261, -0.3612, +0.4744, +0.1766, -0.2084, +0.0832, +0.0138, -0.1178, -0.1055, -0.0774, +0.1118, +0.1659, +0.2805, -0.2790, -0.4162, -0.1110, -0.0465],\n[ -0.0609, +0.0663, -0.1358, +0.2174, +0.1067, +0.0559, -0.0523, -0.0194, +0.2576, +0.0302, -0.0394, -0.0745, -0.0917, -0.0268, -0.0366, +0.0958, +0.0342],\n[ +0.0260, -0.1912, -0.2794, -0.2077, +0.0565, +0.0009, -0.0691, -0.0750, -0.0582, +0.0129, -0.0717, -0.1249, -0.0442, +0.0543, -0.0696, +0.0801, +0.0080],\n[ +0.0147, +0.1115, -0.2710, +0.0096, -0.1212, +0.1514, -0.0183, -0.0714, +0.2782, -0.1610, +0.1000, +0.0025, +0.0322, +0.0412, -0.1214, +0.1739, -0.0345],\n[ -0.1663, -0.0534, -0.0607, +0.2529, +0.0435, +0.0024, -0.1550, +0.2743, +0.0510, +0.0220, +0.1415, -0.1839, +0.1584, +0.0849, +0.0793, +0.3077, -0.0912],\n[ +0.1709, +0.0804, +0.0473, +0.1288, +0.0029, +0.1389, +0.2928, +0.0363, -0.0596, -0.0483, +0.3041, -0.0522, -0.0675, +0.0609, -0.0559, -0.0244, +0.0003],\n[ -0.1667, +0.1194, +0.1725, +0.0877, -0.1228, -0.0480, -0.0474, -0.0550, +0.1146, +0.0949, +0.0477, -0.4471, -0.3624, -0.0280, -0.0140, -0.1358, -0.0709],\n[ +0.0310, -0.0247, +0.1299, -0.0083, +0.1510, -0.0586, -0.0757, +0.0686, +0.2878, -0.0194, -0.0216, +0.0047, +0.1680, +0.0598, -0.2225, -0.0494, -0.0765],\n[ +0.1092, -0.3239, +0.1943, -0.0899, -0.0506, +0.1235, +0.0486, +0.0174, +0.0141, -0.0921, +0.1887, +0.0339, +0.1610, -0.0140, -0.1033, +0.1223, -0.0155],\n[ -0.0277, +0.0191, +0.0459, +0.0300, +0.1183, -0.0972, +0.3191, +0.1737, +0.1869, +0.0644, +0.0507, -0.0121, -0.0506, +0.0448, -0.1096, -0.0081, -0.0789],\n[ -0.2677, +0.1532, +0.0895, -0.0466, -0.0054, +0.0569, +0.0022, -0.3044, +0.1862, -0.2223, -0.0157, +0.0559, +0.0186, -0.0800, -0.1146, +0.3398, -0.1068],\n[ +0.0242, -0.0935, +0.0429, -0.0115, +0.2126, +0.0859, +0.0020, -0.1650, +0.1600, -0.0348, -0.1426, -0.0212, +0.0662, +0.0819, -0.2960, -0.0660, -0.0242],\n[ -0.0361, -0.0494, +0.0313, +0.1553, +0.0623, -0.1620, +0.2842, +0.0104, -0.1632, +0.1637, +0.2602, -0.2550, -0.0172, -0.0868, -0.1758, +0.0689, +0.0731],\n[ +0.0089, +0.3266, -0.2115, +0.1473, +0.0423, +0.0773, +0.1420, +0.2675, -0.0000, +0.1361, -0.4133, +0.0742, +0.0148, +0.2438, -0.0297, +0.0726, +0.0325],\n[ -0.1394, +0.0096, -0.1679, +0.1894, 
+0.0730, +0.0572, -0.2028, +0.0843, -0.0058, -0.1014, +0.1447, -0.0080, -0.1056, -0.0686, +0.0299, +0.0535, +0.0238],\n[ -0.0404, +0.0493, -0.0139, +0.0949, +0.0198, +0.1132, -0.0484, +0.0609, +0.2326, +0.2329, -0.3538, +0.0420, +0.0020, +0.1391, +0.0121, -0.1743, +0.0309],\n[ -0.0571, -0.0159, -0.3700, +0.0511, +0.3797, -0.1085, +0.0214, +0.1002, -0.0533, -0.0098, -0.1695, +0.0261, -0.0258, -0.0383, -0.0943, -0.0375, -0.0874],\n[ -0.2690, +0.3889, -0.1825, -0.2502, +0.3067, +0.2693, -0.0415, -0.2989, -0.0810, -0.2701, -0.0876, +0.1370, +0.0814, +0.1991, +0.1026, -0.3468, +0.0540],\n[ +0.0901, +0.1083, +0.0109, +0.3720, -0.1897, -0.1353, -0.0002, -0.0120, -0.1493, -0.0239, +0.0070, -0.2525, -0.2358, -0.0631, +0.1656, +0.1604, +0.0500],\n[ +0.3311, -0.0797, +0.0255, -0.1776, +0.0926, +0.1546, +0.0071, -0.0737, -0.2305, +0.0552, -0.0238, +0.2687, -0.3520, -0.1743, +0.0424, -0.0234, -0.1280],\n[ -0.1053, -0.0652, +0.2033, +0.1009, +0.1072, +0.2026, -0.0444, +0.0543, -0.0881, -0.4387, +0.0020, -0.0239, -0.2309, -0.0730, +0.1919, -0.0019, -0.1239],\n[ +0.1863, -0.1390, -0.0368, +0.1796, -0.1910, +0.0543, -0.3425, +0.1458, +0.0116, +0.1308, +0.1313, +0.1098, -0.0819, +0.0199, -0.1105, -0.0217, +0.0668],\n[ -0.1977, -0.2812, -0.0967, +0.2061, -0.0280, -0.1759, +0.0672, +0.1179, +0.0423, +0.1185, +0.0052, +0.1873, -0.0213, +0.0987, +0.1273, -0.0343, +0.0754],\n[ -0.1596, +0.0493, +0.2797, +0.0854, +0.1617, +0.0723, +0.1626, +0.1054, -0.0487, +0.0097, -0.0439, -0.1232, +0.1922, -0.0277, +0.0119, +0.1730, +0.1012],\n[ +0.0703, +0.1970, +0.0967, +0.0519, -0.0242, -0.0490, +0.1296, +0.1286, +0.2053, -0.0337, +0.0747, +0.2130, -0.0591, +0.0786, +0.1569, -0.1778, +0.0426],\n[ +0.1670, -0.0426, +0.0293, +0.0224, +0.0965, +0.2378, +0.1234, -0.0270, +0.0717, +0.0657, +0.1491, +0.1309, -0.0047, +0.1014, -0.0419, -0.3916, +0.1701],\n[ -0.1341, -0.0447, -0.0123, -0.0296, -0.5153, -0.0471, +0.0095, -0.0265, -0.0487, -0.1160, -0.1601, +0.2510, +0.2775, -0.0465, +0.1130, -0.0542, +0.1162],\n[ -0.0102, -0.1284, +0.1419, -0.0311, +0.0061, -0.1030, +0.0771, -0.1695, -0.0464, +0.1868, -0.2451, -0.2036, -0.0334, +0.0034, +0.0619, +0.1707, -0.0432],\n[ +0.1202, +0.0824, -0.0883, -0.0337, +0.2071, -0.3426, -0.1190, +0.1154, +0.0478, +0.3089, +0.2514, -0.0086, -0.0398, +0.0412, +0.0666, +0.0843, -0.0302],\n[ -0.0201, +0.0295, -0.0884, -0.0905, +0.1772, +0.0316, +0.0128, -0.5102, +0.2854, +0.0869, -0.0243, -0.2130, +0.1952, +0.1135, -0.1489, -0.1426, -0.1163],\n[ +0.0725, +0.2234, -0.1540, -0.2566, -0.0715, +0.1212, -0.1259, -0.0770, +0.0403, +0.0321, +0.1270, +0.3933, -0.2743, +0.1460, +0.0257, +0.0532, +0.0935],\n[ +0.0539, -0.0206, +0.1057, -0.0783, +0.0933, +0.1618, -0.0672, -0.0393, +0.0656, +0.0044, +0.2199, -0.1817, -0.0652, -0.1396, +0.3507, -0.1025, +0.0075],\n[ -0.2804, -0.1096, +0.0919, -0.1097, -0.0635, +0.0266, +0.1040, -0.2046, +0.1255, +0.1206, -0.0410, -0.0632, -0.0094, -0.0336, +0.0988, +0.2421, +0.0582],\n[ +0.0741, -0.0379, -0.1480, -0.0401, -0.0145, +0.0121, -0.0270, +0.1328, +0.0768, +0.1763, +0.0653, +0.2162, +0.1625, -0.0759, -0.2528, -0.0400, -0.0609],\n[ -0.0612, -0.1351, +0.1460, +0.0407, -0.1009, +0.1307, -0.2049, -0.2043, +0.1486, +0.0571, +0.0194, +0.1111, +0.0900, -0.1686, +0.2435, +0.0025, -0.0025],\n[ +0.1537, -0.0991, +0.0023, +0.0866, +0.1233, +0.0192, +0.0099, +0.0112, -0.1178, -0.0302, +0.1665, -0.1938, +0.0973, +0.0237, -0.0594, -0.2662, +0.0543],\n[ -0.0866, +0.0235, -0.4950, -0.2234, -0.3568, +0.1504, -0.3272, -0.2139, -0.0608, -0.2261, +0.3710, -0.0093, +0.2967, +0.4035, -0.1705, 
-0.1567, -0.1671],\n[ +0.2094, +0.2050, -0.2027, -0.3216, -0.0898, -0.3793, +0.1276, -0.2629, -0.2257, -0.0036, -0.1710, +0.0087, -0.1903, +0.0476, -0.4685, -0.0350, +0.0185],\n[ +0.0761, -0.1583, -0.2297, -0.1768, -0.0385, -0.0560, -0.2828, +0.1289, +0.2082, -0.0280, +0.2047, -0.0699, -0.0760, +0.0177, +0.0795, -0.2414, -0.0863],\n[ +0.0051, -0.0159, +0.3049, -0.1012, +0.2151, +0.0903, -0.0857, +0.2459, -0.0574, -0.1813, -0.0363, +0.2777, +0.1517, -0.2497, -0.1926, -0.2714, -0.1205],\n[ -0.0313, -0.1017, +0.1731, +0.1876, +0.1274, -0.0242, -0.0131, +0.2359, -0.0604, -0.0337, +0.2364, -0.1355, -0.0705, +0.3669, -0.1421, +0.1863, +0.0562],\n[ -0.0262, -0.0691, +0.0511, +0.2557, -0.0593, +0.0052, +0.2013, +0.2306, -0.1830, +0.1198, +0.1838, +0.5372, +0.1109, +0.0263, -0.1590, +0.1215, -0.0652],\n[ -0.0851, +0.2184, +0.0468, +0.0364, +0.0370, -0.0666, +0.0343, -0.1203, +0.0828, +0.1937, -0.1424, +0.0270, +0.4887, +0.0595, +0.3850, -0.1007, -0.1618],\n[ -0.0647, -0.0491, -0.1482, -0.0725, +0.1533, -0.0067, -0.0163, -0.2621, +0.4495, -0.0230, -0.1823, -0.2028, +0.0139, +0.0228, -0.0210, +0.1549, +0.0272],\n[ +0.0583, +0.1002, -0.2269, -0.1197, +0.2894, -0.0435, +0.1308, -0.0852, +0.3371, +0.1352, -0.0481, +0.4537, +0.2147, -0.0701, +0.1391, -0.0899, -0.0713],\n[ -0.1840, -0.3356, -0.0079, +0.1192, -0.3029, -0.0518, -0.2026, +0.3101, -0.2164, -0.1004, +0.0849, -0.2748, +0.3760, -0.0042, +0.3993, +0.4109, -0.0759],\n[ -0.0964, -0.1880, -0.0638, -0.0073, +0.0270, -0.1475, +0.1412, +0.0414, -0.1057, -0.1715, +0.1548, -0.0673, +0.1397, -0.0826, +0.1504, -0.0542, +0.0261],\n[ -0.0436, +0.0176, -0.2640, -0.0555, +0.0581, +0.0930, +0.1484, +0.1862, +0.0059, -0.0723, +0.0217, -0.0897, +0.1173, +0.0441, +0.2001, +0.2820, +0.0056],\n[ +0.0250, -0.0386, -0.0329, +0.0484, +0.0093, +0.0156, +0.0280, -0.0594, -0.1825, +0.1148, -0.1595, -0.0615, -0.0671, +0.1472, +0.1153, +0.0946, -0.1374],\n[ +0.1758, +0.1463, -0.0741, -0.2125, +0.1636, -0.0032, -0.0480, +0.2846, -0.1297, +0.0617, -0.0237, +0.0512, +0.1992, +0.1058, +0.3484, +0.0708, +0.1671],\n[ +0.2952, -0.0091, -0.0190, -0.0003, +0.1044, -0.0487, +0.3075, +0.0950, -0.1509, +0.1373, -0.2803, +0.1419, +0.2875, +0.0242, +0.0065, -0.0936, +0.0162],\n[ +0.1659, +0.1577, +0.1013, -0.1512, +0.1424, +0.2053, +0.1997, +0.1179, -0.1882, -0.2216, -0.2507, +0.1068, -0.1726, +0.0142, +0.2472, +0.3217, -0.0999],\n[ +0.0972, +0.1773, +0.0982, +0.0548, -0.0945, +0.1250, +0.0022, -0.0743, -0.3627, -0.0801, +0.1161, +0.2369, +0.1528, -0.0622, -0.0397, -0.1468, -0.1702],\n[ +0.1908, +0.0306, +0.0756, -0.0095, -0.1140, +0.2149, -0.2536, +0.0192, -0.1244, -0.0219, -0.0060, +0.0286, +0.1742, +0.0430, +0.0622, +0.0760, +0.0211],\n[ -0.2068, -0.0279, +0.1215, +0.1463, -0.0032, -0.0667, -0.0069, +0.0322, -0.1577, +0.0497, +0.0793, +0.0504, +0.2050, +0.0459, +0.0586, -0.0467, +0.0756],\n[ -0.0050, -0.1217, +0.1720, -0.0497, +0.2169, +0.0608, -0.0893, +0.2269, -0.1392, +0.0636, -0.0461, +0.0243, -0.3206, +0.1114, -0.2682, -0.1793, +0.0532],\n[ -0.0919, -0.1976, +0.0794, +0.0269, +0.1282, -0.0851, +0.0248, +0.1133, +0.0308, -0.1318, -0.2297, -0.0558, -0.2465, -0.0534, -0.0406, +0.1037, +0.2015],\n[ -0.1945, +0.1965, +0.0724, -0.1509, +0.1594, -0.2093, +0.0466, -0.1090, +0.0338, -0.0269, -0.0978, +0.2136, -0.3288, -0.0004, -0.0335, +0.0991, -0.1539],\n[ +0.3684, +0.0830, +0.1588, +0.3764, +0.1412, -0.1288, -0.1443, +0.0388, +0.0819, +0.1563, -0.0173, +0.0067, +0.2200, +0.1175, +0.0211, +0.0056, +0.0099],\n[ +0.1937, -0.1604, -0.2249, -0.1253, -0.2346, -0.6112, -0.3301, +0.0008, 
+0.0128, +0.2471, +0.3018, -0.1123, -0.0536, -0.0060, -0.2138, -0.0642, -0.0224],\n[ -0.0750, +0.0249, +0.2450, -0.1484, -0.0077, -0.1160, +0.1432, +0.0341, -0.0626, -0.0306, +0.0240, -0.1328, +0.2852, +0.1698, -0.1014, -0.1040, -0.0586],\n[ -0.1895, +0.0789, -0.1464, +0.1532, +0.0039, -0.1385, +0.1615, +0.1964, +0.2573, +0.1177, +0.0378, +0.2443, +0.3998, -0.0943, -0.4698, +0.0102, +0.2408],\n[ -0.0270, -0.0811, -0.0578, +0.2961, +0.0481, +0.0643, -0.0719, +0.0192, +0.0965, -0.1667, +0.0554, -0.0081, -0.0330, +0.1145, +0.1502, -0.2212, -0.0304],\n[ +0.0547, -0.0325, +0.0254, -0.0844, +0.4001, -0.0142, +0.0167, -0.0836, +0.0950, -0.1405, +0.0599, +0.1493, +0.0871, +0.1385, -0.0614, -0.0024, -0.0119],\n[ -0.1367, +0.1318, -0.1068, +0.0860, -0.3123, +0.1001, +0.3291, -0.2463, -0.0354, -0.1765, +0.1861, +0.0849, +0.2081, +0.1625, -0.1385, -0.0860, -0.0052],\n[ -0.0067, +0.2718, -0.0711, +0.0202, +0.0372, -0.1661, +0.0658, +0.0383, -0.1041, -0.1056, +0.2210, +0.0247, -0.0011, -0.1070, -0.0103, +0.2565, -0.0143],\n[ -0.0009, +0.2050, +0.2110, -0.1294, -0.0535, +0.0716, -0.0756, +0.1513, -0.0375, -0.0890, +0.1466, +0.2619, -0.0011, -0.1757, +0.1141, +0.0407, -0.0847],\n[ -0.0193, -0.1029, +0.1858, +0.0139, +0.2608, -0.0146, +0.0154, +0.0489, -0.1093, +0.0104, -0.2219, +0.0648, -0.0656, -0.0877, +0.0203, -0.0899, -0.0903],\n[ -0.0886, +0.1405, -0.3229, +0.1365, -0.0121, +0.0062, -0.0392, -0.0051, +0.1538, +0.2586, -0.0580, +0.1502, -0.0022, -0.0271, -0.0936, -0.4547, +0.0754],\n[ +0.3475, +0.0164, -0.2164, +0.0795, -0.0293, +0.1575, +0.0395, -0.4194, +0.0051, -0.0399, +0.1034, +0.3149, -0.0858, -0.1606, -0.0197, -0.0434, -0.0950],\n[ +0.1236, -0.0872, -0.2009, +0.3143, +0.1047, +0.0123, -0.1373, -0.0199, +0.0415, -0.0361, +0.4936, +0.1017, +0.0232, -0.2103, -0.0957, -0.1162, +0.0018],\n[ -0.0303, +0.2271, +0.2671, -0.1139, +0.0505, +0.0459, +0.0181, -0.0978, +0.0442, +0.0428, -0.0411, +0.1537, -0.2310, -0.0133, +0.0393, +0.2811, +0.0302],\n[ +0.0180, -0.2184, -0.0850, -0.1738, -0.1618, +0.0298, -0.0198, +0.3145, -0.1872, -0.0837, -0.0753, -0.1052, -0.0610, -0.1089, -0.0288, +0.0058, -0.0733],\n[ +0.0005, +0.1507, +0.1555, +0.0516, -0.1457, -0.1738, -0.0570, +0.0876, +0.2975, +0.1416, +0.0454, -0.3029, +0.0148, -0.2664, +0.0913, +0.0374, +0.1604],\n[ +0.0099, +0.1211, +0.3735, +0.2346, +0.0826, -0.0476, -0.0548, -0.0957, +0.0246, -0.0414, +0.0233, -0.0779, +0.0275, -0.1103, +0.1439, -0.1177, +0.0950],\n[ +0.0991, -0.0663, -0.0226, -0.2599, -0.2676, -0.2060, -0.0898, +0.0218, -0.0629, +0.3991, -0.0857, -0.0919, -0.0442, +0.0097, -0.0634, -0.1714, +0.0120],\n[ -0.0083, -0.2737, -0.0861, -0.1251, +0.0406, +0.0368, -0.1133, +0.1224, -0.1233, +0.0078, +0.1233, +0.0664, -0.2030, +0.0159, -0.4872, +0.0998, -0.1442],\n[ -0.0557, +0.0044, +0.0650, -0.3940, +0.0112, +0.1438, +0.0244, -0.0566, -0.1197, +0.0153, -0.0472, -0.1162, -0.1096, -0.0067, +0.0748, -0.1948, +0.0299],\n[ -0.1223, -0.1968, +0.0289, -0.1279, +0.2201, -0.2378, +0.0276, +0.2640, +0.0479, +0.0962, -0.0191, -0.2616, -0.1661, -0.1301, +0.1400, +0.0645, -0.1336],\n[ +0.0914, +0.0268, +0.0498, +0.2482, -0.1405, -0.1721, +0.0066, +0.0427, +0.1062, -0.0699, +0.0238, +0.1893, +0.0670, -0.0401, -0.0636, +0.0611, -0.0596],\n[ -0.1982, -0.2318, +0.1808, +0.0593, +0.1440, +0.3635, +0.0442, -0.1524, +0.0650, +0.0216, -0.0772, -0.1202, +0.2329, +0.0204, -0.0736, +0.2445, +0.0261],\n[ -0.3588, +0.0298, -0.0034, -0.0458, +0.2777, +0.2485, +0.3801, -0.0695, -0.1679, -0.5362, -0.1887, +0.0389, +0.2512, -0.0413, +0.0365, -0.0915, +0.0519],\n[ -0.0069, 
+0.1737, +0.1234, +0.0086, +0.0216, +0.1098, -0.0172, +0.0209, -0.1796, +0.1730, -0.0681, +0.1485, +0.1096, -0.0660, -0.0895, +0.1351, -0.0554],\n[ -0.2458, +0.0149, +0.0155, -0.0099, +0.0206, +0.3378, +0.2285, +0.0385, -0.0532, +0.2575, -0.0545, +0.0151, +0.0540, -0.0833, +0.0946, +0.0446, -0.0862],\n[ -0.1136, +0.1565, +0.1244, -0.0161, -0.0614, +0.1204, -0.0771, +0.2362, +0.0355, +0.2028, -0.0155, -0.0652, -0.2262, -0.0749, -0.1730, -0.0957, +0.2760],\n[ +0.0634, -0.0011, -0.1770, -0.1426, +0.1599, +0.0892, -0.0261, +0.0790, +0.0812, +0.4281, +0.0143, +0.1554, +0.3770, -0.0206, -0.1564, +0.0818, -0.0514],\n[ +0.0102, -0.1232, -0.2659, -0.1866, +0.0090, +0.2672, -0.0540, +0.1100, -0.2942, -0.2454, -0.0566, -0.2273, +0.1938, +0.0229, -0.1563, -0.0024, -0.0975],\n[ -0.0645, -0.0004, +0.1548, +0.2129, -0.0429, -0.0628, +0.0306, +0.1769, -0.0491, -0.0285, -0.0621, -0.4293, +0.1821, +0.1154, -0.0027, +0.0264, -0.0107],\n[ +0.3158, -0.0591, +0.0461, +0.0682, +0.0015, -0.0667, +0.1125, +0.0242, -0.0998, +0.0405, -0.1104, +0.0892, -0.1950, -0.0443, -0.1472, +0.1181, -0.0764],\n[ -0.0697, -0.0853, +0.1715, +0.0623, +0.0484, -0.0205, -0.2043, -0.0463, -0.1115, +0.0058, -0.0556, -0.0808, +0.0454, -0.0959, +0.1666, +0.3646, +0.2677],\n[ -0.1574, -0.2403, -0.1285, -0.0211, -0.2054, -0.0027, +0.1099, -0.1112, -0.2730, -0.1247, +0.1788, -0.1336, -0.1028, +0.1764, +0.2594, -0.0231, +0.1732],\n[ +0.2124, +0.1167, -0.0895, -0.0324, +0.2043, +0.0680, -0.0162, -0.0062, -0.0897, +0.0854, +0.0748, +0.3693, -0.0414, +0.0972, +0.0094, +0.0354, +0.1289],\n[ +0.2460, -0.1170, -0.1432, -0.0896, +0.0093, +0.0557, -0.0195, +0.0088, +0.0839, +0.0375, +0.1659, -0.0451, -0.1184, -0.2017, +0.0481, -0.1517, +0.1372],\n[ -0.0602, +0.3316, -0.0452, -0.0173, +0.0148, +0.0459, +0.0167, -0.0842, +0.0428, +0.2873, -0.0527, +0.0752, -0.0441, +0.0583, +0.0443, +0.1112, -0.0272],\n[ +0.0051, -0.0555, +0.1717, +0.1398, -0.1614, -0.0300, -0.1820, -0.0658, +0.0742, -0.0847, -0.1139, +0.0728, +0.1420, +0.2099, -0.2902, +0.1338, +0.1417],\n[ -0.0094, -0.0020, -0.1126, -0.2572, -0.2766, +0.0049, +0.1369, +0.0054, -0.1328, +0.0361, +0.1361, +0.3090, +0.0047, +0.1695, +0.0896, -0.1901, -0.1786],\n[ +0.1339, -0.0886, +0.0770, +0.0189, -0.1017, -0.0070, +0.1551, -0.0489, +0.0966, -0.0131, -0.0856, -0.1183, +0.1585, -0.0139, +0.1861, -0.1487, +0.0005],\n[ +0.2260, +0.0098, -0.3146, -0.0441, +0.1948, +0.0520, +0.0863, -0.0021, -0.1183, +0.0328, +0.0124, +0.0056, +0.0944, -0.0319, +0.0679, +0.0774, +0.0409],\n[ -0.0237, -0.3453, -0.1483, -0.2272, +0.1675, -0.1041, -0.0126, +0.3376, -0.0138, +0.1525, -0.0322, +0.2070, -0.2725, -0.0600, -0.1466, -0.2563, +0.0551],\n[ +0.1966, -0.0838, -0.0537, +0.0933, -0.0060, -0.0721, -0.0399, -0.1153, -0.1987, -0.0413, +0.2588, +0.0648, -0.0473, +0.1387, +0.6180, +0.0229, -0.2610],\n[ -0.1566, -0.0589, -0.0600, +0.0966, +0.0738, -0.0744, -0.0010, +0.0253, +0.2387, -0.0539, +0.0625, +0.1197, +0.0486, +0.0605, +0.2775, -0.0594, -0.0759],\n[ -0.4288, -0.0693, +0.0067, +0.3567, +0.0813, -0.1369, -0.3431, +0.0559, -0.0320, +0.0158, -0.1947, -0.3235, +0.1294, -0.2915, -0.2301, -0.1980, +0.0892],\n[ -0.1741, +0.0445, -0.0525, +0.0650, -0.0474, +0.1845, -0.0831, -0.0693, -0.0071, -0.0772, +0.0915, -0.0571, -0.1035, -0.0038, -0.1508, -0.2323, -0.1788],\n[ +0.1543, -0.0994, +0.0674, +0.1953, +0.1254, -0.1263, +0.0854, -0.1048, +0.0106, -0.0203, +0.1019, -0.0881, +0.2788, -0.2237, -0.0723, +0.0393, -0.0969],\n[ +0.0560, +0.2425, +0.1031, +0.1155, -0.1187, -0.1155, -0.0789, +0.2113, +0.0024, +0.2648, -0.1811, -0.0259, 
-0.1363, -0.2149, -0.2199, -0.1105, -0.1616],\n[ -0.0967, +0.0007, -0.2230, +0.1197, +0.1085, +0.0267, -0.0633, +0.1611, -0.0078, +0.0088, -0.0879, -0.1362, +0.1364, -0.1784, +0.1725, -0.0491, -0.0177],\n[ +0.0724, -0.0574, +0.0696, -0.0095, -0.1601, +0.1243, +0.2373, -0.1058, +0.1610, -0.1874, -0.0689, +0.1806, -0.2117, -0.0855, -0.0657, +0.0366, -0.0797]\n])\n\nweights_final_b = np.array([ -0.0356, +0.0776, -0.0344, +0.1375, +0.1048, +0.3648, +0.3240, +0.1319, +0.1161, +0.3373, +0.3193, +0.0120, +0.0253, -0.2434, -0.1291, +0.1042, -0.2448])\n\nif __name__==\"__main__\":\n main()\n"
] |
[
[
"numpy.array"
],
[
"tensorflow.scatter_update",
"tensorflow.reverse",
"tensorflow.concat",
"tensorflow.range",
"tensorflow.Variable",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.exp",
"tensorflow.shape",
"tensorflow.stop_gradient",
"tensorflow.zeros_like",
"tensorflow.summary.merge",
"tensorflow.summary.histogram"
],
[
"numpy.asarray",
"numpy.random.normal",
"numpy.array"
],
[
"numpy.savetxt",
"numpy.array"
],
[
"tensorflow.get_variable",
"tensorflow.minimum",
"numpy.mean",
"numpy.zeros_like",
"numpy.clip",
"tensorflow.gradients",
"numpy.ceil",
"numpy.std",
"tensorflow.train.MomentumOptimizer",
"tensorflow.square",
"numpy.logical_not",
"tensorflow.shape",
"numpy.mod",
"tensorflow.exp",
"tensorflow.placeholder",
"numpy.array",
"numpy.logical_and",
"tensorflow.clip_by_value",
"numpy.maximum",
"numpy.abs",
"tensorflow.expand_dims",
"tensorflow.ones",
"numpy.random.shuffle",
"tensorflow.variable_scope",
"tensorflow.abs"
],
[
"numpy.dot",
"numpy.array",
"numpy.maximum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
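Note on the row above: the final API list for this repo (numpy.dot, numpy.array, numpy.maximum) together with the embedded weights_dense2_* and weights_final_* arrays is consistent with a hand-rolled feed-forward pass (a ReLU hidden layer followed by a 17-unit linear output, the width of weights_final_w). The main() body is not part of this dump, so the sketch below is only a plausible reading; the function and argument names are illustrative, not taken from the file.

import numpy as np

def forward(x, w_dense2, b_dense2, w_final, b_final):
    # ReLU hidden layer: matches the numpy.dot / numpy.maximum calls listed above
    hidden = np.maximum(np.dot(x, w_dense2) + b_dense2, 0.0)
    # linear output layer: hidden units -> 17 outputs (the width of weights_final_w)
    return np.dot(hidden, w_final) + b_final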
gry-kiu/Evolutionary-Algorithm
|
[
"58fc6928da9ed77d2fec454e4846b89fdf7b8c3f"
] |
[
"tutorial-contents/Genetic Algorithm/Genetic Algorithm Basic.py"
] |
[
"# Adding rank and tournament selections by Choi, T\n# Adding one- and two-point crossovers by Choi, T\n# Adding sharing method by Choi, T\n\n\"\"\"\nVisualize Genetic Algorithm to find a maximum point in a function.\nVisit my tutorial website for more: https://morvanzhou.github.io/tutorials/\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nDNA_SIZE = 10 # DNA length\nPOP_SIZE = 100 # population size\nCROSS_RATE = 0.8 # mating probability (DNA crossover)\nMUTATION_RATE = 0.003 # mutation probability\nN_GENERATIONS = 200\nX_BOUND = [0, 10] # x upper and lower bounds\n\n\ndef F(x):\n return np.sin(10*x)*x + np.cos(2*x) * \\\n x # to find the maximum of this function\n\n\n# find non-zero fitness for selection\ndef get_fitness(pred):\n return pred + 1e-3 - np.min(pred)\n\n\n# Added by Choi, T for EA lectures\ndef get_sharing_fitness(pop, pred, min_dist=1.5):\n fitness = pred + 1e-3 - np.min(pred)\n for i in range(POP_SIZE):\n denom = 1\n for j in range(POP_SIZE):\n dist = (pop[i] != pop[j]).sum()\n if dist < min_dist:\n denom += 1 - dist / min_dist\n fitness[i] /= denom\n return fitness\n\n\n# convert binary DNA to decimal and normalize it to a range(0, 5)\ndef translateDNA(pop):\n return pop.dot(2 ** np.arange(DNA_SIZE)\n [::-1]) / float(2**DNA_SIZE-1) * X_BOUND[1]\n\n\ndef select(pop, fitness): # nature selection wrt pop's fitness\n idx = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True,\n p=fitness/fitness.sum())\n return pop[idx]\n\n\n# Added by Choi, T for EA lectures\ndef rank_select(pop, fitness):\n # Efficient method to calculate the rank vector of a list in Python\n # https://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python\n def rank_simple(vector):\n return sorted(range(len(vector)), key=vector.__getitem__)\n\n def rankdata(a):\n n = len(a)\n ivec = rank_simple(a)\n svec = [a[rank] for rank in ivec]\n sumranks = 0\n dupcount = 0\n newarray = [0]*n\n for i in range(n):\n sumranks += i\n dupcount += 1\n if i == n-1 or svec[i] != svec[i+1]:\n averank = sumranks / float(dupcount) + 1\n for j in range(i-dupcount+1, i+1):\n newarray[ivec[j]] = averank\n sumranks = 0\n dupcount = 0\n return newarray\n\n rank_fitness = rankdata(fitness)\n idx = np.random.choice(np.arange(POP_SIZE), size=POP_SIZE, replace=True,\n p=list(map(lambda x: x / sum(rank_fitness), rank_fitness)))\n return pop[idx]\n\n\n# Added by Choi, T for EA lectures\ndef tournament_select(pop, fitness, tournament_size=2):\n idx = []\n for _ in range(POP_SIZE):\n participants = np.random.choice(\n np.arange(POP_SIZE), size=tournament_size, replace=False)\n participants_fitness = list(np.array(fitness)[participants])\n winner = participants_fitness.index(max(participants_fitness))\n idx.append(participants[winner])\n return pop[idx]\n\n\ndef crossover(parent, pop): # mating process (genes crossover)\n if np.random.rand() < CROSS_RATE:\n # select another individual from pop\n i_ = np.random.randint(0, POP_SIZE, size=1)\n cross_points = np.random.randint(0, 2, size=DNA_SIZE).astype(\n np.bool) # choose crossover points\n # mating and produce one child\n parent[cross_points] = pop[i_, cross_points]\n return parent\n\n\ndef one_point_crossover(parent, pop):\n if np.random.rand() < CROSS_RATE:\n # select another individual from pop\n i_ = np.random.randint(0, POP_SIZE, size=1)\n j_ = np.random.randint(1, DNA_SIZE - 1, size=1)\n flag = True if np.random.randint(0, 2) < 0.5 else False\n cross_points = [flag] * DNA_SIZE\n cross_points[int(j_):] = [not 
flag] * len(cross_points[int(j_):])\n # mating and produce one child\n parent[cross_points] = pop[i_, cross_points]\n return parent\n\n\ndef two_point_crossover(parent, pop):\n if np.random.rand() < CROSS_RATE:\n # select another individual from pop\n i_ = np.random.randint(0, POP_SIZE, size=1)\n j_ = np.sort(np.random.choice(\n np.arange(DNA_SIZE) - 2, size=2, replace=False) + 1)\n flag = True if np.random.randint(0, 2) < 0.5 else False\n cross_points = [flag] * DNA_SIZE\n cross_points[int(j_[0]):int(j_[1])] = [not flag] * \\\n len(cross_points[int(j_[0]):int(j_[1])])\n # mating and produce one child\n parent[cross_points] = pop[i_, cross_points]\n return parent\n\n\ndef mutate(child):\n for point in range(DNA_SIZE):\n if np.random.rand() < MUTATION_RATE:\n child[point] = 1 if child[point] == 0 else 0\n return child\n\n\npop = np.random.randint(2, size=(POP_SIZE, DNA_SIZE)\n ) # initialize the pop DNA\n\nplt.ion() # something about plotting\nx = np.linspace(*X_BOUND, 200)\nplt.plot(x, F(x))\n\nfor _ in range(N_GENERATIONS):\n # compute function value by extracting DNA\n F_values = F(translateDNA(pop))\n\n # something about plotting\n if 'sca' in globals():\n sca.remove()\n sca = plt.scatter(translateDNA(pop), F_values,\n s=200, lw=0, c='red', alpha=0.5)\n plt.pause(0.05)\n\n # GA part (evolution)\n # fitness = get_fitness(F_values)\n fitness = get_sharing_fitness(pop, F_values)\n print(\"Most fitted DNA: \", pop[np.argmax(fitness), :])\n pop = tournament_select(pop, fitness)\n pop_copy = pop.copy()\n for parent in pop:\n child = two_point_crossover(parent, pop_copy)\n child = mutate(child)\n parent[:] = child # parent is replaced by its child\n\nplt.ioff()\nplt.show()\n"
] |
[
[
"numpy.linspace",
"numpy.min",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.ioff",
"numpy.argmax",
"numpy.random.rand",
"numpy.array",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
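The get_sharing_fitness function in the row above implements fitness sharing (niching): each individual's shifted objective value is divided by a niche count accumulated from Hamming distances to the rest of the population, so crowded peaks are penalized and the population spreads across several optima. Below is a vectorized sketch of the same idea; note that the loop version above additionally seeds the denominator at 1, and the function name here is illustrative, not from the file.

import numpy as np

def sharing_fitness(pop, raw, min_dist=1.5):
    # shift raw objective values so every fitness is strictly positive
    fit = raw + 1e-3 - np.min(raw)
    shared = np.empty_like(fit, dtype=float)
    for i in range(len(pop)):
        # Hamming distance from individual i to every member of the binary population
        dist = (pop != pop[i]).sum(axis=1)
        # triangular sharing kernel: 1 at distance 0, fading to 0 at min_dist
        niche = np.where(dist < min_dist, 1.0 - dist / min_dist, 0.0).sum()
        shared[i] = fit[i] / niche  # niche >= 1 because i is counted against itself
    return shared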
RandomForestGump/ProFitness
|
[
"ce75723b6f36bd41c955237e601b1333d8a74738"
] |
[
"main.py"
] |
[
"import asyncio\nimport logging\nimport logging.handlers\nimport queue\nimport threading\nimport urllib.request\nfrom pathlib import Path\nfrom typing import List, NamedTuple\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal # type: ignore\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n# import pydub\nimport streamlit as st\n# from aiortc.contrib.media import MediaPlayer\n\nfrom streamlit_webrtc import (\n AudioProcessorBase,\n ClientSettings,\n VideoProcessorBase,\n WebRtcMode,\n webrtc_streamer,\n)\n\nHERE = Path(__file__).parent\n\nlogger = logging.getLogger(__name__)\n\nWEBRTC_CLIENT_SETTINGS = ClientSettings(\n rtc_configuration={\"iceServers\": [{\"urls\": [\"stun:stun.l.google.com:19302\"]}]},\n media_stream_constraints={\"video\": True, \"audio\": False},\n)\n\n\ndef app_sendonly_video():\n \"\"\"A sample to use WebRTC in sendonly mode to transfer frames\n from the browser to the server and to render frames via `st.image`.\"\"\"\n webrtc_ctx = webrtc_streamer(\n key=\"video-sendonly\",\n mode=WebRtcMode.SENDONLY,\n rtc_configuration=RTC_CONFIGURATION,\n media_stream_constraints={\"video\": True},\n )\n\n image_place = st.empty()\n\n while True:\n if webrtc_ctx.video_receiver:\n try:\n video_frame = webrtc_ctx.video_receiver.get_frame(timeout=1)\n except queue.Empty:\n logger.warning(\"Queue is empty. Abort.\")\n break\n\n img_rgb = video_frame.to_ndarray(format=\"rgb24\")\n image_place.image(img_rgb)\n else:\n logger.warning(\"AudioReciver is not set. Abort.\")\n break\n\n\ndef main():\n\n st.header(\"WebRTC demo\")\n\n PoseCorrector = \"Real Time Workout Pose Corrector and Counter\"\n # video_filters_page = (\n # \"Real time video transform with simple OpenCV filters (sendrecv)\"\n # )\n # audio_filter_page = \"Real time audio filter (sendrecv)\"\n # delayed_echo_page = \"Delayed echo (sendrecv)\"\n # streaming_page = (\n # \"Consuming media files on server-side and streaming it to browser (recvonly)\"\n # )\n # video_sendonly_page = (\n # \"WebRTC is sendonly and images are shown via st.image() (sendonly)\"\n # )\n # audio_sendonly_page = (\n # \"WebRTC is sendonly and audio frames are visualized with matplotlib (sendonly)\"\n # )\n # loopback_page = \"Simple video and audio loopback (sendrecv)\"\n app_mode = st.sidebar.selectbox(\n \"Choose the app mode\",\n [\n PoseCorrector\n ],\n )\n st.subheader(app_mode)\n\n\n if app_mode == PoseCorrector:\n workout()\n\n logger.debug(\"=== Alive threads ===\")\n for thread in threading.enumerate():\n if thread.is_alive():\n logger.debug(f\" {thread.name} ({thread.ident})\")\n\n\ndef app_loopback():\n \"\"\" Simple video loopback \"\"\"\n webrtc_streamer(\n key=\"loopback\",\n mode=WebRtcMode.SENDRECV,\n client_settings=WEBRTC_CLIENT_SETTINGS,\n video_processor_factory=None, # NoOp\n )\n\ndef workout():\n\n class PoseDetector(VideoProcessorBase):\n confidence_threshold: float\n result_queue: \"queue.Queue[List[Detection]]\"\n\n def __init__(self) -> None:\n self._net = cv2.dnn.readNetFromCaffe(\n str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH)\n )\n self.confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD\n self.result_queue = queue.Queue()\n\n def _annotate_image(self, image, detections):\n # loop over the detections\n (h, w) = image.shape[:2]\n result: List[Detection] = []\n for i in np.arange(0, detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n\n if confidence > self.confidence_threshold:\n # extract the index of the class label from the `detections`,\n # 
then compute the (x, y)-coordinates of the bounding box for\n # the object\n idx = int(detections[0, 0, i, 1])\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n name = CLASSES[idx]\n result.append(Detection(name=name, prob=float(confidence)))\n\n # display the prediction\n label = f\"{name}: {round(confidence * 100, 2)}%\"\n cv2.rectangle(image, (startX, startY), (endX, endY), COLORS[idx], 2)\n y = startY - 15 if startY - 15 > 15 else startY + 15\n cv2.putText(\n image,\n label,\n (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n COLORS[idx],\n 2,\n )\n return image, result\n\n def recv(self, frame: av.VideoFrame) -> av.VideoFrame:\n image = frame.to_ndarray(format=\"bgr24\")\n blob = cv2.dnn.blobFromImage(\n cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5\n )\n self._net.setInput(blob)\n detections = self._net.forward()\n annotated_image, result = self._annotate_image(image, detections)\n\n # NOTE: This `recv` method is called in another thread,\n # so it must be thread-safe.\n self.result_queue.put(result)\n\n return av.VideoFrame.from_ndarray(annotated_image, format=\"bgr24\")\n\n\n\n c = 0\n c+=1\n while True:\n if c < 100:\n break\n else:\n print('1')\n print('terminate')\n\n webrtc_ctx = webrtc_streamer(\n key=\"object-detection\",\n mode=WebRtcMode.SENDRECV,\n rtc_configuration=RTC_CONFIGURATION,\n video_processor_factory=PoseDetector,\n media_stream_constraints={\"video\": True, \"audio\": False},\n async_processing=True,\n )\n\n\nif __name__ == \"__main__\":\n import os\n\n DEBUG = os.environ.get(\"DEBUG\", \"false\").lower() not in [\"false\", \"no\", \"0\"]\n\n logging.basicConfig(\n format=\"[%(asctime)s] %(levelname)7s from %(name)s in %(pathname)s:%(lineno)d: \"\n \"%(message)s\",\n force=True,\n )\n\n logger.setLevel(level=logging.DEBUG if DEBUG else logging.INFO)\n\n st_webrtc_logger = logging.getLogger(\"streamlit_webrtc\")\n st_webrtc_logger.setLevel(logging.DEBUG)\n\n fsevents_logger = logging.getLogger(\"fsevents\")\n fsevents_logger.setLevel(logging.WARNING)\n\n main()\n"
] |
[
[
"numpy.arange",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
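The _annotate_image method in the row above converts the OpenCV DNN detector's normalized output into pixel-space bounding boxes by scaling with the frame width and height. Stripped of the drawing calls, the core transform is the short loop below, a sketch assuming the usual (1, 1, N, 7) cv2.dnn detection layout used in that file; the function name is illustrative.

import numpy as np

def detections_to_boxes(detections, w, h, threshold=0.5):
    # each detections[0, 0, i] row is [_, class_id, confidence, x1, y1, x2, y2] (normalized)
    boxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > threshold:
            # scale normalized corners to pixel coordinates
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            boxes.append(box.astype(int))
    return boxes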
TransferRL/progressive_transfer
|
[
"04fc5fc400d65c278ffd0cff3773151ad01fc689"
] |
[
"lib/qlearning.py"
] |
[
"import gym\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport itertools\nimport numpy as np\nimport sys\nimport sklearn.pipeline\nimport sklearn.preprocessing\nfrom lib import plotting\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.kernel_approximation import RBFSampler\n\nif \"./\" not in sys.path:\n sys.path.append(\"./\")\n\n\nclass Estimator():\n \"\"\"\n Value Function approximator.\n \"\"\"\n\n def __init__(self, env, scaler, featurizer):\n # We create a separate model for each action in the environment's\n # action space. Alternatively we could somehow encode the action\n # into the features, but this way it's easier to code up.\n self.scaler = scaler\n self.featurizer = featurizer\n self.models = []\n for _ in range(env.action_space.n):\n model = SGDRegressor(learning_rate=\"constant\")\n # We need to call partial_fit once to initialize the model\n # or we get a NotFittedError when trying to make a prediction\n # This is quite hacky.\n model.partial_fit([self.featurize_state(env.reset())], [0])\n self.models.append(model)\n\n\n def featurize_state(self, state):\n \"\"\"\n Returns the featurized representation for a state.\n \"\"\"\n scaled = self.scaler.transform([state])\n featurized = self.featurizer.transform(scaled)\n return featurized[0]\n\n def predict(self, s, a=None):\n \"\"\"\n Makes value function predictions.\n\n Args:\n s: state to make a prediction for\n a: (Optional) action to make a prediction for\n\n Returns\n If an action a is given this returns a single number as the prediction.\n If no action is given this returns a vector or predictions for all actions\n in the environment where pred[i] is the prediction for action i.\n\n \"\"\"\n features = self.featurize_state(s)\n if not a:\n return np.array([m.predict([features])[0] for m in self.models])\n else:\n return self.models[a].predict([features])[0]\n\n def update(self, s, a, y):\n \"\"\"\n Updates the estimator parameters for a given state and action towards\n the target y.\n \"\"\"\n features = self.featurize_state(s)\n self.models[a].partial_fit([features], [y])\n\n\nclass QLearning():\n\n def __init__(self, env, rendering=False):\n # Feature Preprocessing: Normalize to zero mean and unit variance\n # We use a few samples from the observation space to do this\n observation_examples = np.array([env.observation_space.sample() for x in range(10000)])\n scaler = sklearn.preprocessing.StandardScaler()\n scaler.fit(observation_examples)\n\n # Used to converte a state to a featurizes represenation.\n # We use RBF kernels with different variances to cover different parts of the space\n featurizer = sklearn.pipeline.FeatureUnion([\n (\"rbf1\", RBFSampler(gamma=5.0, n_components=100)),\n (\"rbf2\", RBFSampler(gamma=2.0, n_components=100)),\n (\"rbf3\", RBFSampler(gamma=1.0, n_components=100)),\n (\"rbf4\", RBFSampler(gamma=0.5, n_components=100))\n ])\n featurizer.fit(scaler.transform(observation_examples))\n\n self.estimator = Estimator(env, scaler, featurizer)\n self.env = env\n self.replay_memory = None\n self.rendering = rendering\n\n def make_epsilon_greedy_policy(self, estimator, epsilon, nA):\n \"\"\"\n Creates an epsilon-greedy policy based on a given Q-function approximator and epsilon.\n\n Args:\n estimator: An estimator that returns q values for a given state\n epsilon: The probability to select a random action . 
float between 0 and 1.\n nA: Number of actions in the environment.\n\n Returns:\n A function that takes the observation as an argument and returns\n the probabilities for each action in the form of a numpy array of length nA.\n\n \"\"\"\n\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(observation)\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy_fn\n\n def q_learning(self, env, estimator, num_episodes, discount_factor=1.0, epsilon=0.5, epsilon_decay=1.0):\n \"\"\"\n Q-Learning algorithm for fff-policy TD control using Function Approximation.\n Finds the optimal greedy policy while following an epsilon-greedy policy.\n\n Args:\n env: OpenAI environment.\n estimator: Action-Value function estimator\n num_episodes: Number of episodes to run for.\n discount_factor: Lambda time discount factor.\n epsilon: Chance the sample a random action. Float betwen 0 and 1.\n epsilon_decay: Each episode, epsilon is decayed by this factor\n\n Returns:\n An EpisodeStats object with two numpy arrays for episode_lengths and episode_rewards.\n \"\"\"\n\n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n for i_episode in range(num_episodes):\n\n # The policy we're following\n policy = self.make_epsilon_greedy_policy(\n estimator, epsilon * epsilon_decay ** i_episode, env.action_space.n)\n\n # Print out which episode we're on, useful for debugging.\n # Also print reward for last episode\n last_reward = stats.episode_rewards[i_episode - 1]\n sys.stdout.flush()\n\n # Reset the environment and pick the first action\n state = env.reset()\n\n # Only used for SARSA, not Q-Learning\n next_action = None\n\n # One step in the environment\n for t in itertools.count():\n\n # Choose an action to take\n # If we're using SARSA we already decided in the previous step\n if next_action is None:\n action_probs = policy(state)\n action = np.random.choice(np.arange(len(action_probs)), p=action_probs)\n else:\n action = next_action\n\n # Take a step\n next_state, reward, done, _ = env.step(action)\n # env.render()\n\n # Update statistics\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t\n\n # TD Update\n q_values_next = estimator.predict(next_state)\n\n # Use this code for Q-Learning\n # Q-Value TD Target\n td_target = reward + discount_factor * np.max(q_values_next)\n\n # Use this code for SARSA TD Target for on policy-training:\n # next_action_probs = policy(next_state)\n # next_action = np.random.choice(np.arange(len(next_action_probs)), p=next_action_probs)\n # td_target = reward + discount_factor * q_values_next[next_action]\n\n # Update the function approximator using our target\n estimator.update(state, action, td_target)\n\n print(\"\\rStep {} @ Episode {}/{} ({})\".format(t, i_episode + 1, num_episodes, last_reward), end=\"\")\n\n if done:\n break\n\n state = next_state\n\n def learn(self, num_episodes=100):\n self.q_learning(self.env, self.estimator, num_episodes, epsilon=0.3)\n\n def play(self):\n self.replay_memory = [] # reset\n # done = 0\n policy = self.make_epsilon_greedy_policy(self.estimator, 0, self.env.action_space.n) #TODO: jm: is this the best way?\n state = self.env.reset()\n for t in range(100000):\n action_probs = policy(state)\n action = np.random.choice(np.arange(len(action_probs)), p=action_probs)\n next_state, reward, done, info = self.env.step(action)\n\n # 
print('done: {}'.format(done))\n # print('state: {}'.format(next_state))\n # print('action: {}'.format(action))\n\n self.replay_memory.append([state, action, next_state, reward, done])\n\n if self.rendering:\n self.env.render()\n # self.env.render_y()\n # self.env.render_orthographic()\n\n if done:\n print('done: {}'.format(next_state))\n break\n # pass\n # plt.figure()\n # plt.imshow(env.render(mode='rgb_array'))\n # break\n state = next_state\n\n return self.replay_memory\n\n\n\n"
] |
[
[
"sklearn.linear_model.SGDRegressor",
"sklearn.kernel_approximation.RBFSampler",
"numpy.ones",
"numpy.max",
"numpy.argmax",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
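The q_learning row above selects actions with an epsilon-greedy policy over the estimator's Q-values: probability epsilon/nA on every action, with the remaining 1 - epsilon concentrated on the greedy action. A minimal runnable sketch of that selection rule, using a hypothetical StubEstimator in place of the row's SGDRegressor-based estimator:

import numpy as np

def make_epsilon_greedy_policy(estimator, epsilon, nA):
    # epsilon/nA probability on every action, remaining 1 - epsilon on the greedy one
    def policy_fn(observation):
        A = np.ones(nA, dtype=float) * epsilon / nA
        A[np.argmax(estimator.predict(observation))] += 1.0 - epsilon
        return A
    return policy_fn

class StubEstimator:
    # hypothetical stand-in that always predicts the same Q-values
    def predict(self, observation):
        return np.array([0.1, 0.9, 0.0])

policy = make_epsilon_greedy_policy(StubEstimator(), epsilon=0.1, nA=3)
print(policy(None))  # approx [0.0333, 0.9333, 0.0333]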
modenaxe/BMC
|
[
"b6f6e473878ab7b0c19430d1b66b6dba09059c63"
] |
[
"functions/muscles.py"
] |
[
"\"\"\"Muscle modeling and simulation.\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\nfrom scipy.integrate import ode\nimport warnings\nimport configparser\n\n\n__author__ = 'Marcos Duarte, https://github.com/demotu/BMC'\n__version__ = 'muscles.py v.1 2015/03/01'\n\n\nclass Thelen2003():\n \"\"\" Thelen (2003) muscle model.\n \"\"\"\n\n def __init__(self, parameters=None, states=None):\n if parameters is not None:\n self.set_parameters(parameters)\n if states is not None:\n self.set_states(states)\n\n self.lm_data = []\n self.act_data = []\n\n\n def set_parameters(self, var=None):\n \"\"\"Load and set parameters for the muscle model.\n \"\"\"\n \n if var is None:\n var = './../data/muscle_parameter.txt'\n if isinstance(var, str):\n self.P = self.config_parser(var, 'parameters')\n elif isinstance(var, dict):\n self.P = var\n else:\n raise ValueError('Wrong parameters!') \n \n print('The parameters were successfully loaded ' +\n 'and are stored in the variable P.')\n \n\n def set_states(self, var=None):\n \"\"\"Load and set states for the muscle model.\n \"\"\"\n \n if var is None:\n var = './../data/muscle_state.txt'\n if isinstance(var, str):\n self.S = self.config_parser(var, 'states')\n elif isinstance(var, dict):\n self.S = var\n else:\n raise ValueError('Wrong states!') \n \n print('The states were successfully loaded ' +\n 'and are stored in the variable S.')\n \n\n def config_parser(self, filename, var):\n\n parser = configparser.ConfigParser()\n parser.optionxform = str # make option names case sensitive\n parser.read(filename)\n if not parser:\n raise ValueError('File %s not found!' %var)\n #if not 'Muscle' in parser.sections()[0]:\n # raise ValueError('Wrong %s file!' %var)\n var = {}\n for key, value in parser.items(parser.sections()[0]):\n if key.lower() in ['name', 'id']:\n var.update({key: value})\n else:\n try:\n value = float(value)\n except ValueError:\n print('\"%s\" value \"%s\" was replaced by NaN.' %(key, value))\n value = np.nan\n var.update({key: value})\n \n return var \n \n\n def force_l(self, lm, gammal=None):\n \"\"\"Thelen (2003) force of the contractile element vs. muscle length.\n\n Parameters\n ----------\n lm : float\n normalized muscle fiber length\n gammal : float, optional (default from parameter file)\n shape factor\n\n Returns\n -------\n fl : float\n normalized force of the muscle contractile element\n \"\"\"\n\n if gammal is None: gammal = self.P['gammal']\n\n fl = np.exp(-(lm-1)**2/gammal)\n \n return fl\n\n\n def force_pe(self, lm, kpe=None, epsm0=None):\n \"\"\"Thelen (2003) force of the muscle parallel element vs. muscle length.\n \n Parameters\n ----------\n lm : float\n normalized muscle fiber length\n kpe : float, optional (default from parameter file)\n exponential shape factor\n epsm0 : float, optional (default from parameter file)\n passive muscle strain due to maximum isometric force\n \n Returns\n -------\n fpe : float\n normalized force of the muscle parallel (passive) element\n \"\"\"\n \n if kpe is None: kpe = self.P['kpe']\n if epsm0 is None: epsm0 = self.P['epsm0']\n\n if lm <= 1:\n fpe = 0\n else:\n fpe = (np.exp(kpe*(lm-1)/epsm0)-1)/(np.exp(kpe)-1)\n \n return fpe\n \n \n def force_se(self, lt, ltslack=None, epst0=None, kttoe=None):\n \"\"\"Thelen (2003) force-length relationship of tendon vs. 
tendon length.\n \n Parameters\n ----------\n lt : float\n tendon length (normalized or not)\n ltslack : float, optional (default from parameter file)\n tendon slack length (normalized or not)\n epst0 : float, optional (default from parameter file)\n tendon strain at the maximal isometric muscle force\n kttoe : float, optional (default from parameter file)\n linear scale factor\n \n Returns\n -------\n fse : float\n normalized force of the tendon series element\n \"\"\"\n \n if ltslack is None: ltslack = self.P['ltslack']\n if epst0 is None: epst0 = self.P['epst0']\n if kttoe is None: kttoe = self.P['kttoe']\n\n epst = (lt-ltslack)/ltslack\n fttoe = .33\n # values from OpenSim Thelen2003Muscle\n epsttoe = .99*epst0*np.e**3/(1.66*np.e**3 - .67)\n ktlin = .67/(epst0 - epsttoe)\n #\n if epst <= 0:\n fse = 0\n elif epst <= epsttoe:\n fse = fttoe/(np.exp(kttoe)-1)*(np.exp(kttoe*epst/epsttoe)-1)\n else:\n fse = ktlin*(epst-epsttoe) + fttoe\n \n return fse\n \n \n def velo_fm(self, fm, a, fl, lmopt=None, vmmax=None, fmlen=None, af=None):\n \"\"\"Thelen (2003) velocity of the force-velocity relationship vs. CE force.\n \n Parameters\n ----------\n fm : float\n normalized muscle force\n a : float\n muscle activation level\n fl : float\n normalized muscle force due to the force-length relationship\n lmopt : float, optional (default from parameter file)\n optimal muscle fiber length\n vmmax : float, optional (default from parameter file)\n normalized maximum muscle velocity for concentric activation\n fmlen : float, optional (default from parameter file)\n normalized maximum force generated at the lengthening phase\n af : float, optional (default from parameter file)\n shape factor\n \n Returns\n -------\n vm : float\n velocity of the muscle\n \"\"\"\n\n if lmopt is None: lmopt = self.P['lmopt']\n if vmmax is None: vmmax = self.P['vmmax']\n if fmlen is None: fmlen = self.P['fmlen']\n if af is None: af = self.P['af']\n\n if fm <= a*fl: # isometric and concentric activation\n if fm > 0:\n b = a*fl + fm/af\n else:\n b = a*fl\n else: # eccentric activation\n asyE_thresh = 0.95 # from OpenSim Thelen2003Muscle\n if fm < a*fl*fmlen*asyE_thresh:\n b = (2 + 2/af)*(a*fl*fmlen - fm)/(fmlen - 1)\n else:\n fm0 = a*fl*fmlen*asyE_thresh\n b = (2 + 2/af)*(a*fl*fmlen - fm0)/(fmlen - 1)\n\n vm = (0.25 + 0.75*a)*1*(fm - a*fl)/b\n vm = vm*vmmax*lmopt\n\n return vm\n \n \n def force_vm(self, vm, a, fl, lmopt=None, vmmax=None, fmlen=None, af=None):\n \"\"\"Thelen (2003) force of the contractile element vs. 
muscle velocity.\n \n Parameters\n ----------\n vm : float\n muscle velocity\n a : float\n muscle activation level\n fl : float\n normalized muscle force due to the force-length relationship\n lmopt : float, optional (default from parameter file)\n optimal muscle fiber length\n vmmax : float, optional (default from parameter file)\n normalized maximum muscle velocity for concentric activation\n fmlen : float, optional (default from parameter file)\n normalized maximum force generated at the lengthening phase\n af : float, optional (default from parameter file)\n shape factor\n \n Returns\n -------\n fvm : float\n normalized force of the muscle contractile element\n \"\"\"\n\n if lmopt is None: lmopt = self.P['lmopt']\n if vmmax is None: vmmax = self.P['vmmax']\n if fmlen is None: fmlen = self.P['fmlen']\n if af is None: af = self.P['af']\n\n vmmax = vmmax*lmopt\n if vm <= 0: # isometric and concentric activation\n fvm = af*a*fl*(4*vm + vmmax*(3*a + 1))/(-4*vm + vmmax*af*(3*a + 1))\n else: # eccentric activation\n fvm = a*fl*(af*vmmax*(3*a*fmlen - 3*a + fmlen - 1) + \\\n 8*vm*fmlen*(af + 1)) / \\\n (af*vmmax*(3*a*fmlen - 3*a + fmlen - 1) + 8*vm*(af + 1)) \n \n return fvm\n \n \n def lmt_eq(self, t, lmt0=None):\n \"\"\"Equation for muscle-tendon length.\"\"\"\n\n if lmt0 is None:\n lmt0 = self.S['lmt0']\n \n return lmt0\n\n \n def vm_eq(self, t, lm, lm0, lmt0, lmopt, ltslack, alpha0, vmmax, fm0):\n \"\"\"Equation for muscle velocity.\"\"\"\n\n if lm < 0.1*lmopt:\n lm = 0.1*lmopt\n #lt0 = lmt0 - lm0*np.cos(alpha0) \n a = self.activation(t)\n lmt = self.lmt_eq(t, lmt0)\n alpha = self.penn_ang(lmt=lmt, lm=lm, lm0=lm0, alpha0=alpha0)\n lt = lmt - lm*np.cos(alpha)\n fse = self.force_se(lt=lt, ltslack=ltslack)\n fpe = self.force_pe(lm=lm/lmopt)\n fl = self.force_l(lm=lm/lmopt)\n fce_t = fse/np.cos(alpha) - fpe\n #if fce_t < 0: fce_t=0\n vm = self.velo_fm(fm=fce_t, a=a, fl=fl)\n\n return vm\n\n\n def lm_sol(self, fun=None, t0=0, t1=3, lm0=None, lmt0=None, ltslack=None, lmopt=None,\n alpha0=None, vmmax=None, fm0=None, show=True, axs=None):\n \"\"\"Runge-Kutta (4)5 ODE solver for muscle length.\"\"\"\n\n if lm0 is None: lm0 = self.S['lm0']\n if lmt0 is None: lmt0 = self.S['lmt0']\n if ltslack is None: ltslack = self.P['ltslack']\n if alpha0 is None: alpha0 = self.P['alpha0']\n if lmopt is None: lmopt = self.P['lmopt']\n if vmmax is None: vmmax = self.P['vmmax']\n if fm0 is None: fm0 = self.P['fm0']\n \n if fun is None:\n fun = self.vm_eq\n f = ode(fun).set_integrator('dopri5', nsteps=1, max_step=0.005, atol=1e-8) \n f.set_initial_value(lm0, t0).set_f_params(lm0, lmt0, lmopt, ltslack, alpha0, vmmax, fm0)\n # suppress Fortran warning\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n data = []\n while f.t < t1:\n f.integrate(t1, step=True)\n d = self.calc_data(f.t, np.max([f.y, 0.1*lmopt]), lm0, lmt0,\n ltslack, lmopt, alpha0, fm0)\n data.append(d)\n\n warnings.resetwarnings()\n data = np.array(data)\n self.lm_data = data\n if show:\n self.lm_plot(data, axs)\n \n return data\n \n \n def calc_data(self, t, lm, lm0, lmt0, ltslack, lmopt, alpha0, fm0):\n \"\"\"Calculation of muscle-tendon variables.\"\"\"\n \n a = self.activation(t)\n lmt = self.lmt_eq(t, lmt0=lmt0)\n alpha = self.penn_ang(lmt=lmt, lm=lm, lm0=lm0, alpha0=alpha0)\n lt = lmt - lm*np.cos(alpha)\n fl = self.force_l(lm=lm/lmopt)\n fpe = self.force_pe(lm=lm/lmopt)\n fse = self.force_se(lt=lt, ltslack=ltslack)\n fce_t = fse/np.cos(alpha) - fpe\n vm = self.velo_fm(fm=fce_t, a=a, fl=fl, lmopt=lmopt)\n fm = self.force_vm(vm=vm, 
fl=fl, lmopt=lmopt, a=a) + fpe \n data = [t, lmt, lm, lt, vm, fm*fm0, fse*fm0, a*fl*fm0, fpe*fm0, alpha]\n \n return data\n \n \n def muscle_plot(self, a=1, axs=None):\n \"\"\"Plot muscle-tendon relationships with length and velocity.\"\"\"\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n print('matplotlib is not available.')\n return\n \n if axs is None:\n _, axs = plt.subplots(nrows=1, ncols=3, figsize=(9, 4))\n \n lmopt = self.P['lmopt']\n ltslack = self.P['ltslack']\n vmmax = self.P['vmmax']\n alpha0 = self.P['alpha0']\n fm0 = self.P['fm0']\n lm0 = self.S['lm0']\n lmt0 = self.S['lmt0']\n lt0 = self.S['lt0']\n if np.isnan(lt0):\n lt0 = lmt0 - lm0*np.cos(alpha0)\n \n lm = np.linspace(0, 2, 101)\n lt = np.linspace(0, 1, 101)*0.05 + 1\n vm = np.linspace(-1, 1, 101)*vmmax*lmopt\n fl = np.zeros(lm.size)\n fpe = np.zeros(lm.size)\n fse = np.zeros(lt.size)\n fvm = np.zeros(vm.size)\n \n fl_lm0 = self.force_l(lm0/lmopt)\n fpe_lm0 = self.force_pe(lm0/lmopt)\n fm_lm0 = fl_lm0 + fpe_lm0\n ft_lt0 = self.force_se(lt0, ltslack)*fm0 \n \n for i in range(101):\n fl[i] = self.force_l(lm[i])\n fpe[i] = self.force_pe(lm[i])\n fse[i] = self.force_se(lt[i], ltslack=1)\n fvm[i] = self.force_vm(vm[i], a=a, fl=fl_lm0)\n\n lm = lm*lmopt\n lt = lt*ltslack\n fl = fl\n fpe = fpe\n fse = fse*fm0\n fvm = fvm*fm0\n \n xlim = self.margins(lm, margin=.05, minmargin=False)\n axs[0].set_xlim(xlim)\n ylim = self.margins([0, 2], margin=.05)\n axs[0].set_ylim(ylim)\n axs[0].plot(lm, fl, 'b', label='Active')\n axs[0].plot(lm, fpe, 'b--', label='Passive')\n axs[0].plot(lm, fl+fpe, 'b:', label='')\n axs[0].plot([lm0, lm0], [ylim[0], fm_lm0], 'k:', lw=2, label='')\n axs[0].plot([xlim[0], lm0], [fm_lm0, fm_lm0], 'k:', lw=2, label='')\n axs[0].plot(lm0, fm_lm0, 'o', ms=6, mfc='r', mec='r', mew=2, label='fl(LM0)')\n axs[0].legend(loc='best', frameon=True, framealpha=.5)\n axs[0].set_xlabel('Length [m]')\n axs[0].set_ylabel('Scale factor')\n axs[0].xaxis.set_major_locator(plt.MaxNLocator(4))\n axs[0].yaxis.set_major_locator(plt.MaxNLocator(4))\n axs[0].set_title('Muscle F-L (a=1)')\n \n xlim = self.margins([0, np.min(vm), np.max(vm)], margin=.05, minmargin=False)\n axs[1].set_xlim(xlim)\n ylim = self.margins([0, fm0*1.2, np.max(fvm)*1.5], margin=.025)\n axs[1].set_ylim(ylim)\n axs[1].plot(vm, fvm, label='')\n axs[1].set_xlabel('$\\mathbf{^{CON}}\\;$ Velocity [m/s] $\\;\\mathbf{^{EXC}}$')\n axs[1].plot([0, 0], [ylim[0], fvm[50]], 'k:', lw=2, label='')\n axs[1].plot([xlim[0], 0], [fvm[50], fvm[50]], 'k:', lw=2, label='')\n axs[1].plot(0, fvm[50], 'o', ms=6, mfc='r', mec='r', mew=2, label='FM0(LM0)')\n axs[1].plot(xlim[0], fm0, '+', ms=10, mfc='r', mec='r', mew=2, label='')\n axs[1].text(vm[0], fm0, 'FM0')\n axs[1].legend(loc='upper right', frameon=True, framealpha=.5)\n axs[1].set_ylabel('Force [N]')\n axs[1].xaxis.set_major_locator(plt.MaxNLocator(4))\n axs[1].yaxis.set_major_locator(plt.MaxNLocator(4))\n axs[1].set_title('Muscle F-V (a=1)')\n\n xlim = self.margins([lt0, ltslack, np.min(lt), np.max(lt)], margin=.05,\n minmargin=False)\n axs[2].set_xlim(xlim)\n ylim = self.margins([ft_lt0, 0, np.max(fse)], margin=.05)\n axs[2].set_ylim(ylim)\n axs[2].plot(lt, fse, label='')\n axs[2].set_xlabel('Length [m]')\n axs[2].plot([lt0, lt0], [ylim[0], ft_lt0], 'k:', lw=2, label='')\n axs[2].plot([xlim[0], lt0], [ft_lt0, ft_lt0], 'k:', lw=2, label='')\n axs[2].plot(lt0, ft_lt0, 'o', ms=6, mfc='r', mec='r', mew=2, label='FT(LT0)')\n axs[2].legend(loc='upper left', frameon=True, framealpha=.5)\n axs[2].set_ylabel('Force [N]')\n 
axs[2].xaxis.set_major_locator(plt.MaxNLocator(4))\n axs[2].yaxis.set_major_locator(plt.MaxNLocator(4))\n axs[2].set_title('Tendon') \n plt.suptitle('Muscle-tendon mechanics', fontsize=18, y=1.03)\n plt.tight_layout(w_pad=.1)\n plt.show()\n \n return axs\n\n\n def lm_plot(self, x, axs=None):\n \"\"\"Plot results of actdyn_ode45 function.\n data = [t, lmt, lm, lt, vm, fm*fm0, fse*fm0, fl*fm0, fpe*fm0, alpha]\n \"\"\"\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n print('matplotlib is not available.')\n return\n \n if axs is None:\n _, axs = plt.subplots(nrows=3, ncols=2, sharex=True, figsize=(10, 6))\n\n axs[0, 0].plot(x[:, 0], x[:, 1], 'b', label='LMT')\n lmt = x[:, 2]*np.cos(x[:, 9]) + x[:, 3]\n if np.sum(x[:, 9]) > 0:\n axs[0, 0].plot(x[:, 0], lmt, 'g--', label=r'$LM \\cos \\alpha + LT$')\n else:\n axs[0, 0].plot(x[:, 0], lmt, 'g--', label=r'LM+LT')\n ylim = self.margins(x[:, 1], margin=.1)\n axs[0, 0].set_ylim(ylim)\n axs[0, 0].legend(framealpha=.5, loc='best')\n \n axs[0, 1].plot(x[:, 0], x[:, 3], 'b')\n #axs[0, 1].plot(x[:, 0], lt0*np.ones(len(x)), 'r')\n ylim = self.margins(x[:, 3], margin=.1)\n axs[0, 1].set_ylim(ylim)\n \n axs[1, 0].plot(x[:, 0], x[:, 2], 'b')\n #axs[1, 0].plot(x[:, 0], lmopt*np.ones(len(x)), 'r')\n ylim = self.margins(x[:, 2], margin=.1)\n axs[1, 0].set_ylim(ylim)\n \n axs[1, 1].plot(x[:, 0], x[:, 4], 'b')\n ylim = self.margins(x[:, 4], margin=.1)\n axs[1, 1].set_ylim(ylim)\n \n axs[2, 0].plot(x[:, 0], x[:, 5], 'b', label='Muscle')\n axs[2, 0].plot(x[:, 0], x[:, 6], 'g--', label='Tendon')\n ylim = self.margins(x[:, [5, 6]], margin=.1)\n axs[2, 0].set_ylim(ylim)\n axs[2, 0].set_xlabel('Time (s)')\n axs[2, 0].legend(framealpha=.5, loc='best')\n \n axs[2, 1].plot(x[:, 0], x[:, 8], 'b', label='PE')\n ylim = self.margins(x[:, 8], margin=.1)\n axs[2, 1].set_ylim(ylim)\n axs[2, 1].set_xlabel('Time (s)')\n axs[2, 1].legend(framealpha=.5, loc='best')\n \n ylabel = ['$L_{MT}\\,(m)$', '$L_{T}\\,(m)$', '$L_{M}\\,(m)$',\n '$V_{CE}\\,(m/s)$', '$Force\\,(N)$', '$Force\\,(N)$']\n for i, axi in enumerate(axs.flat):\n axi.set_ylabel(ylabel[i], fontsize=14)\n axi.yaxis.set_major_locator(plt.MaxNLocator(4))\n axi.yaxis.set_label_coords(-.2, 0.5)\n\n plt.suptitle('Simulation of muscle-tendon mechanics', fontsize=18,\n y=1.03)\n plt.tight_layout()\n plt.show()\n \n return axs\n \n \n def penn_ang(self, lmt, lm, lt=None, lm0=None, alpha0=None):\n \"\"\"Pennation angle.\n \n Parameters\n ----------\n lmt : float\n muscle-tendon length\n lt : float, optional (default=None)\n tendon length\n lm : float, optional (default=None)\n muscle fiber length\n lm0 : float, optional (default from states file)\n initial muscle fiber length\n alpha0 : float, optional (default from parameter file)\n initial pennation angle\n\n Returns\n -------\n alpha : float\n pennation angle\n \"\"\"\n\n if lm0 is None: lm0 = self.S['lm0']\n if alpha0 is None: alpha0 = self.P['alpha0']\n\n alpha = alpha0\n if alpha0 != 0:\n w = lm0*np.sin(alpha0)\n if lm is not None:\n cosalpha = np.sqrt(1-(w/lm)**2)\n elif lmt is not None and lt is not None:\n cosalpha = 1/(np.sqrt(1 + (w/(lmt-lt))**2))\n alpha = np.arccos(cosalpha)\n \n if alpha > 1.4706289: # np.arccos(0.1), 84.2608 degrees\n alpha = 1.4706289\n \n return alpha\n \n\n def excitation(self, t, u_max=None, u_min=None, t0=0, t1=5):\n \"\"\"Excitation signal, a square wave.\n\n Parameters\n ----------\n t : float\n time instant [s]\n u_max : float (0 < u_max <= 1), optional (default from parameter file)\n maximum value for muscle excitation\n u_min 
: float (0 < u_min < 1), optional (default from parameter file)\n minimum value for muscle excitation\n t0 : float, optional (default=0)\n initial time instant for muscle excitation equals to u_max [s]\n t1 : float, optional (default=5)\n final time instant for muscle excitation equals to u_max [s]\n\n Returns\n -------\n u : float (0 < u <= 1)\n excitation signal\n \"\"\"\n\n if u_max is None: u_max = self.P['u_max']\n if u_min is None: u_min = self.P['u_min']\n \n u = u_min\n if t >= t0 and t <= t1:\n u = u_max\n\n return u\n\n\n def activation_dyn(self, t, a, t_act=None, t_deact=None):\n \"\"\"Thelen (2003) activation dynamics, the derivative of `a` at `t`.\n\n Parameters\n ----------\n t : float\n time instant [s]\n a : float (0 <= a <= 1)\n muscle activation\n t_act : float, optional (default from parameter file)\n activation time constant [s]\n t_deact : float, optional (default from parameter file)\n deactivation time constant [s]\n \n Returns\n -------\n adot : float \n derivative of `a` at `t`\n \"\"\"\n\n if t_act is None: t_act = self.P['t_act']\n if t_deact is None: t_deact = self.P['t_deact']\n \n u = self.excitation(t)\n if u > a:\n adot = (u - a)/(t_act*(0.5 + 1.5*a))\n else:\n adot = (u - a)/(t_deact/(0.5 + 1.5*a))\n\n return adot\n\n\n def activation_sol(self, fun=None, t0=0, t1=3, a0=0, u_min=None,\n t_act=None, t_deact=None, show=True, axs=None):\n \"\"\"Runge-Kutta (4)5 ODE solver for activation dynamics.\n\n Parameters\n ----------\n fun : function object, optional (default is None and `actdyn` is used)\n function with ODE to be solved\n t0 : float, optional (default=0)\n initial time instant for the simulation [s]\n t1 : float, optional (default=0)\n final time instant for the simulation [s]\n a0 : float, optional (default=0)\n initial muscle activation\n u_max : float (0 < u_max <= 1), optional (default from parameter file)\n maximum value for muscle excitation\n u_min : float (0 < u_min < 1), optional (default from parameter file)\n minimum value for muscle excitation\n t_act : float, optional (default from parameter file)\n activation time constant [s]\n t_deact : float, optional (default from parameter file)\n deactivation time constant [s]\n show : bool, optional (default = True)\n if True (1), plot data in matplotlib figure\n axs : a matplotlib.axes.Axes instance, optional (default = None)\n\n Returns\n -------\n data : 2-d array\n array with columns [time, excitation, activation]\n \n \"\"\"\n\n if u_min is None: u_min = self.P['u_min']\n if t_act is None: t_act = self.P['t_act']\n if t_deact is None: t_deact = self.P['t_deact']\n \n if fun is None:\n fun = self.activation_dyn\n f = ode(fun).set_integrator('dopri5', nsteps=1, max_step=0.005, atol=1e-8) \n f.set_initial_value(a0, t0).set_f_params(t_act, t_deact)\n # suppress Fortran warning\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n data = []\n while f.t < t1:\n f.integrate(t1, step=True)\n data.append([f.t, self.excitation(f.t), np.max([f.y, u_min])])\n warnings.resetwarnings()\n data = np.array(data)\n if show:\n self.actvation_plot(data, axs)\n\n self.act_data = data\n \n return data\n\n\n def activation(self, t=None):\n \"\"\"Activation signal.\"\"\"\n \n data = self.act_data \n if t is not None and len(data):\n if t <= self.act_data[0, 0]:\n a = self.act_data[0, 2]\n elif t >= self.act_data[-1, 0]:\n a = self.act_data[-1, 2]\n else:\n a = np.interp(t, self.act_data[:, 0], self.act_data[:, 2])\n else:\n a = 1\n \n return a\n\n\n def actvation_plot(self, data, axs=None):\n \"\"\"Plot 
results of actdyn_ode45 function.\"\"\"\n\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n print('matplotlib is not available.')\n return\n \n if axs is None:\n _, axs = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))\n \n axs.plot(data[:, 0], data[:, 1], color=[1, 0, 0, .6], label='Excitation')\n axs.plot(data[:, 0], data[:, 2], color=[0, 0, 1, .6], label='Activation')\n axs.set_xlabel('Time [s]')\n axs.set_ylabel('Level')\n axs.legend()\n plt.title('Activation dynamics')\n plt.tight_layout()\n plt.show()\n \n return axs\n \n \n def margins(self, x, margin=0.01, minmargin=True):\n \"\"\"Calculate plot limits with extra margins.\n \"\"\"\n rang = np.nanmax(x) - np.nanmin(x)\n if rang < 0.001 and minmargin:\n rang = 0.001*np.nanmean(x)/margin\n if rang < 1:\n rang = 1\n lim = [np.nanmin(x) - rang*margin, np.nanmax(x) + rang*margin]\n\n return lim\n"
] |
[
[
"numpy.nanmax",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.MaxNLocator",
"numpy.nanmin",
"numpy.max",
"numpy.nanmean",
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"numpy.sin",
"numpy.interp",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.isnan",
"numpy.arccos",
"matplotlib.pyplot.suptitle",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.cos",
"scipy.integrate.ode"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
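The force_l method in the muscles.py row above is the Gaussian force-length relation fl = exp(-(lm - 1)**2 / gammal). A standalone sketch; gammal = 0.45 is assumed here purely for illustration, since the model reads the real value from its parameter file:

import numpy as np

def force_l(lm, gammal=0.45):
    # normalized active force vs. normalized fiber length; peaks at lm == 1
    return np.exp(-(lm - 1) ** 2 / gammal)

for lm in (0.7, 1.0, 1.3):
    print(lm, round(float(force_l(lm)), 3))  # ~0.819 at 0.7 and 1.3, 1.0 at the optimum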
RubenvanHeusden/HFO-Robotkeeper
|
[
"03bbe1170d703b7f264ef245b99a0ced2759ed39"
] |
[
"example/test_keepers/critic.py"
] |
[
"import numpy as np\nimport tensorflow as tf\n\n\nclass Critic:\n def __init__(self, sess, state_size, action_size, param_size, num_outputs):\n \n #TODO : add batch normalization\n self.l1_size = 256\n self.l2_size = 128\n self.l3_size = 64\n self.l4_size = 32\n self.learning_rate = 0.0001\n self.sess = sess\n # providing a probibility distribution for the initialization of \n # the weights\n \n self._kernel_init = tf.truncated_normal_initializer(mean=0.0, \n stddev=0.01, dtype=tf.float64)\n \n \n # input consists of \n self.state_input = tf.placeholder(shape=[None, state_size], dtype=tf.float64)\n self.action_input = tf.placeholder(shape=[None, action_size], dtype=tf.float64)\n self.param_input = tf.placeholder(shape=[None, param_size], dtype=tf.float64)\n \n self.input_layer = tf.concat((self.state_input, self.action_input, self.param_input), axis=1)\n \n \n self._layer1 = tf.layers.dense(inputs = self.input_layer, \n kernel_initializer = self._kernel_init, \n activation=tf.nn.relu, units = self.l1_size)\n \n self._layer2 = tf.layers.dense(inputs = self._layer1, \n kernel_initializer = self._kernel_init, \n activation=tf.nn.relu, units = self.l2_size)\n \n self._layer3 = tf.layers.dense(inputs = self._layer2, \n kernel_initializer = self._kernel_init, \n activation=tf.nn.relu, units = self.l3_size)\n \n self._layer4 = tf.layers.dense(inputs = self._layer3, \n kernel_initializer = self._kernel_init, \n activation=tf.nn.relu, units = self.l4_size)\n \n self.out = tf.layers.dense(inputs = self._layer4, \n kernel_initializer = self._kernel_init, \n units = num_outputs)\n \n \n \n # predicted Q value that comes from the target critic network\n self.predicted_q_value = tf.placeholder(shape=[None, 1], dtype=tf.float64)\n self.loss = np.sum(tf.square(tf.subtract(self.predicted_q_value, self.out)))\n \n self.update_model = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n \n self.action_grads = tf.gradients(self.out, [self.action_input, self.param_input])\n \n def predict(self, inputs):\n return self.sess.run(self.out, feed_dict = {self.input_layer:inputs})\n \n \n def train(self):\n pass \n \n def get_gradients(self, state, action, params):\n return self.sess.run(self.action_grads, \n feed_dict = {self.state_input:state, \n self.action_input:action,\n self.param_input:params})\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n"
] |
[
[
"tensorflow.concat",
"tensorflow.gradients",
"tensorflow.layers.dense",
"tensorflow.placeholder",
"tensorflow.truncated_normal_initializer",
"tensorflow.subtract",
"tensorflow.train.AdamOptimizer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
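The critic.py row above forms its loss as a sum of squared TD errors; the reduction has to be a TensorFlow op (tf.reduce_sum) rather than np.sum so that it stays inside the graph. A minimal TF1-style sketch of just that loss node, with a placeholder standing in for the network output:

import numpy as np
import tensorflow as tf

predicted_q = tf.placeholder(shape=[None, 1], dtype=tf.float64)
out = tf.placeholder(shape=[None, 1], dtype=tf.float64)  # stand-in for the critic output

# sum of squared TD errors, kept as a symbolic graph op
loss = tf.reduce_sum(tf.square(tf.subtract(predicted_q, out)))

with tf.Session() as sess:
    print(sess.run(loss, feed_dict={predicted_q: np.array([[1.0], [2.0]]),
                                    out: np.array([[0.5], [2.5]])}))  # 0.5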
fabian-paul/wepy
|
[
"3a470f364a5ffbefcdb5d17a12cd08d90f4944e4"
] |
[
"examples/Lennard_Jones_Pair/we.py"
] |
[
"\"\"\"Very simple example using a pair of Lennard-Jones particles.\n\nRequires the package `openmmtools` which can be installed from\nanaconda: `conda install -c omnia openmmtools`\n\nOpenmmtools just provides a ready-made system for the lennard jones\nparticles.\n\nThis script is broken up into several parts:\n\n1. Importing the pieces from wepy to run a WExplore simulation.\n\n2. Definition of a distance metric for this system and process.\n\n3. Definition of the parameters used in the simulation. Each is\ndescribed in detail.\n\n4. Definition of the I/O end points.\n\n5. Initialize the OpenMM Runner and get the starting state from the\nopenmmtools system.\n\n6. Initialize the Wexplore resampler.\n\n7. Initialize the boundary conditions. This makes the simulation\nnon-equilibrium by restarting \"unbound\" simulations in the initial\nstate.\n\n8. Initialize the reporters. This will result in the results files.\n\n9. Initialize the work mapper, which in this case is trivial since\nthis will only be run in serial.\n\n10. Initialize the simulation manager with all the parts.\n\n11. Actually run the simulation.\n\n\"\"\"\n\nimport sys\nfrom copy import copy\nimport os\nimport os.path as osp\n\nimport numpy as np\n\nimport simtk.openmm.app as omma\nimport simtk.openmm as omm\nimport simtk.unit as unit\n\nfrom openmmtools.testsystems import LennardJonesPair\nimport mdtraj as mdj\nfrom wepy.util.mdtraj import mdtraj_to_json_topology\n\nfrom wepy.sim_manager import Manager\n\nfrom wepy.resampling.distances.distance import Distance\nfrom wepy.resampling.resamplers.wexplore import WExploreResampler\nfrom wepy.walker import Walker\nfrom wepy.runners.openmm import OpenMMRunner, OpenMMState\nfrom wepy.runners.openmm import UNIT_NAMES, GET_STATE_KWARG_DEFAULTS\nfrom wepy.work_mapper.mapper import Mapper\nfrom wepy.boundary_conditions.unbinding import UnbindingBC\nfrom wepy.reporter.hdf5 import WepyHDF5Reporter\nfrom wepy.reporter.dashboard import WExploreDashboardReporter\nfrom wepy.reporter.setup import SetupReporter\nfrom wepy.reporter.restart import RestartReporter\n\nfrom scipy.spatial.distance import euclidean\n\n\n## PARAMETERS\n\n# Platform used for OpenMM which uses different hardware computation\n# kernels. Options are: Reference, CPU, OpenCL, CUDA.\n\n# we use the Reference platform because this is just a test\nPLATFORM = 'Reference'\n\n# Langevin Integrator\nTEMPERATURE= 300.0*unit.kelvin\nFRICTION_COEFFICIENT = 1/unit.picosecond\n# step size of time integrations\nSTEP_SIZE = 0.002*unit.picoseconds\n\n# Resampler parameters\n\n# the maximum weight allowed for a walker\nPMAX = 0.5\n# the minimum weight allowed for a walker\nPMIN = 1e-12\n\n# the maximum number of regions allowed under each parent region\nMAX_N_REGIONS = (10, 10, 10, 10)\n\n# the maximum size of regions, new regions will be created if a walker\n# is beyond this distance from each voronoi image unless there is an\n# already maximal number of regions\nMAX_REGION_SIZES = (1, 0.5, .35, .25) # nanometers\n\n# boundary condition parameters\n\n# maximum distance between between any atom of the ligand and any\n# other atom of the protein, if the shortest such atom-atom distance\n# is larger than this the ligand will be considered unbound and\n# restarted in the initial state\nCUTOFF_DISTANCE = 1.0 # nm\n\n# reporting parameters\n\n# these are the properties of the states (i.e. 
from OpenMM) which will\n# be saved into the HDF5\nSAVE_FIELDS = ('positions', 'box_vectors', 'velocities')\n# these are the names of the units which will be stored with each\n# field in the HDF5\nUNITS = UNIT_NAMES\n\n## INPUTS/OUTPUTS\n\n# the inputs directory\ninputs_dir = osp.realpath('./inputs')\n# the outputs path\noutputs_dir = osp.realpath('./outputs')\n# make the outputs dir if it doesn't exist\nos.makedirs(outputs_dir, exist_ok=True)\n\n# inputs filenames\njson_top_filename = \"pair.top.json\"\n\n# outputs\nhdf5_filename = 'results.wepy.h5'\nsetup_state_filename = 'setup.pkl'\nrestart_state_filename = 'restart.pkl'\n\n# normalize the input paths\njson_top_path = osp.join(inputs_dir, json_top_filename)\n\n# normalize the output paths\nhdf5_path = osp.join(outputs_dir, hdf5_filename)\nsetup_state_path = osp.join(outputs_dir, setup_state_filename)\nrestart_state_path = osp.join(outputs_dir, restart_state_filename)\n\n## System and OpenMMRunner\n\n# make the test system from openmmtools\ntest_sys = LennardJonesPair()\n\n# make the integrator\nintegrator = omm.LangevinIntegrator(TEMPERATURE, FRICTION_COEFFICIENT, STEP_SIZE)\n\n# make a context and set the positions\ncontext = omm.Context(test_sys.system, copy(integrator))\ncontext.setPositions(test_sys.positions)\n\n# get the data from this context so we have a state to start the\n# simulation with\nget_state_kwargs = dict(GET_STATE_KWARG_DEFAULTS)\ninit_sim_state = context.getState(**get_state_kwargs)\ninit_state = OpenMMState(init_sim_state)\n\n# initialize the runner\nrunner = OpenMMRunner(test_sys.system, test_sys.topology, integrator, platform=PLATFORM)\n\n## Distance Metric\n# we define a simple distance metric for this system, assuming the\n# positions are in a 'positions' field\nclass PairDistance(Distance):\n\n def __init__(self, metric=euclidean):\n self.metric = metric\n\n def image(self, state):\n return state['positions']\n\n def image_distance(self, image_a, image_b):\n dist_a = self.metric(image_a[0], image_a[1])\n dist_b = self.metric(image_b[0], image_b[1])\n\n return np.abs(dist_a - dist_b)\n\n\n# make a distance object which can be used to compute the distance\n# between two walkers, for our scorer class\ndistance = PairDistance()\n\n## Resampler\nresampler = WExploreResampler(distance=distance,\n init_state=init_state,\n max_region_sizes=MAX_REGION_SIZES,\n max_n_regions=MAX_N_REGIONS,\n pmin=PMIN, pmax=PMAX)\n\n## Boundary Conditions\n\n# the mdtraj here is needed for the distance function\nmdtraj_topology = mdj.Topology.from_openmm(test_sys.topology)\n\n# initialize the unbinding boundary conditions\nubc = UnbindingBC(cutoff_distance=CUTOFF_DISTANCE,\n initial_state=init_state,\n topology=mdtraj_topology,\n ligand_idxs=np.array(test_sys.ligand_indices),\n receptor_idxs=np.array(test_sys.receptor_indices))\n\n## Reporters\n\njson_str_top = mdtraj_to_json_topology(mdtraj_topology)\n# make a dictionary of units for adding to the HDF5\nunits = dict(UNIT_NAMES)\n\n# open it in truncate mode first, then switch after first run\nhdf5_reporter = WepyHDF5Reporter(hdf5_path, mode='w',\n save_fields=SAVE_FIELDS,\n resampler=resampler,\n boundary_conditions=ubc,\n topology=json_str_top,\n units=units,)\n\ndashboard_reporter = WExploreDashboardReporter('./outputs/wepy.dash.txt', mode='w',\n step_time=STEP_SIZE.value_in_unit(unit.second),\n max_n_regions=resampler.max_n_regions,\n max_region_sizes=resampler.max_region_sizes,\n bc_cutoff_distance=ubc.cutoff_distance)\n\nsetup_reporter = SetupReporter(setup_state_path, 
mode='w')\n\nrestart_reporter = RestartReporter(restart_state_path, mode='w')\n\nreporters = [hdf5_reporter, dashboard_reporter, setup_reporter, restart_reporter]\n\n\n\n## Work Mapper\n\n# a simple work mapper\nmapper = Mapper()\n\n\n\n## Run the simulation\n\n\nif __name__ == \"__main__\":\n\n if sys.argv[1] == \"-h\" or sys.argv[1] == \"--help\":\n print(\"arguments: n_runs, n_cycles, n_steps, n_walkers\")\n else:\n n_runs = int(sys.argv[1])\n n_cycles = int(sys.argv[2])\n n_steps = int(sys.argv[3])\n n_walkers = int(sys.argv[4])\n\n print(\"Number of steps: {}\".format(n_steps))\n print(\"Number of cycles: {}\".format(n_cycles))\n\n # create the initial walkers\n init_weight = 1.0 / n_walkers\n init_walkers = [Walker(OpenMMState(init_sim_state), init_weight) for i in range(n_walkers)]\n\n # initialize the simulation manager\n sim_manager = Manager(init_walkers,\n runner=runner,\n resampler=resampler,\n boundary_conditions=ubc,\n work_mapper=mapper,\n reporters=reporters)\n\n # make a number of steps for each cycle. In principle it could be\n # different each cycle\n steps = [n_steps for i in range(n_cycles)]\n\n # actually run the simulations\n print(\"Running Simulations\")\n for run_idx in range(n_runs):\n print(\"Starting run: {}\".format(run_idx))\n sim_manager.run_simulation(n_cycles, steps, debug_prints=True)\n print(\"Finished run: {}\".format(run_idx))\n\n\n print(\"Finished first file\")\n"
] |
[
[
"numpy.array",
"numpy.abs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
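The we.py row above defines its distance metric on "images" that are just the two particles' positions: the distance between two configurations is the absolute difference of their interparticle separations. A self-contained sketch of that metric outside wepy:

import numpy as np
from scipy.spatial.distance import euclidean

def pair_image_distance(image_a, image_b):
    # each image is a (2, 3) array of positions; compare pair separations
    dist_a = euclidean(image_a[0], image_a[1])
    dist_b = euclidean(image_b[0], image_b[1])
    return np.abs(dist_a - dist_b)

a = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
b = np.array([[0.0, 0.0, 0.0], [2.5, 0.0, 0.0]])
print(pair_image_distance(a, b))  # 1.5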
ChristopherDavisUCI/NFL2021-Simulation
|
[
"5dd414285cebf781fe3a1f6756459f6075fbfab5"
] |
[
"make_standings.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom math import isclose\n\ndiv_series = pd.read_csv(\"data/divisions.csv\",index_col=0,squeeze=True)\ndiv_series.name = None\nteams = sorted(list(div_series.index))\ntol = .0001\n\n\n\nreverse_dict = {'Win':'Loss','Loss':'Win', 'Tie':'Tie'}\n\ndef get_outcome(row):\n s = row[\"Points_scored\"]\n a = row[\"Points_allowed\"]\n if s > a:\n return \"Win\"\n elif s < a:\n return \"Loss\"\n elif s == a:\n return \"Tie\"\n\ndef get_WLT(games):\n s = games.Outcome.value_counts()\n try:\n return (s.get(\"Win\",0) + 0.5*s.get(\"Tie\",0))/len(games)\n except:\n if len(games) == 0:\n return 0\n else:\n raise\n\ndef get_strength(teams, df_ind):\n return get_WLT(df_ind[df_ind[\"Team\"].isin(teams)])\n\ndef get_victories(team, df_ind):\n return set(df_ind[\"Opponent\"][(df_ind[\"Team\"] == team) & (df_ind[\"Outcome\"] == \"Win\")])\n\ndef get_opps(team, df_ind):\n return set(df_ind[\"Opponent\"][df_ind[\"Team\"] == team])\n\ndef get_common(teams, df_ind):\n opps = []\n for t in teams:\n opps.append(get_opps(t, df_ind))\n return set.intersection(*opps)\n\n# This seems to assume teams are either all in the same division\n# or all in different divisions.\ndef analyze_dict(d, df_ind, df_standings):\n outs = sorted(d.values(),reverse=True)\n if isclose(outs[0],outs[-1],abs_tol=tol):\n return None\n top_teams = [k for k in d.keys() if isclose(d[k],outs[0],abs_tol = tol)]\n if len(top_teams) == 1:\n return top_teams[0]\n if div_series[top_teams[0]] == div_series[top_teams[1]]:\n return break_tie_div(top_teams,df_ind,df_standings)\n else:\n return break_tie_conf(top_teams,df_ind,df_standings)\n\nbreak_tie_fns = {}\n\n# Are there any numerical precision problems here?\ndef fd21(teams,df_ind,df_standings):\n wlt_dict = {teams[0]:get_WLT(df_ind[(df_ind[\"Team\"] == teams[0]) & (df_ind[\"Opponent\"] == teams[1])])}\n wlt_dict[teams[1]] = get_WLT(df_ind[(df_ind[\"Team\"] == teams[1]) & (df_ind[\"Opponent\"] == teams[0])])\n return analyze_dict(wlt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,1)] = fd21\n\ndef fd22(teams,df_ind,df_standings):\n div_dict = {t: get_WLT(df_ind[(df_ind.Team == t) & (df_ind.in_div)]) for t in teams}\n return analyze_dict(div_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,2)] = fd22\n\ndef fd23(teams,df_ind,df_standings):\n common = get_common(teams,df_ind)\n common_dict = {t: get_WLT(df_ind[(df_ind.Team == t) & (df_ind.Opponent.isin(common))]) for t in teams}\n return analyze_dict(common_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,3)] = fd23\n\ndef fd24(teams,df_ind,df_standings):\n conf_dict = {t: get_WLT(df_ind[(df_ind.Team == t) & (df_ind.in_conf)]) for t in teams}\n return analyze_dict(conf_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,4)] = fd24\n\ndef fd25(teams,df_ind,df_standings):\n wlt_dict = {t: get_strength(get_victories(t,df_ind),df_ind) for t in teams}\n return analyze_dict(wlt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,5)] = fd25\nbreak_tie_fns[(\"div\",3,5)] = fd25\n\ndef fd26(teams,df_ind,df_standings):\n wlt_dict = {t: get_strength(get_opps(t,df_ind),df_ind) for t in teams}\n return analyze_dict(wlt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,6)] = fd26\nbreak_tie_fns[(\"div\",3,6)] = fd26\n\n# The negatives are because we want \"lower rank\" to correspond to higher numbers\n# Maybe would be better to have a reverse flag in the dictionary\ndef fd27(teams,df_ind,df_standings):\n conf = div_series[teams[0]][:3]\n ps_series = 
(-df_standings.query(\"Conference==@conf\").Points_scored).rank(method=\"min\")\n pa_series = df_standings.query(\"Conference==@conf\").Points_allowed.rank(method=\"min\")\n rank_dict = {t: -ps_series[t] - pa_series[t] for t in teams}\n return analyze_dict(rank_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,7)] = fd27\nbreak_tie_fns[(\"div\",3,7)] = fd27\n\n# The negatives are because we want \"lower rank\" to correspond to higher numbers\ndef fd28(teams,df_ind,df_standings):\n ps_series = (-df_standings.Points_scored).rank(method=\"min\")\n pa_series = df_standings.Points_allowed.rank(method=\"min\")\n rank_dict = {t: -ps_series[t] - pa_series[t] for t in teams}\n return analyze_dict(rank_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,8)] = fd28\nbreak_tie_fns[(\"div\",3,8)] = fd28\n\ndef fd29(teams,df_ind,df_standings):\n common = get_common(teams,df_ind)\n pt_dict = {}\n for t in teams:\n df_common = df_ind[[\"Points_scored\",\"Points_allowed\"]][(df_ind.Team == t) & (df_ind.Opponent.isin(common))].sum()\n pt_dict[t] = df_common[\"Points_scored\"]-df_common[\"Points_allowed\"]\n return analyze_dict(pt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,9)] = fd29\nbreak_tie_fns[(\"div\",3,9)] = fd29\n\n# Would it be better to use df_standings for this?\ndef fd210(teams,df_ind,df_standings):\n pt_dict = {}\n for t in teams:\n df_common = df_ind[[\"Points_scored\",\"Points_allowed\"]][df_ind.Team == t].sum()\n pt_dict[t] = df_common[\"Points_scored\"]-df_common[\"Points_allowed\"]\n return analyze_dict(pt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",2,10)] = fd210\nbreak_tie_fns[(\"div\",3,10)] = fd210\n\n# How does the coin toss work for three teams?\ndef fd212(teams,df_ind,df_standings):\n return np.random.choice(teams)\n\nbreak_tie_fns[(\"div\",2,12)] = fd212\nbreak_tie_fns[(\"div\",3,12)] = fd212\n\ndef fd31(teams,df_ind,df_standings):\n df = df_ind[df_ind[\"Team\"].isin(teams) & df_ind[\"Opponent\"].isin(teams)]\n wlt_dict = {t: get_WLT(df[df.Team == t]) for t in teams}\n return analyze_dict(wlt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",3,1)] = fd31\n\ndef fd32(teams,df_ind,df_standings):\n wlt_dict = {t: get_WLT(df_ind[(df_ind.Team == t) & (df_ind.in_div)]) for t in teams}\n return analyze_dict(wlt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",3,2)] = fd32\n\ndef fd33(teams,df_ind,df_standings):\n opps = []\n for t in teams:\n opps.append(get_opps(t,df_ind))\n common = set.intersection(*opps)\n wlt_dict = {t: get_WLT(df_ind[(df_ind.Team == t) & (df_ind.Opponent.isin(common))]) for t in teams}\n return analyze_dict(wlt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",3,3)] = fd33\n\ndef fd34(teams,df_ind,df_standings):\n wlt_dict = {t: get_WLT(df_ind[(df_ind.Team == t) & (df_ind.in_conf)]) for t in teams}\n return analyze_dict(wlt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"div\",3,4)] = fd34\n\ndef fc21(teams,df_ind,df_standings):\n df_head = df_ind[(df_ind[\"Team\"] == teams[0]) & (df_ind[\"Opponent\"] == teams[1])]\n if len(df_head) == 0:\n return None\n wlt_dict = {teams[0]:get_WLT(df_ind[(df_ind[\"Team\"] == teams[0]) & (df_ind[\"Opponent\"] == teams[1])])}\n wlt_dict[teams[1]] = get_WLT(df_ind[(df_ind[\"Team\"] == teams[1]) & (df_ind[\"Opponent\"] == teams[0])])\n return analyze_dict(wlt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"conf\",2,1)] = fc21\n\ndef fc23(teams,df_ind,df_standings):\n common = get_common(teams,df_ind)\n if len(common) < 4:\n return None\n common_dict = {t: get_WLT(df_ind[(df_ind.Team == t) & 
(df_ind.Opponent.isin(common))]) for t in teams}\n return analyze_dict(common_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"conf\",2,3)] = fc23\n\ndef fc28(teams,df_ind,df_standings):\n pt_dict = {}\n for t in teams:\n df_conf = df_ind[[\"Points_scored\",\"Points_allowed\"]][(df_ind.Team == t) & (df_ind.in_conf)].sum()\n pt_dict[t] = df_conf[\"Points_scored\"]-df_conf[\"Points_allowed\"]\n return analyze_dict(pt_dict, df_ind, df_standings)\n\nbreak_tie_fns[(\"conf\",2,8)] = fc28\n\nbreak_tie_fns[(\"conf\",2,2)] = break_tie_fns[(\"div\",2,4)]\nbreak_tie_fns[(\"conf\",2,4)] = break_tie_fns[(\"div\",2,5)]\nbreak_tie_fns[(\"conf\",2,5)] = break_tie_fns[(\"div\",2,6)]\nbreak_tie_fns[(\"conf\",2,6)] = break_tie_fns[(\"div\",2,7)]\nbreak_tie_fns[(\"conf\",2,7)] = break_tie_fns[(\"div\",2,8)]\nbreak_tie_fns[(\"conf\",2,9)] = break_tie_fns[(\"div\",2,10)]\nbreak_tie_fns[(\"conf\",2,11)] = break_tie_fns[(\"div\",2,12)]\n\ndef find_sweep(teams, df_ind, df_standings):\n for t in teams:\n others = [x for x in teams if x != t]\n outcomes = []\n temp_df = df_ind[(df_ind[\"Team\"] == t) & (df_ind[\"Opponent\"].isin(others))]\n outcomes += list(temp_df[\"Outcome\"])\n if len(outcomes) >= len(others): # make sure teams played\n if set(outcomes) == {'Win'}:\n return t\n elif set(outcomes) == {'Loss'}:\n return break_tie_conf(others,df_ind,df_standings)\n return None\n\nbreak_tie_fns[(\"conf\",3,2)] = find_sweep\nbreak_tie_fns[(\"conf\",3,3)] = break_tie_fns[(\"div\",3,4)]\nbreak_tie_fns[(\"conf\",3,4)] = break_tie_fns[(\"conf\",2,3)]\nbreak_tie_fns[(\"conf\",3,5)] = break_tie_fns[(\"div\",2,5)]\nbreak_tie_fns[(\"conf\",3,6)] = break_tie_fns[(\"div\",2,6)]\nbreak_tie_fns[(\"conf\",3,7)] = break_tie_fns[(\"div\",2,7)]\nbreak_tie_fns[(\"conf\",3,8)] = break_tie_fns[(\"div\",2,8)]\nbreak_tie_fns[(\"conf\",3,9)] = break_tie_fns[(\"conf\",2,8)]\nbreak_tie_fns[(\"conf\",3,10)] = break_tie_fns[(\"div\",2,10)]\nbreak_tie_fns[(\"conf\",3,12)] = break_tie_fns[(\"div\",2,12)]\n\ndef break_tie_div(teams,df_ind,df_standings):\n scenario = 2 if len(teams) == 2 else 3\n rules = sorted([c for (a,b,c) in break_tie_fns.keys() if a == 'div' and b == scenario])\n for c in rules:\n t = break_tie_fns[(\"div\",scenario,c)](teams,df_ind,df_standings)\n if t is not None:\n return t\n\ndef break_tie_conf(teams,df_ind,df_standings):\n scenario = 2 if len(teams) == 2 else 3\n rules = sorted([c for (a,b,c) in break_tie_fns.keys() if a == 'conf' and b == scenario])\n for c in rules:\n t = break_tie_fns[(\"conf\",scenario,c)](teams,df_ind,df_standings)\n #print((\"conf\",scenario,c))\n if t is not None:\n return t\n\n# Any numerical precision problems here?\ndef get_div_winners(df_ind,df_standings):\n winner_dict = {}\n divs = sorted(list(set(df_standings.Division)))\n for div in divs:\n df = df_standings[df_standings[\"Division\"] == div].sort_values(\"WLT\",ascending=False).copy()\n t = analyze_dict(dict(zip(df.index,df.WLT)),df_ind,df_standings)\n if t is None:\n t = break_tie_div(list(df.index),df_ind,df_standings)\n winner_dict[div] = t\n return winner_dict\n\ndef rank_div_winners(dw,df_ind,df_standings):\n playoffs = {}\n for conf in [\"AFC\",\"NFC\"]:\n playoffs[conf] = []\n teams = [dw[x] for x in dw.keys() if x[:3] == conf]\n while len(playoffs[conf]) < 3:\n df = df_standings[df_standings[\"Team\"].isin(teams)].sort_values(\"WLT\",ascending=False).copy()\n t = analyze_dict(dict(df.loc[teams,\"WLT\"]), df_ind, df_standings)\n if t is None:\n t = break_tie_conf(teams,df_ind,df_standings)\n playoffs[conf].append(t)\n try:\n 
teams.remove(t)\n except:\n pass\n playoffs[conf].append(teams[0])\n return playoffs\n\ndef rank_within_divs(dw,df_ind,df_standings):\n dr = {}\n for div in dw.keys():\n dr[div] = []\n df = df_standings[(df_standings[\"Division\"] == div) & ~(df_standings[\"Team\"] == dw[div])].sort_values(\"WLT\",ascending=False).copy()\n teams = list(df.index)\n while len(dr[div]) < 2:\n t = analyze_dict(dict(df.loc[teams,\"WLT\"]), df_ind, df_standings)\n if t is None:\n t = break_tie_div(teams,df_ind,df_standings)\n dr[div].append(t)\n teams.remove(t)\n dr[div].append(teams[0])\n dr[div] = [dw[div]]+dr[div]\n return dr\n\ndef make_ind(df):\n df_ind = pd.DataFrame(index=range(2*len(df)),columns=[\"Team\",\"Opponent\",\"Points_scored\",\"Points_allowed\",\"Outcome\",\"in_div\",\"in_conf\"])\n cols = list(df_ind.columns)\n inds = [cols.index(col) for col in [\"Team\",\"Opponent\",\"in_div\",\"in_conf\"]]\n df_ind.iloc[::2,inds] = df[[\"team_home\",\"team_away\",\"in_div\",\"in_conf\"]]\n df_ind.iloc[1::2,inds] = df[[\"team_away\",\"team_home\",\"in_div\",\"in_conf\"]]\n scores = df[[\"score_home\",\"score_away\"]].apply(tuple,axis=1).to_list()\n df_ind.loc[::2,[\"Points_scored\",\"Points_allowed\"]] = scores\n df_ind.loc[1::2,[\"Points_allowed\",\"Points_scored\"]] = scores\n df_ind.Outcome = df_ind.apply(get_outcome,axis=1)\n return df_ind\n\nclass Standings:\n df_standings = pd.DataFrame(index=sorted(teams,key=lambda t: div_series[t]),columns=[\"Team\",\"Wins\",\"Losses\",\"Ties\",\"Points_scored\",\"Points_allowed\",\"WLT\",\"Division\",\"Conference\"])\n df_standings.Team = df_standings.index\n df_standings.Division = df_standings.index.map(lambda t: div_series[t])\n df_standings.Conference = df_standings.index.map(lambda t: div_series[t][:3])\n standings = df_standings.copy()\n \n def __init__(self,df_scores):\n if \"schedule_playoff\" in df_scores.columns:\n df_scores = df_scores.loc[~df_scores[\"schedule_playoff\"]].copy()\n df_ind = make_ind(df_scores)\n for a,b in df_ind.groupby(\"Team\"):\n res = b.Outcome.value_counts()\n self.standings.loc[a,[\"Wins\",\"Losses\",\"Ties\",\"Points_scored\",\"Points_allowed\"]] = [res.get(\"Win\",0), res.get(\"Loss\",0), res.get(\"Tie\",0),\n b[\"Points_scored\"].sum(),b[\"Points_allowed\"].sum()]\n self.standings[\"WLT\"] = (self.standings[\"Wins\"]+0.5*self.standings[\"Ties\"])/(self.standings[\"Wins\"]+self.standings[\"Ties\"]+self.standings[\"Losses\"])\n dw_unranked = get_div_winners(df_ind,self.standings)\n dw = rank_div_winners(dw_unranked, df_ind, self.standings)\n self.div_ranks = rank_within_divs(dw_unranked,df_ind,self.standings)\n self.standings[\"Division_rank\"] = self.standings.apply(lambda x: self.div_ranks[x[\"Division\"]].index(x.name)+1, axis=1)\n self.standings = self.standings.sort_values([\"Division\", \"Division_rank\"])\n wild_cards = {}\n div_eligible = {k:self.div_ranks[k][1:] for k in self.div_ranks.keys()}\n for conf in [\"AFC\",\"NFC\"]:\n wild_cards[conf] = []\n while len(wild_cards[conf]) < 3:\n top_teams = [div_eligible[x][0] for x in div_eligible.keys() if x[:3] == conf]\n t = analyze_dict(dict(self.standings.loc[top_teams,\"WLT\"]), df_ind, self.standings)\n if t is None:\n t = break_tie_conf(top_teams, df_ind, self.standings)\n wild_cards[conf].append(t)\n try:\n div_eligible[div_series[t]].remove(t)\n except:\n pass\n self.playoffs = {conf:dw[conf] + wild_cards[conf] for conf in [\"AFC\",\"NFC\"]}\n\n"
] |
[
[
"pandas.read_csv",
"numpy.random.choice"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
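The make_standings.py row above ranks teams by a win-loss-tie percentage in which a tie counts as half a win (get_WLT). A tiny sketch of that computation on a toy outcome list:

def get_wlt(outcomes):
    # ties count as half a win; an empty schedule scores 0
    if not outcomes:
        return 0.0
    return (outcomes.count("Win") + 0.5 * outcomes.count("Tie")) / len(outcomes)

print(get_wlt(["Win", "Win", "Tie", "Loss"]))  # 0.625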
Sungyeop/Powerlaw_ML
|
[
"953061f848798409d6133f67679a2c55b8187bdf"
] |
[
"Supervised/Supervised.py"
] |
[
"import torch\nimport torchvision\nimport torch.nn.functional as F\nfrom torch import nn, optim\nfrom torchvision import transforms, datasets\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport copy\nfrom scipy.special import expit\n\n\n\n# Training Options\n#==============================================================================================================\nData = 'MNIST' # MNIST dataset\n# Data = 'FMNIST' # Fashion-MNIST dataset\nEPOCH = 10 # Training epoch\nbatch = 100 # mini-batch size\nn1 = 70 # the number of nodes in the first hidden layer (Z1)\nn2 = 50 # the number of nodes in the second hidden layer (Z2)\nn3 = 35 # the number of nodes in the third hidden layer (Z3)\nlr = 0.005 # learning rate\nactivation = 'Sigmoid' # Sigmoid activation function\n# activation = 'ReLU' # ReLU activation function\nview = -1 # the snapshot time(epoch) of the visualization of the cluster \n # default : -1 (the last epoch) (0 <= view < EPOCH) \nepsilon = 10**(-8) # divergence regulator\nDEVICE = \"cpu\"\n#==============================================================================================================\n\n# Data Load\n#==============================================================================================================\nif Data == 'MNIST':\n trainset = datasets.MNIST(root = './.data/', train = True, download = True, transform = transforms.ToTensor())\n testset = datasets.MNIST(root = './.data/', train = False, download = True, transform = transforms.ToTensor())\nelif Data == 'FMNIST':\n trainset = datasets.FashionMNIST(root = './.data/', train = True, download = True, transform = transforms.ToTensor())\n testset = datasets.FashionMNIST(root = './.data/', train = False, download = True, transform = transforms.ToTensor()) \n\ntrain_loader = torch.utils.data.DataLoader(dataset = trainset, batch_size=batch, shuffle = True, num_workers=0)\ntest_loader = torch.utils.data.DataLoader(dataset = testset, batch_size=batch, shuffle = True, num_workers=0)\n#==============================================================================================================\n\n\nclass MLP(nn.Module):\n \n def __init__(self, n1, n2, n3):\n super(MLP, self).__init__()\n \n self.fc1 = nn.Linear(28*28,n1)\n self.fc2 = nn.Linear(n1,n2)\n self.fc3 = nn.Linear(n2,n3)\n self.fc4 = nn.Linear(n3,10)\n\n def forward(self,x):\n x = x.view(-1, 784)\n if activation == 'Sigmoid':\n Z1 = torch.sigmoid(self.fc1(x))\n Z2 = torch.sigmoid(self.fc2(Z1))\n Z3 = torch.sigmoid(self.fc3(Z2))\n elif activation == 'ReLU':\n Z1 = torch.relu(self.fc1(x))\n Z2 = torch.relu(self.fc2(Z1))\n Z3 = torch.relu(self.fc3(Z2))\n Y = self.fc4(Z3)\n return Z1, Z2, Z3, Y\n\nmodel = MLP(n1,n2,n3).to(DEVICE)\noptimizer = torch.optim.Adam(model.parameters(), lr = lr)\n\ndef evaluate(model, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(DEVICE), target.to(DEVICE)\n _, _, _, output = model(data)\n\n test_loss += F.cross_entropy(output, target,\n reduction='sum').item()\n\n pred = output.max(1, keepdim=True)[1]\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n test_accuracy = 100. 
* correct / len(test_loader.dataset)\n return test_loss, test_accuracy\n\ndef train(model, train_loader, history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, \\\n history_W4, history_b4, history_trainloss, history_testloss): \n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(DEVICE), target.to(DEVICE)\n optimizer.zero_grad()\n _, _, _, output = model(data)\n \n W1 = model.fc1.weight.data.detach().numpy()\n b1 = model.fc1.bias.data.detach().numpy()\n W2 = model.fc2.weight.data.detach().numpy()\n b2 = model.fc2.bias.data.detach().numpy()\n W3 = model.fc3.weight.data.detach().numpy()\n b3 = model.fc3.bias.data.detach().numpy()\n W4 = model.fc4.weight.data.detach().numpy()\n b4 = model.fc4.bias.data.detach().numpy()\n \n history_W1.append(copy.deepcopy(W1))\n history_b1.append(copy.deepcopy(b1))\n history_W2.append(copy.deepcopy(W2))\n history_b2.append(copy.deepcopy(b2)) \n history_W3.append(copy.deepcopy(W3))\n history_b3.append(copy.deepcopy(b3)) \n history_W4.append(copy.deepcopy(W4))\n history_b4.append(copy.deepcopy(b4)) \n \n train_loss = F.cross_entropy(output, target)\n history_trainloss.append(train_loss.detach().numpy())\n test_data = testset.data.view(-1,784).type(torch.FloatTensor)/255.\n Y_test = testset.targets\n _, _, _, Y_test_pred = model(test_data)\n test_loss = F.cross_entropy(Y_test_pred, Y_test)\n history_testloss.append(test_loss.detach().numpy()) \n \n train_loss.backward()\n optimizer.step()\n \n return (history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, \\\n history_W4, history_b4, history_trainloss, history_testloss)\n\ndef Sigmoid(x):\n return expit(x)\n\ndef ReLU(x):\n return x * (x>0)\n\ndef FF(test, W1, b1, W2, b2, W3, b3, W4, b4):\n if activation == 'Sigmoid':\n E1 = Sigmoid(np.einsum('ij,jk->ik', test, W1.T) + b1)\n E2 = Sigmoid(np.einsum('ij,jk->ik', E1, W2.T) + b2)\n E3 = Sigmoid(np.einsum('ij,jk->ik', E2, W3.T) + b3)\n elif activation == 'ReLU':\n E1 = ReLU(np.einsum('ij,jk->ik', test, W1.T) + b1)\n E2 = ReLU(np.einsum('ij,jk->ik', E1, W2.T) + b2)\n E3 = ReLU(np.einsum('ij,jk->ik', E2, W3.T) + b3)\n Y = np.einsum('ij,jk->ik', E3, W4.T) + b4\n return E1, E2, E3, Y\n\ndef Cluster(history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, history_W4, history_b4, view):\n if view == -1:\n i = -1\n else:\n i = int(view*len(history_W1)/batch)\n\n W1 = history_W1[i]\n b1 = history_b1[i]\n b1 = np.reshape(b1, (1,len(b1)))\n W2 = history_W2[i]\n b2 = history_b2[i]\n b2 = np.reshape(b2, (1,len(b2)))\n W3 = history_W3[i]\n b3 = history_b3[i]\n b3 = np.reshape(b3, (1,len(b3)))\n W4 = history_W4[i]\n b4 = history_b4[i]\n b4 = np.reshape(b4, (1,len(b4)))\n \n X = trainset.data.view(-1,28*28)\n X = X.type(torch.FloatTensor)/255.\n X = X.detach().numpy()\n Z1, Z2, Z3, Y = FF(X, W1, b1, W2, b2, W3, b3, W4, b4)\n\n if activation == 'Sigmoid':\n bina_Z1 = np.where(Z1 > 0.5, 1,0)\n name_Z1, count_Z1 = np.unique(bina_Z1, return_counts=True, axis=0)\n k_Z1, m_k_Z1 = np.unique(count_Z1, return_counts=True)\n\n bina_Z2 = np.where(Z2 > 0.5, 1,0)\n name_Z2, count_Z2 = np.unique(bina_Z2, return_counts=True, axis=0)\n k_Z2, m_k_Z2 = np.unique(count_Z2, return_counts=True)\n\n bina_Z3 = np.where(Z3 > 0.5, 1,0)\n name_Z3, count_Z3 = np.unique(bina_Z3, return_counts=True, axis=0)\n k_Z3, m_k_Z3 = np.unique(count_Z3, return_counts=True)\n elif activation == 'ReLU':\n bina_Z1 = np.where(Z1 > np.mean(Z1), 1,0)\n name_Z1, count_Z1 = np.unique(bina_Z1, return_counts=True, axis=0)\n k_Z1, 
m_k_Z1 = np.unique(count_Z1, return_counts=True)\n\n bina_Z2 = np.where(Z2 > np.mean(Z2), 1,0)\n name_Z2, count_Z2 = np.unique(bina_Z2, return_counts=True, axis=0)\n k_Z2, m_k_Z2 = np.unique(count_Z2, return_counts=True)\n\n bina_Z3 = np.where(Z3 > np.mean(Z3), 1,0)\n name_Z3, count_Z3 = np.unique(bina_Z3, return_counts=True, axis=0)\n k_Z3, m_k_Z3 = np.unique(count_Z3, return_counts=True)\n \n return (k_Z1, m_k_Z1, k_Z2, m_k_Z2, k_Z3, m_k_Z3)\n\n\ndef main():\n history_W1 = []\n history_b1 = []\n history_W2 = []\n history_b2 = []\n history_W3 = []\n history_b3 = []\n history_W4 = []\n history_b4 = []\n history_W5 = []\n history_b5 = []\n history_W6 = []\n history_b6 = []\n history_trainloss = []\n history_testloss = []\n \n print('Training Starts!')\n print('(Classification model(X-Z1-Z2-Z3-Y), Data : {}, Activation : {})'.format(Data, activation))\n \n for epoch in range(1, EPOCH + 1):\n history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, history_W4, history_b4, history_trainloss, history_testloss = \\\n train(model, train_loader, history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, history_W4, history_b4, history_trainloss, history_testloss)\n\n train_loss, train_accuracy = evaluate(model, train_loader)\n test_loss, test_accuracy = evaluate(model, test_loader)\n\n print('[{} epoch] Train accuracy: {:.2f}%, Test accuracy: {:.2f}%'.format(epoch, train_accuracy, test_accuracy))\n\n print('Training Ends!')\n\n print('Visualizing the cluster frequency of the hidden layer...')\n \n k_Z1, m_k_Z1, k_Z2, m_k_Z2, k_Z3, m_k_Z3 = \\\n Cluster(history_W1, history_b1, history_W2, history_b2, history_W3, history_b3, history_W4, history_b4, view)\n\n fig = plt.figure(figsize=(5,5))\n plt.plot(np.log(k_Z1), np.log(m_k_Z1), 'r.', label='$Z_1$')\n plt.plot(np.log(k_Z2), np.log(m_k_Z2), 'g.', label='$Z_2$')\n plt.plot(np.log(k_Z3), np.log(m_k_Z3), 'b.', label='$Z_3$')\n plt.xlabel(r'$\\log \\:k$', fontsize=13)\n plt.ylabel(r'$\\log \\:m(k)$', fontsize=13)\n plt.title('Cluster distribution', fontsize=13)\n plt.tight_layout()\n plt.legend(fontsize=12)\n plt.show()\n \n \n\n\n\n\n\n \n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.log",
"matplotlib.pyplot.tight_layout",
"scipy.special.expit",
"matplotlib.pyplot.title",
"numpy.einsum",
"matplotlib.pyplot.figure",
"numpy.unique",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.no_grad",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
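The Cluster() routine in the code entry above relies on a binarize-and-count idiom: hidden activations are thresholded into binary codes, identical codes are grouped, and m(k) counts how many codes share each occupancy k. A minimal standalone sketch of that idiom, assuming sigmoid units thresholded at 0.5; the activation array here is hypothetical:

import numpy as np

Z = np.random.rand(1000, 16)                                  # hypothetical hidden activations
bina = np.where(Z > 0.5, 1, 0)                                # 0.5 threshold suits sigmoid units
codes, counts = np.unique(bina, return_counts=True, axis=0)   # group identical binary patterns
k, m_k = np.unique(counts, return_counts=True)                # m(k): number of clusters of size k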
neutronpy/neutronpy
|
[
"44ca74a0bef25c03397a77aafb359bb257de1fe6",
"44ca74a0bef25c03397a77aafb359bb257de1fe6",
"44ca74a0bef25c03397a77aafb359bb257de1fe6"
] |
[
"neutronpy/instrument/tools.py",
"tests/test_structure_factors.py",
"neutronpy/crystal/symmetry.py"
] |
[
"# -*- coding: utf-8 -*-\nimport inspect\nfrom numbers import Number\n\nimport numpy as np\n\nfrom ..constants import neutron_mass, hbar\nfrom ..crystal import Sample\nfrom ..energy import Energy\nfrom .exceptions import AnalyzerError, MonochromatorError, ScatteringTriangleError\n\n\nclass _Dummy(object):\n r\"\"\"Empty class for constructing empty objects monitor, guide, and detector\n \"\"\"\n\n def __init__(self, name='Dummy', **kwargs):\n self.name = name\n self.__dict__.update(kwargs)\n\n def __eq__(self, right):\n self_parent_keys = sorted(list(self.__dict__.keys()))\n right_parent_keys = sorted(list(right.__dict__.keys()))\n\n if not np.all(self_parent_keys == right_parent_keys):\n return False\n\n for key, value in self.__dict__.items():\n right_parent_val = getattr(right, key)\n if not np.all(value == right_parent_val):\n return False\n\n return True\n\n def __ne__(self, right):\n return not self.__eq__(right)\n\n def __repr__(self):\n return \"{1}({0})\".format(', '.join(\n ['{0}={1}'.format(key, getattr(self, key)) for key in self.__dict__.keys() if\n getattr(self, key, None) is not None and key != 'name']), self.name)\n\n\ndef _scalar(v1, v2, lattice):\n r\"\"\"Calculates the _scalar product of two vectors, defined by their\n fractional cell coordinates or Miller indexes.\n\n Parameters\n ----------\n v1 : array\n First input vector\n\n v2 : array\n Second input vector\n\n lattice : Sample class\n Class containing unit cell parameters\n\n Returns\n -------\n s : _scalar\n The _scalar product of the two input vectors scaled by the lattice\n parameters.\n\n Notes\n -----\n Translated from ResLib 3.4c, originally authored by A. Zheludev, 1999-2007, Oak Ridge National Laboratory\n\n \"\"\"\n\n [x1, y1, z1] = v1\n [x2, y2, z2] = v2\n\n s = x1 * x2 * lattice.a ** 2 + y1 * y2 * lattice.b ** 2 + z1 * z2 * lattice.c ** 2 + \\\n (x1 * y2 + x2 * y1) * lattice.a * lattice.b * np.cos(lattice.gamma) + \\\n (x1 * z2 + x2 * z1) * lattice.a * lattice.c * np.cos(lattice.beta) + \\\n (z1 * y2 + z2 * y1) * lattice.c * lattice.b * np.cos(lattice.alpha)\n\n return s\n\n\ndef _star(lattice):\n r\"\"\"Given lattice parametrs, calculate unit cell volume V, reciprocal\n volume Vstar, and reciprocal lattice parameters.\n\n Parameters\n ----------\n lattice : Class\n Sample class with the lattice parameters\n\n Returns\n -------\n [V, Vstar, latticestar] : [float, float, class]\n Returns the unit cell volume, reciprocal cell volume, and a Sample\n Class with reciprocal lattice parameters\n\n Notes\n -----\n Translated from ResLib 3.4c, originally authored by A. 
Zheludev, 1999-2007, Oak Ridge National Laboratory\n\n \"\"\"\n V = 2 * lattice.a * lattice.b * lattice.c * \\\n np.sqrt(np.sin((lattice.alpha + lattice.beta + lattice.gamma) / 2) *\n np.sin((-lattice.alpha + lattice.beta + lattice.gamma) / 2) *\n np.sin((lattice.alpha - lattice.beta + lattice.gamma) / 2) *\n np.sin((lattice.alpha + lattice.beta - lattice.gamma) / 2))\n\n Vstar = (2 * np.pi) ** 3 / V\n\n latticestar = Sample(0, 0, 0, 0, 0, 0)\n latticestar.a = 2 * np.pi * lattice.b * lattice.c * np.sin(lattice.alpha) / V\n latticestar.b = 2 * np.pi * lattice.a * lattice.c * np.sin(lattice.beta) / V\n latticestar.c = 2 * np.pi * lattice.b * lattice.a * np.sin(lattice.gamma) / V\n latticestar.alpha = np.arccos((np.cos(lattice.beta) * np.cos(lattice.gamma) -\n np.cos(lattice.alpha)) / (np.sin(lattice.beta) * np.sin(lattice.gamma)))\n latticestar.beta = np.arccos((np.cos(lattice.alpha) * np.cos(lattice.gamma) -\n np.cos(lattice.beta)) / (np.sin(lattice.alpha) * np.sin(lattice.gamma)))\n latticestar.gamma = np.arccos((np.cos(lattice.alpha) * np.cos(lattice.beta) -\n np.cos(lattice.gamma)) / (np.sin(lattice.alpha) * np.sin(lattice.beta)))\n\n return [V, Vstar, latticestar]\n\n\ndef _modvec(v, lattice):\n r\"\"\"Calculates the modulus of a vector, defined by its fractional cell\n coordinates or Miller indexes.\n\n Parameters\n ----------\n v : array\n Input vector\n\n lattice : Sample class\n Class containing unit cell parameters\n\n Returns\n -------\n v : float\n Modulus of the input vector scaled by the sample lattice\n\n Notes\n -----\n Translated from ResLib 3.4c, originally authored by A. Zheludev, 1999-2007,\n Oak Ridge National Laboratory\n\n \"\"\"\n\n return np.sqrt(_scalar(v, v, lattice))\n\n\ndef GetTau(x, getlabel=False):\n u\"\"\"τ-values for common monochromator and analyzer crystals.\n\n Parameters\n ----------\n x : float or string\n Either the numerical Tau value, in Å\\ :sup:`-1`, or a\n common monochromater / analyzer type. 
Currently included crystals and\n their corresponding τ values are\n\n +------------------+--------------+-----------+\n | String | τ | |\n +==================+==============+===========+\n | Be(002) | 3.50702 | |\n +------------------+--------------+-----------+\n | Co0.92Fe0.08(200)| 3.54782 | (Heusler) |\n +------------------+--------------+-----------+\n | Cu(002) | 3.47714 | |\n +------------------+--------------+-----------+\n | Cu(111) | 2.99913 | |\n +------------------+--------------+-----------+\n | Cu(220) | 4.91642 | |\n +------------------+--------------+-----------+\n | Cu2MnAl(111) | 1.82810 | (Heusler) |\n +------------------+--------------+-----------+\n | Ge(111) | 1.92366 | |\n +------------------+--------------+-----------+\n | Ge(220) | 3.14131 | |\n +------------------+--------------+-----------+\n | Ge(311) | 3.68351 | |\n +------------------+--------------+-----------+\n | Ge(511) | 5.76968 | |\n +------------------+--------------+-----------+\n | Ge(533) | 7.28063 | |\n +------------------+--------------+-----------+\n | PG(002) | 1.87325 | |\n +------------------+--------------+-----------+\n | PG(004) | 3.74650 | |\n +------------------+--------------+-----------+\n | PG(110) | 5.49806 | |\n +------------------+--------------+-----------+\n | Si(111) | 2.00421 | |\n +------------------+--------------+-----------+\n\n\n getlabel : boolean\n If True, return the name of the common crystal type that is a\n match to the input τ.\n\n Returns\n -------\n tau : float or string\n Returns either the numerical τ for a given crystal type or the\n name of a crystal type\n\n Notes\n -----\n Tau is defined as :math:`\\\\tau = 2\\\\pi/d`, where d is the d-spacing of the\n crystal in Angstroms.\n\n Translated from ResLib 3.4c, originally authored by A. Zheludev, 1999-2007,\n Oak Ridge National Laboratory\n\n \"\"\"\n choices = {'pg(002)'.lower(): 1.87325,\n 'pg(004)'.lower(): 3.74650,\n 'ge(111)'.lower(): 1.92366,\n 'ge(220)'.lower(): 3.14131,\n 'ge(311)'.lower(): 3.68351,\n 'be(002)'.lower(): 3.50702,\n 'pg(110)'.lower(): 5.49806,\n 'Cu2MnAl(111)'.lower(): 2 * np.pi / 3.437,\n 'Co0.92Fe0.08(200)'.lower(): 2 * np.pi / 1.771,\n 'Ge(511)'.lower(): 2 * np.pi / 1.089,\n 'Ge(533)'.lower(): 2 * np.pi / 0.863,\n 'Si(111)'.lower(): 2 * np.pi / 3.135,\n 'Cu(111)'.lower(): 2 * np.pi / 2.087,\n 'Cu(002)'.lower(): 2 * np.pi / 1.807,\n 'Cu(220)'.lower(): 2 * np.pi / 1.278,\n 'Cu(111)'.lower(): 2 * np.pi / 2.095}\n\n if getlabel:\n # return the index/label of the closest monochromator\n choices_ = dict((key, np.abs(value - x)) for (key, value) in choices.items())\n index = min(choices_, key=choices_.get)\n if np.abs(choices_[index]) < 5e-4:\n return index # the label\n else:\n return ''\n elif isinstance(x, (int, float)):\n return x\n else:\n try:\n return choices[x.lower()]\n except KeyError:\n calling_class = repr(inspect.stack()[1][0].f_locals[\"self\"])\n if calling_class.startswith('Monochromator'):\n raise MonochromatorError(\"Invalid Monochromator crystal type: {0}\".format(repr(x)))\n elif calling_class.startswith('Analyzer'):\n raise AnalyzerError(\"Invalid Analyzer crystal type: {0}\".format(repr(x)))\n\n\ndef _CleanArgs(*varargin):\n r\"\"\"Reshapes input arguments to be row-vectors. N is the length of the\n longest input argument. If any input arguments are shorter than N, their\n first values are replicated to produce vectors of length N. 
In any case,\n output arguments are row-vectors of length N.\n\n Parameters\n ----------\n varargin : tuple\n Converts arrays into formats appropriate for the calculation and\n extends arrays that are too short\n\n Returns\n -------\n [length, varargout] : [int, tuple]\n Returns the length of the input vectors and a tuple containing the\n cleaned vectors\n\n Notes\n -----\n Translated from ResLib 3.4c, originally authored by A. Zheludev, 1999-2007,\n Oak Ridge National Laboratory\n\n \"\"\"\n varargout = []\n lengths = np.array([], dtype=np.int32)\n for arg in varargin:\n if not isinstance(arg, list) and not isinstance(arg, np.ndarray):\n arg = [arg]\n varargout.append(np.array(arg))\n lengths = np.concatenate((lengths, [len(arg)]))\n\n length = max(lengths)\n bad = np.where(lengths < length)\n if len(bad[0]) > 0:\n for i in bad[0]:\n varargout[i] = np.concatenate((varargout[i], [varargout[i][-1]] * int(length - lengths[i])))\n lengths[i] = len(varargout[i])\n\n if len(np.where(lengths < length)[0]) > 0:\n raise ValueError('All inputs must have the same lengths: inputs had lengths {0}'.format(lengths))\n\n return [length] + varargout\n\n\ndef _voigt(x, a):\n def _approx1(t):\n return (t * 0.5641896) / (0.5 + t ** 2)\n\n def _approx2(t, u):\n return (t * (1.410474 + u * 0.5641896)) / (0.75 + (u * (3. + u)))\n\n def _approx3(t):\n return (16.4955 + t *\n (20.20933 + t *\n (11.96482 + t *\n (3.778987 + 0.5642236 * t)))) / (16.4955 + t *\n (38.82363 + t *\n (39.27121 + t *\n (21.69274 + t *\n (6.699398 + t)))))\n\n def _approx4(t, u):\n return (t * (36183.31 - u *\n (3321.99 - u *\n (1540.787 - u *\n (219.031 - u *\n (35.7668 - u *\n (1.320522 - u *\n 0.56419)))))) / (32066.6 - u *\n (24322.8 - u *\n (9022.23 - u *\n (2186.18 - u *\n (364.219 - u *\n (61.5704 - u *\n (1.84144 - u))))))))\n\n nx = x.size\n if len(a) == 1:\n a = np.ones(nx, dtype=np.complex64) * a\n y = np.zeros(nx, dtype=np.complex64)\n\n t = a - 1j * x\n ax = np.abs(x)\n s = ax + a\n u = t ** 2\n\n good = np.where(a == 0)\n y[good] = np.exp(-x[good] ** 2)\n\n good = np.where((a >= 15) | (s >= 15))\n y[good] = _approx1(t[good])\n\n good = np.where((s < 15) & (a < 15) & (a >= 5.5))\n y[good] = _approx2(t[good], u[good])\n\n good = np.where((s < 15) & (s >= 5.5) & (a < 5.5))\n y[good] = _approx2(t[good], u[good])\n\n good = np.where((s < 5.5) & (a < 5.5) & (a >= 0.75))\n y[good] = _approx3(t[good])\n\n good = np.where((s < 5.5) & (a >= 0.195 * ax - 0.176) & (a < 0.75))\n y[good] = _approx3(t[good])\n\n good = np.where((~((s < 5.5) & (a >= 0.195 * ax - 0.176))) & (a < 0.75))\n y[good] = np.exp(u[good]) - _approx4(t[good], u[good])\n\n y = np.real(y)\n return y\n\n\ndef project_into_plane(index, r0, rm):\n r\"\"\"Projects out-of-plane resolution into a specified plane by performing\n a gaussian integral over the third axis.\n\n Parameters\n ----------\n index : int\n Index of the axis that should be integrated out\n\n r0 : float\n Resolution prefactor\n\n rm : ndarray\n Resolution array\n\n Returns\n -------\n mp : ndarray\n Resolution matrix in a specified plane\n\n \"\"\"\n\n r = np.sqrt(2 * np.pi / rm[index, index]) * r0\n mp = rm\n\n b = rm[:, index] + rm[index, :].T\n b = np.delete(b, index, 0)\n\n mp = np.delete(mp, index, 0)\n mp = np.delete(mp, index, 1)\n\n mp -= 1 / (4. 
* rm[index, index]) * np.outer(b, b.T)\n\n return [r, mp]\n\n\ndef ellipse(saxis1, saxis2, phi=0, origin=None, npts=31):\n r\"\"\"Returns an ellipse.\n\n Parameters\n ----------\n saxis1 : float\n First semiaxis\n\n saxis2 : float\n Second semiaxis\n\n phi : float, optional\n Angle that semiaxes are rotated\n\n origin : list of floats, optional\n Origin position [x0, y0]\n\n npts: float, optional\n Number of points in the output arrays.\n\n Returns\n -------\n [x, y] : list of ndarray\n Two one dimensional arrays representing an ellipse\n \"\"\"\n\n if origin is None:\n origin = [0., 0.]\n\n theta = np.linspace(0., 2. * np.pi, npts)\n\n x = np.array(saxis1 * np.cos(theta) * np.cos(phi) - saxis2 * np.sin(theta) * np.sin(phi)) + origin[0]\n y = np.array(saxis1 * np.cos(theta) * np.sin(phi) + saxis2 * np.sin(theta) * np.cos(phi)) + origin[1]\n return np.vstack((x, y))\n\n\ndef get_bragg_widths(RM):\n r\"\"\"Returns the Bragg widths given a resolution matrix.\n\n Parameters\n ----------\n RM : array\n Resolution matrix, either in inverse angstroms or rlu\n\n Returns\n -------\n bragg : array\n Returns an array of bragg widths in the order [Qx, Qy, Qz, W], in the\n units given by the input matrix.\n\n \"\"\"\n bragg = np.array([np.sqrt(8 * np.log(2)) / np.sqrt(RM[0, 0]),\n np.sqrt(8 * np.log(2)) / np.sqrt(RM[1, 1]),\n np.sqrt(8 * np.log(2)) / np.sqrt(RM[2, 2]),\n get_phonon_width(0, RM, [0, 0, 0, 1])[1],\n np.sqrt(8 * np.log(2)) / np.sqrt(RM[3, 3])])\n\n return bragg * 2\n\n\ndef get_phonon_width(r0, M, C):\n T = np.diag(np.ones(4))\n T[3, :] = np.array(C)\n S = np.matrix(np.linalg.inv(T))\n MP = S.H * M * S\n [rp, MP] = project_into_plane(0, r0, MP)\n [rp, MP] = project_into_plane(0, rp, MP)\n [rp, MP] = project_into_plane(0, rp, MP)\n fwhm = np.sqrt(8 * np.log(2)) / np.sqrt(MP[0, 0])\n\n return [rp, fwhm]\n\n\ndef fproject(mat, i):\n if i == 0:\n v = 2\n j = 1\n elif i == 1:\n v = 0\n j = 2\n elif i == 2:\n v = 0\n j = 1\n else:\n raise ValueError('i={0} is an invalid value!'.format(i))\n\n [a, b, c] = mat.shape\n proj = np.zeros((2, 2, c))\n proj[0, 0, :] = mat[i, i, :] - mat[i, v, :] ** 2 / mat[v, v, :]\n proj[0, 1, :] = mat[i, j, :] - mat[i, v, :] * mat[j, v, :] / mat[v, v, :]\n proj[1, 0, :] = mat[j, i, :] - mat[j, v, :] * mat[i, v, :] / mat[v, v, :]\n proj[1, 1, :] = mat[j, j, :] - mat[j, v, :] ** 2 / mat[v, v, :]\n hwhm = proj[0, 0, :] - proj[0, 1, :] ** 2 / proj[1, 1, :]\n hwhm = np.sqrt(2. * np.log(2.)) / np.sqrt(hwhm)\n\n return hwhm\n\n\ndef calculate_projection_hwhm(MP):\n r\"\"\"\n\n Parameters\n ----------\n MP\n\n Returns\n -------\n\n \"\"\"\n theta = 0.5 * np.arctan2(2 * MP[0, 1], (MP[0, 0] - MP[1, 1]))\n S = [[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]]\n\n MP = np.matrix(S) * np.matrix(MP) * np.matrix(S).H\n\n hwhm_xp = 1.17741 / np.sqrt(MP[0, 0])\n hwhm_yp = 1.17741 / np.sqrt(MP[1, 1])\n\n return hwhm_xp, hwhm_yp, theta\n\n\ndef get_angle_ki_Q(ki, kf, Q, gonio_dir=-1, outside_scat_tri=False):\n r\"\"\"Returns the angle between ki and Q for rotation from\n `[ki, ki_perp, kz, w]` to `[q_perp, q_para, qz, w]` reference frame.\n\n Parameters\n ----------\n ki : float\n Initial wavevector in inverse angstroms\n\n kf : float\n Final wavevector in inverse angstroms\n\n Q : float\n Q position in inverse angstroms.\n\n gonio_dir : bool, optional\n If the goniometer direction is left-handed, set to -1. 
Default: -1\n\n outside_scat_tri : bool, optional\n Set to True if Q is outside the scattering triangle to fold back.\n Default: False\n\n Returns\n -------\n angle : float\n \"\"\"\n if Q == 0:\n angle = np.pi / 2.0\n else:\n c = (ki ** 2 - kf ** 2 + Q ** 2) / (2.0 * ki * Q)\n if abs(c) > 1.0:\n raise ScatteringTriangleError\n\n angle = np.arccos(c)\n\n if outside_scat_tri:\n angle = np.pi - angle\n\n return angle * np.sign(gonio_dir)\n\n\ndef get_kfree(W, kfixed, ki_fixed=True):\n r\"\"\"Calculates the free wavevector, either ki or kf, as specified by\n `ki_fixed`.\n\n Parameters\n ----------\n W : float\n Energy transfer\n\n kfixed : float\n Wavevector magnitude of the fixed k.\n\n ki_fixed : bool, optional\n If ki is fixed, set to True. Default: True\n\n Returns\n -------\n k : float\n Returns initial or final wavevector magnitude.\n\n \"\"\"\n kE_sq = Energy(energy=W).wavevector ** 2\n if ki_fixed:\n kE_sq = -kE_sq\n\n k_sq = kE_sq + kfixed ** 2\n\n if k_sq < 0.0:\n raise ScatteringTriangleError\n else:\n return np.sqrt(k_sq)\n\n\ndef chop(matrix, tol=1e-12):\n r\"\"\"Rounds values within `tol` of zero down(up) to zero\n\n Parameters\n ----------\n matrix : array, number\n The object to be chopped.\n\n tol : float, optional\n The tolerance under which values will be rounded. Default: 1e-12.\n\n Returns\n -------\n out : same as input\n Object with values chopped.\n\n \"\"\"\n if isinstance(matrix, tuple):\n return tuple(chop(item) for item in matrix)\n elif isinstance(matrix, list):\n return list(chop(item) for item in matrix)\n elif isinstance(matrix, (np.ndarray, np.matrixlib.defmatrix.matrix)):\n if np.iscomplexobj(matrix):\n rmat = matrix.real.copy()\n rmat[np.abs(rmat) < tol] = 0.0\n\n imat = matrix.imag.copy()\n imat[np.abs(imat) < tol] = 0.0\n\n return rmat + 1j * imat\n else:\n matrix[np.abs(matrix) < tol] = 0.0\n return matrix\n elif isinstance(matrix, Number):\n if np.iscomplex(matrix):\n real = matrix.real\n imag = matrix.imag\n if real < tol:\n real = 0.0\n if imag < tol:\n imag = 0.0\n return real + imag * 1j\n else:\n if matrix < tol:\n return 0.0\n else:\n return matrix\n else:\n print(type(matrix))\n return matrix\n",
"# -*- coding: utf-8 -*-\nr\"\"\"Test structure factor calculations\n\n\"\"\"\nimport numpy as np\nimport pytest\nfrom matplotlib import use\n\nuse('Agg')\n\nfrom mock import patch\nfrom neutronpy import Material\nfrom neutronpy.crystal.structure_factors import MagneticFormFactor\n\n\ninput = {'name': 'FeTe',\n 'composition': [{'ion': 'Fe', 'pos': [0.75, 0.25, 0.]},\n {'ion': 'Fe', 'pos': [1. - 0.75, 1. - 0.25, 0.0]},\n {'ion': 'Te', 'pos': [0.25, 0.25, 1. - 0.2839]},\n {'ion': 'Te', 'pos': [1. - 0.25, 1. - 0.25, 1. - (1. - 0.2839)]}],\n 'debye-waller': True,\n 'massNorm': True,\n 'formulaUnits': 1.,\n 'lattice': dict(abc=[3.81, 3.81, 6.25], abg=[90, 90, 90])}\n\n\ndef test_str_fac():\n \"\"\"Tests various positions for structure factor\n \"\"\"\n structure = Material(input)\n assert (np.abs(structure.calc_nuc_str_fac((2., 0., 0.))) ** 2 - 1702170.4663405998 < 1e-6)\n assert (np.abs(structure.calc_nuc_str_fac((2, 0, 0))) ** 2 - 1702170.4663405998 < 1e-6)\n assert (np.abs(structure.calc_nuc_str_fac((0, 2., 0))) ** 2 - 1702170.4663405998 < 1e-6)\n assert (np.abs(structure.calc_nuc_str_fac((0, 2, 0))) ** 2 - 1702170.4663405998 < 1e-6)\n\n ndarray_example = np.linspace(0.5, 1.5, 21)\n assert (np.sum(abs(structure.calc_nuc_str_fac((ndarray_example, 0, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, ndarray_example, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, 0, ndarray_example))) ** 2) - 16831011.814390473 < 1e-6)\n assert (\n np.sum(abs(structure.calc_nuc_str_fac((ndarray_example, ndarray_example, 0))) ** 2) - 10616602.544519115 < 1e-6)\n\n list_example = list(ndarray_example)\n assert (np.sum(abs(structure.calc_nuc_str_fac((list_example, 0, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, list_example, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, 0, list_example))) ** 2) - 16831011.814390473 < 1e-6)\n\n tuple_example = tuple(ndarray_example)\n assert (np.sum(abs(structure.calc_nuc_str_fac((tuple_example, 0, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, tuple_example, 0))) ** 2) - 7058726.6759794801 < 1e-6)\n assert (np.sum(abs(structure.calc_nuc_str_fac((0, 0, tuple_example))) ** 2) - 16831011.814390473 < 1e-6)\n\n\ndef test_N_atoms():\n \"\"\"Tests number of atoms in X g of material\n \"\"\"\n structure = Material(input)\n assert (structure.N_atoms(22) == 36110850351331465494528)\n\n\ndef test_volume():\n \"\"\"Tests volume of unitcell\n \"\"\"\n structure = Material(input)\n assert (structure.volume == 90.725624999999965)\n\n\ndef test_total_scattering_cross_section():\n \"\"\"Tests scattering cross section\n \"\"\"\n structure = Material(input)\n assert (structure.total_scattering_cross_section == 31.880000000000003)\n\n\ndef test_case():\n \"\"\"Test formulaUnits\n \"\"\"\n input_test = input\n del input_test['formulaUnits']\n structure = Material(input_test)\n del structure\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_plot(mock_show):\n \"\"\"Test unitcell plot\n \"\"\"\n structure = Material(input)\n structure.plot_unit_cell()\n\n\ndef test_optimal_thickness():\n \"\"\"Test optimal thickness calculation\n \"\"\"\n structure = Material(input)\n assert (structure.calc_optimal_thickness() == 1.9552936422413782)\n\n\ndef test_mag_form_fac():\n \"\"\"Tests the magnetic form factor single value\n \"\"\"\n ion = MagneticFormFactor('Fe')\n formfac, _temp = 
ion.calc_mag_form_fac(q=1.)[0], ion.calc_mag_form_fac(q=1.)[1:]\n del _temp\n assert (abs(formfac - 0.932565) < 1e-6)\n\n\ndef test_mag_form_fac_case1():\n \"\"\"Tests the magnetic form factor no q given\n \"\"\"\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac()[0], ion.calc_mag_form_fac()[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)\n\n\ndef test_mag_form_fac_case2():\n \"\"\"Tests the magnetic form factor q range provided\n \"\"\"\n ion = MagneticFormFactor('Fe')\n formfac, _temp = ion.calc_mag_form_fac(qrange=[0, 2])[0], ion.calc_mag_form_fac(qrange=[0, 2])[1:]\n del _temp\n assert (abs(np.sum(formfac) - 74.155233575216599) < 1e-12)\n\n\nif __name__ == \"__main__\":\n pytest.main()\n",
"# -*- coding: utf-8 -*-\nr\"\"\"Symmetry operations\n\n\"\"\"\nimport numpy as np\n\nfrom ..constants import symmetry\n\nspace_groups = symmetry()['space_groups']\n\n\nclass SpaceGroup(object):\n r\"\"\"Class defining a space group of a crystal\n\n Attributes\n ----------\n full_name\n generators\n group_number\n hm_symbol\n lattice_type\n string_generators\n symbol\n symmetry_operations\n total_operations\n\n Methods\n -------\n symmetrize_position\n\n \"\"\"\n def __init__(self, symbol='P1'):\n if isinstance(symbol, int):\n for key, value in space_groups.items():\n if value['number'] == symbol:\n self._symbol = key\n elif isinstance(symbol, str):\n if symbol in space_groups:\n self._symbol = symbol\n else:\n for key, value in space_groups.items():\n if value['hermann-manguin_symbol'] == symbol or value['full_name'] == symbol:\n self._symbol = key\n else:\n raise KeyError('{0} is not a valid International symbol, Hermann–Mauguin symbol, or space group number'.format(symbol))\n\n self.point_group = space_groups[self.symbol]['point_group']\n self.full_name = space_groups[self.symbol]['full_name']\n self._generators_str = space_groups[self.symbol]['generators']\n self.lattice_type = space_groups[self.symbol]['type']\n self.group_number = space_groups[self.symbol]['number']\n self.hm_symbol = space_groups[self.symbol]['hermann-manguin_symbol']\n self._generators_mat = get_generator_from_str(self._generators_str)\n self.total_operations = space_groups[self.symbol]['total_operations']\n self.symmetry_operations = self._symmetry_operations_from_generators()\n\n def __repr__(self):\n return \"SpaceGroup({0})\".format(self.group_number)\n\n @property\n def symbol(self):\n r\"\"\"Space group symbol\n \"\"\"\n return self._symbol\n\n @symbol.setter\n def symbol(self, symbol):\n self.__init__(symbol)\n\n @property\n def string_generators(self):\n r\"\"\"Space group generators\n \"\"\"\n return self._generators_str\n\n @property\n def generators(self):\n r\"\"\"Space group generators in matrix format\n \"\"\"\n return self._generators_mat\n\n def _symmetry_operations_from_generators(self):\n symm_ops, new_ops = [np.copy(self.generators)] * 2\n while len(new_ops) > 0 and len(symm_ops) < self.total_operations:\n gen_ops = []\n for g in new_ops:\n test_ops = np.einsum('ijk,kl', symm_ops, g)\n for op in test_ops:\n op[:3, 3] = np.mod(get_translation(op), 1)\n op[np.where(np.abs(1 - get_translation(op)) < 1e-15), 3] = 0\n if not (np.abs(symm_ops - op) < 1e-15).all(axis=(1, 2)).any():\n gen_ops.append(op)\n symm_ops = np.append(symm_ops, [op], axis=0)\n new_ops = gen_ops\n assert len(symm_ops) == self.total_operations\n return symm_ops\n\n def symmetrize_position(self, vector):\n r\"\"\"Applies symmetry operations to a vector\n\n \"\"\"\n positions = []\n for op in self.symmetry_operations:\n positions.append(np.dot(get_rotation(op), np.array(vector)) + get_translation(op))\n\n return positions\n\n\ndef get_formatted_operations(operations):\n r\"\"\"Returns operations formatted in a list for easy parsing\n\n Parameters\n ----------\n operations\n\n Returns\n -------\n\n \"\"\"\n if (isinstance(operations, list) and isinstance(operations[0], str)) or isinstance(operations, str):\n operations = get_generator_from_str(operations)\n\n if isinstance(operations, np.ndarray):\n operations = [operations]\n\n return operations\n\n\ndef get_rotation(operations):\n r\"\"\"Returns rotational part of operator\n\n \"\"\"\n rotations = []\n for operation in get_formatted_operations(operations):\n 
rotations.append(operation[:3, :3])\n\n if len(rotations) == 1:\n rotations = rotations[0]\n\n return rotations\n\n\ndef get_translation(operations):\n r\"\"\"Returns translational part of operator\n\n \"\"\"\n translations = []\n for operation in get_formatted_operations(operations):\n translations.append(operation[:3, 3])\n\n if len(translations) == 1:\n translations = translations[0]\n\n return translations\n\n\ndef get_generator_from_str(operations):\n r\"\"\"Returns generator arrays\n\n Returns\n -------\n operators : list of ndarrays\n List of operation arrays with shape (3,4)\n\n \"\"\"\n if isinstance(operations, str):\n operations = [operations]\n\n operators = []\n for gen in operations:\n components = gen.split(',')\n if len(components) > 3:\n raise ValueError('Generator string {0} is in wrong format'.format(gen))\n\n rotation = np.zeros((3, 3))\n translation = np.zeros(3)\n for i, comp in enumerate(components):\n elements = comp.split('+')\n if len(elements) > 1:\n translation[i] = eval(elements[-1].replace('/', './'))\n\n if '-x' in elements[0]:\n rotation[i, 0] = -1\n elif 'x' in elements[0]:\n rotation[i, 0] = 1\n\n if '-y' in elements[0]:\n rotation[i, 1] = -1\n elif 'y' in elements[0]:\n rotation[i, 1] = 1\n\n if '-z' in elements[0]:\n rotation[i, 2] = -1\n elif 'z' in elements[0]:\n rotation[i, 2] = 1\n\n out = np.zeros((4, 4), dtype=float)\n out[0:3, 0:3] = rotation\n out[0:3, 3] = translation\n out[3, 3] = 1.\n\n operators.append(out)\n\n if len(operators) == 1:\n operators = operators\n\n return operators\n\n\ndef get_str_from_generator(operations):\n r\"\"\"Returns strings of generators from arrays\n\n Parameters\n ----------\n operations : str, array, list\n\n Returns\n -------\n generators : list of str\n List of generator strings\n\n \"\"\"\n if isinstance(operations, np.ndarray) and len(operations.shape) < 3:\n operations = [operations]\n\n syms = ['x', 'y', 'z']\n signs = {-1: '-', 1: '+', 0: ''}\n generators = []\n for operation in operations:\n line = []\n for row in operation[:3, :]:\n element = ''\n for col, sym in zip(row[:3], syms):\n element += signs[int(col)] + np.abs(int(col)) * sym\n\n if row[3] == 0:\n translate = ''\n elif np.round(1. / row[3], 1) == 1.5:\n translate = '2/3'\n elif row[3] == 0.75:\n translate = '3/4'\n elif np.round(row[3], 3) == 0.833:\n translate = '5/6'\n else:\n denominator = int(np.round(1. / row[3]))\n translate = '1/{0}'.format(denominator)\n\n if translate != '':\n element += '+{0}'.format(translate)\n\n if len(element) >= 1 and element[0] == '+':\n element = element[1:]\n\n line.append(element)\n\n generators.append(','.join(line))\n\n return generators\n"
] |
[
[
"numpy.matrix",
"numpy.sqrt",
"numpy.linspace",
"numpy.arctan2",
"numpy.all",
"numpy.iscomplexobj",
"numpy.exp",
"numpy.where",
"numpy.sin",
"numpy.real",
"numpy.outer",
"numpy.zeros",
"numpy.log",
"numpy.linalg.inv",
"numpy.arccos",
"numpy.delete",
"numpy.iscomplex",
"numpy.array",
"numpy.abs",
"numpy.cos",
"numpy.ones",
"numpy.sign",
"numpy.vstack"
],
[
"matplotlib.use",
"numpy.sum",
"numpy.linspace"
],
[
"numpy.abs",
"numpy.einsum",
"numpy.round",
"numpy.copy",
"numpy.append",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
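The GetTau() helper in tools.py above documents the convention tau = 2*pi/d, with d the crystal d-spacing in angstroms. A quick round-trip check of that convention against the tabulated PG(002) value; this is a sketch, not part of the neutronpy API:

import numpy as np

tau_pg002 = 1.87325                # in inverse angstroms, from the GetTau() table
d_pg002 = 2 * np.pi / tau_pg002    # back out the d-spacing, roughly 3.354 angstroms
assert abs(2 * np.pi / d_pg002 - tau_pg002) < 1e-12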
kapousa/ProjXWebsiteProject_Jul2021
|
[
"52c0565575b878175f150a1cb42b4f8b376e4e62"
] |
[
"mylib/controllers/ModelController.py"
] |
[
"import os\nimport pickle\nimport random\n\nfrom datetime import datetime\n\nfrom app import db\nimport numpy as np\nfrom random import randint\nfrom matplotlib import pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix, accuracy_score, classification_report\nimport pandas as pd\nimport seaborn as sns\n\nfrom app.base.db_models import ModelAPIMethods\n\nfrom app.base.db_models.ModelProfile import ModelProfile\nfrom mylib.core.ModelProcessor import ModelProcessor\nfrom mylib.data_manipulation.AdjustDataFrame import remove_null_values, encode_data_frame, encode_prediction_data_frame, \\\n decode_predicted_values, deletemodelfiles\nfrom mylib.utiles.CVSReader import getcvsheader, get_new_headers_list, reorder_csv_file\nfrom mylib.utiles.CVSReader import get_only_file_name\nfrom mylib.db_helper.AttributesHelper import add_features, add_labels, delete_encoded_columns, get_model_id, \\\n encode_testing_features_values, get_features, get_labels, get_encoded_columns, add_api_details, \\\n update_api_details_id\nimport os\nimport pickle\nfrom random import randint\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import StandardScaler\n\nfrom app import db\nfrom app.base.db_models.ModelProfile import ModelProfile\nfrom app.base.db_models.ModelLabels import ModelLabels\nfrom app.base.db_models.ModelEncodedColumns import ModelEncodedColumns\nfrom app.base.db_models.ModelFeatures import ModelFeatures\nfrom app.base.db_models.ModelAPIDetails import ModelAPIDetails\nfrom mylib.data_manipulation.AdjustDataFrame import remove_null_values\nfrom mylib.db_helper.AttributesHelper import add_features, add_labels, delete_encoded_columns, get_model_id, \\\n encode_testing_features_values, get_features\nfrom mylib.utiles.CVSReader import get_only_file_name\nfrom mylib.utiles.CVSReader import getcvsheader, get_new_headers_list, reorder_csv_file\n\n\nclass ModelController:\n\n def __init__(self):\n ''' Constructor for this class. 
'''\n # Create some member animals\n self.members = ['Tiger', 'Elephant', 'Wild Cat']\n\n\ndef saveDSFile(self):\n return 'file uploaded successfully'\n\n\npkls_location = 'pkls/'\nscalars_location = 'scalars/'\ndf_location = 'data/'\nimage_location = 'app/'\nroot_path = '../app/'\n\n\ndef run_model(root_path, csv_file_location, predicted_columns):\n # ------------------Preparing data frame-------------------------#\n cvs_header = getcvsheader(csv_file_location)\n new_headers_list = get_new_headers_list(cvs_header, predicted_columns)\n reordered_data = reorder_csv_file(csv_file_location, new_headers_list)\n data = pd.read_csv(csv_file_location)\n model_id = randint(0, 10)\n\n # Determine features and lables\n features_last_index = len(new_headers_list) - (len(predicted_columns))\n model_features = new_headers_list[0:features_last_index]\n model_labels = predicted_columns\n\n # 1-Clean the data frame\n data = remove_null_values(data)\n\n # 2- Encode the data frame\n deleteencodedcolumns = delete_encoded_columns()\n\n data_column_count = len(data.columns)\n testing_values_len = data_column_count - len(predicted_columns)\n\n # take slice from the dataset, all rows, and cloumns from 0:8\n features_df = data[model_features]\n encoded_features = encode_data_frame(model_id, features_df, 'F')\n real_x = encoded_features.iloc[:, :].values # 2 param (test vales)\n # real_x = data.iloc[:, 0:testing_values_len].values # 2 param (test vales)\n labels_df = data[model_labels]\n encoded_labels = encode_data_frame(model_id, labels_df, 'L')\n real_y = encoded_labels.iloc[:, :].values # (predict values)\n # real_y = data.iloc[:, testing_values_len:data_column_count].values # (predict values)\n\n # Select proper model\n cls = ModelProcessor.modelselector(data, encoded_features, encoded_labels)\n\n training_x, testing_x, training_y, testing_y = train_test_split(real_x, real_y, test_size=0.25, random_state=0)\n s_c = StandardScaler() # test\n training_x = s_c.fit_transform(training_x)\n\n test_x = s_c.transform(testing_x)\n file_name = get_only_file_name(csv_file_location)\n scalar_file_name = scalars_location + file_name + '_scalear.sav'\n pickle.dump(s_c, open(scalar_file_name, 'wb'))\n #cls = MultiOutputClassifier(KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2),n_jobs=-1) # KNeighborsRegressor)\n cls.fit(training_x, training_y)\n\n model_file_name = pkls_location + file_name + '_model.pkl'\n pickle.dump(cls, open(model_file_name, 'wb'))\n # y_pred = cls.predict([test_x[0]])\n y_pred = cls.predict(test_x)\n # Evaluating the Algorithm\n Mean_Absolute_Error = metrics.mean_absolute_error(testing_y, y_pred)\n Mean_Squared_Error = metrics.mean_squared_error(testing_y, y_pred)\n Root_Mean_Squared_Error = np.sqrt(metrics.mean_squared_error(testing_y, y_pred))\n # print(metrics.confusion_matrix(testing_y, y_pred))\n # print(metrics.classification_report(testing_y, y_pred))\n # Predicting values\n # testing_y_argmax = testing_y.argmax(axis=1)\n # y_pred_argmax = y_pred.argmax(axis=1)\n # testing_y_argmin = testing_y.argmin(axis=1)\n # y_pred_argmin = y_pred.argmin(axis=1)\n # c_m = MultiOutputClassifier(confusion_matrix(testing_y, y_pred), n_jobs=-1)\n c_m = ''\n # acc = accuracy_score(testing_y, y_pred) * 100\n # clf_report = classification_report(testing_y, y_pred, output_dict=True)\n # print(acc)\n acc = cls.score(training_x, training_y) * 100\n\n # Delete old visualization images\n dir = os.path.join(root_path, 'static/images/plots/')\n for f in os.listdir(dir):\n os.remove(os.path.join(dir, f))\n\n # 
Show prediction\n plt.scatter(testing_y, y_pred, color=\"red\")\n x = [np.min(testing_y), np.max(testing_y)]\n y = [np.min(y_pred), np.max(y_pred)]\n plt.plot(x, y, color=\"#52b920\", label='Regression Line')\n plt.title(\"Testing model vs Prediction values\")\n plt.xlabel(\"Tested values\")\n plt.ylabel(\"Predicted values\")\n plot_image = os.path.join(root_path, 'static/images/plots/', get_only_file_name(csv_file_location) + '_plot.png')\n plot_image_path = os.path.join('/images/plots/', get_only_file_name(csv_file_location) + '_plot.png')\n plt.savefig(plot_image, dpi=300, bbox_inches='tight')\n plt.show()\n\n # plt.figure(figsize=(10, 10))\n # sns.heatmap(data.corr(), annot=True, linewidths=0.5, cmap=\"crest\")\n # plt.savefig(plot_image, dpi=300, bbox_inches='tight')\n # plt.show()\n\n # ------------------Predict values from the model-------------------------#\n # model = pickle.load(open(model_file_name, 'rb'))\n # testing_values = np.reshape(predicted_columns, (1, testing_values_len)) #to be deleted\n # predicted_values = model.predict([test_x[0]]) # to be deleted\n # all_return_values = {'accuracy': acc, 'confusion_matrix': c_m, 'predicted_values': predicted_values}\n now = datetime.now()\n all_return_values = {'accuracy': round(acc, 2), 'confusion_matrix': c_m, 'plot_image_path': plot_image_path,\n 'Mean_Absolute_Error': round(Mean_Absolute_Error, 2),\n 'Mean_Squared_Error': round(Mean_Squared_Error, 2),\n 'Root_Mean_Squared_Error': round(Root_Mean_Squared_Error, 2),\n 'created_on': now.strftime(\"%d/%m/%Y %H:%M:%S\"),\n 'updated_on': now.strftime(\"%d/%m/%Y %H:%M:%S\"),\n 'last_run_time': now.strftime(\"%d/%m/%Y %H:%M:%S\")}\n # print(\"Predicted Value:\", predicted_values)\n\n # Add model profile to the database\n modelmodel = {'model_id': model_id,\n 'model_name': file_name,\n 'user_id': 1,\n 'model_headers': str(cvs_header)[1:-1],\n 'prediction_results_accuracy': str(acc),\n 'mean_absolute_error': str(Mean_Absolute_Error),\n 'mean_squared_error': str(Mean_Squared_Error),\n 'root_mean_squared_error': str(Root_Mean_Squared_Error),\n 'plot_image_path': plot_image_path,\n 'created_on': now.strftime(\"%d/%m/%Y %H:%M:%S\"),\n 'updated_on': now.strftime(\"%d/%m/%Y %H:%M:%S\"),\n 'last_run_time': now.strftime(\"%d/%m/%Y %H:%M:%S\")}\n model_model = ModelProfile(**modelmodel)\n # Delete current profile\n model_model.query.filter().delete()\n db.session.commit()\n # Add new profile\n db.session.add(model_model)\n db.session.commit()\n\n # Add features, labels, and APIs details\n add_features_list = add_features(model_id, model_features)\n add_labels_list = add_labels(model_id, model_labels)\n api_details_id = random.randint(0, 22)\n api_details_list = add_api_details(model_id, api_details_id, 'v1')\n api_details_list = update_api_details_id(api_details_id)\n db.session.commit()\n\n return all_return_values\n\n\ndef predict_values_from_model(model_file_name, testing_values):\n # ------------------Predict values from the model-------------------------#\n # model = pickle.load(open(pkls_location + model_file_name + '_model.pkl', 'rb'))\n model = pickle.load(open(pkls_location + model_file_name + '_model.pkl', 'rb'))\n\n # Encode the testing values\n model_id = get_model_id()\n features_list = get_features()\n labels_list = get_labels()\n encoded_labels_list = get_encoded_columns('L')\n\n testing_values_dic = {}\n for i in range(len(features_list)):\n testing_values_dic[features_list[i]] = testing_values[i]\n reshaped_testing_values = np.reshape(testing_values, (1, len(testing_values)))\n 
reshaped_testing_values = reshaped_testing_values.flatten()\n encoded_testing_values = [encode_prediction_data_frame(reshaped_testing_values, 'F')]\n df_testing_values = pd.DataFrame(encoded_testing_values)\n # df_testing_values = pd.DataFrame(reshaped_testing_values) #Unencoded values\n # encoded_df_testing_values = encode_testing_features_values(model_id, testing_values_dic)\n # print(encoded_df_testing_values)\n predicted_values = model.predict(encoded_testing_values)\n predicted_values = predicted_values.flatten()\n print(predicted_values)\n decoded_predicted_values = decode_predicted_values(model_id, predicted_values, labels_list, encoded_labels_list)\n return decoded_predicted_values\n\n\ndef predict_values(csv_file_location, testing_values):\n file_name = get_only_file_name(csv_file_location)\n data = pd.read_csv(csv_file_location)\n data_column_count = len(data.columns)\n testing_values_len = data_column_count - 1\n\n model_file_name = pkls_location + file_name + '_model.pkl'\n model = pickle.load(open(model_file_name, 'rb'))\n testing_values = np.reshape(testing_values, (1, testing_values_len))\n testing_values = encode_testing_features_values(testing_values)\n predicted_values = model.predict(testing_values)\n\n return predicted_values\n\n\n# Return all model information\ndef get_model_status():\n try:\n model_profile_row = ModelProfile.query.all()\n model_profile = {}\n\n for profile in model_profile_row:\n model_profile = {'model_id': profile.model_id,\n 'model_name': profile.model_name,\n 'prediction_results_accuracy': str(round(float(profile.prediction_results_accuracy), 2)),\n 'mean_absolute_error': str(round(float(profile.mean_absolute_error), 2)),\n 'mean_squared_error': str(round(float(profile.mean_squared_error), 2)),\n 'root_mean_squared_error': str(round(float(profile.root_mean_squared_error), 2)),\n 'plot_image_path': profile.plot_image_path,\n 'created_on': profile.created_on,\n 'updated_on': profile.updated_on,\n 'last_run_time': profile.last_run_time}\n print(model_profile)\n return model_profile\n except Exception as e:\n print('Ohh -get_model_status...Something went wrong.')\n print(e)\n return 0\n\n\ndef delet_model():\n try:\n ModelEncodedColumns.query.filter().delete()\n ModelFeatures.query.filter().delete()\n ModelLabels.query.filter().delete()\n ModelAPIDetails.query.filter().delete()\n ModelProfile.query.filter().delete()\n db.session.commit()\n\n # Delete old model files\n delete_model_files = deletemodelfiles(scalars_location, pkls_location, df_location)\n\n return 1\n except Exception as e:\n print('Ohh -delet_models...Something went wrong.')\n print(e)\n return 0\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.reshape",
"numpy.min",
"sklearn.metrics.mean_absolute_error",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.savefig",
"sklearn.metrics.mean_squared_error",
"matplotlib.pyplot.plot",
"pandas.DataFrame",
"numpy.max",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
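run_model() in ModelController.py above follows the standard fit-on-train / transform-on-test scaling pattern before persisting the scaler with pickle. A minimal sketch of that pattern; the data, shapes, and file path are hypothetical:

import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

X, y = np.random.rand(100, 4), np.random.randint(0, 2, 100)   # hypothetical dataset
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25, random_state=0)
scaler = StandardScaler()
X_tr = scaler.fit_transform(X_tr)          # fit scaling statistics on training data only
X_te = scaler.transform(X_te)              # reuse the same statistics on the test split
with open('my_scaler.sav', 'wb') as fh:    # hypothetical path
    pickle.dump(scaler, fh)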
oxquantum/CVAE
|
[
"0352ddc51fbfd8d57b155e6de66b4c34e010beac"
] |
[
"_main_decision/_measurement_iteration/iteration.py"
] |
[
"import time\n\nimport numpy as np\n\nfrom ._helper_functions import estimate_error, get_observation_mask, update_log_prob_mask\n\n\ndef iteration(con, obs_mask, obs_data_mask, obs_data, likelihood, resol_ctrl, acq,\n log_posterior, recon, log_weight_samples, num_obs, options):\n prob = np.exp(log_posterior)\n\n max_idx = np.argmax(prob)\n x_best_guess = recon[max_idx]\n\n if num_obs >= options.DOE_options.max_obs:\n print('break')\n # break\n\n # calculate an acquisition map\n t0 = time.time()\n score = acq.get_score(obs_data_mask, recon, log_weight_samples, 0.0) # basic\n\n # code for decision resolution ###\n # measure the error on the current decision grid\n est_err = estimate_error(recon, obs_mask, mask_valid=resol_ctrl.mask_valid, log_prob=log_weight_samples)\n if est_err < 0.05: # error thresold for increasing resolution\n resol_ctrl.increase()\n # count the number of unobserved locations on the current decision resolution\n num_unobs_decision = resol_ctrl.get_num_unobs(obs_mask)\n\n # choose next measurement\n if options.DOE_options.batch_mode is False:\n # pointwise selection\n num_obs_next = 1\n else:\n # batch selection\n batch_decision_idxs = options.DOE_options.mcmc_idxs\n next_point = batch_decision_idxs[batch_decision_idxs > num_obs].min()\n next_idx = np.where(batch_decision_idxs == next_point)[0]\n # print('Next point: {}'.format(next_point))\n num_obs_next = next_point - num_obs\n if num_unobs_decision < num_obs_next:\n resol_ctrl.increase()\n\n next_mask = acq.choose_next_batch(score, num_obs_next, mask_valid=resol_ctrl.mask_valid)\n\n elapsed_time = time.time() - t0\n print('Time for decision: ', elapsed_time)\n\n # get the next measurement\n obs = get_observation_mask(next_mask, con) # mask-based implementaion\n\n log_weight_samples = update_log_prob_mask(obs, next_mask, recon, log_weight_samples, likelihood)\n\n # add new observations\n obs_mask = obs_mask + next_mask\n obs_data[next_mask != 0.0] = obs[next_mask != 0.0]\n obs_data_mask = np.stack((obs_data, obs_mask), axis=2)\n\n saving_data = (prob, x_best_guess, score, next_mask)\n\n return obs_mask, obs_data, obs_data_mask, log_weight_samples, saving_data\n"
] |
[
[
"numpy.exp",
"numpy.where",
"numpy.argmax",
"numpy.stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
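iteration() above exponentiates the log posterior directly (prob = np.exp(log_posterior)). When log weights are large and negative, a log-sum-exp shift is the usual way to keep that exponentiation numerically stable; the shift below is an illustration, not something the original code performs, and the weights are hypothetical:

import numpy as np

log_w = np.array([-1200.0, -1201.0, -1199.5])   # hypothetical log posterior weights
prob = np.exp(log_w - log_w.max())              # shift by the max before exponentiating
prob /= prob.sum()                              # normalize to a proper distribution
best = np.argmax(prob)                          # index of the current best reconstruction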
osanwe/Open-Vocabulary-Learning-on-Source-Code-with-a-Graph-Structured-Cache
|
[
"d0d6e2b2414e6774dd6c78b0c48c2a9db6c3e181",
"d0d6e2b2414e6774dd6c78b0c48c2a9db6c3e181"
] |
[
"tests/data/test_Batch.py",
"models/FITB/CharCNN.py"
] |
[
"# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nimport logging\nimport unittest\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport mxnet as mx\nimport numpy as np\nfrom hypothesis import given, strategies as st\nfrom hypothesis.extra import numpy as hpnp\nfrom mxnet import nd\n\nfrom data.Batch import Batch, ClosedVocabInput, CharCNNInput, GSCVocabInput\nfrom experiments.utils import PaddedArray\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger()\n\n\nclass TestTask(unittest.TestCase):\n @given(input=st.recursive(st.builds(lambda x: nd.array(x, ctx=mx.cpu(0)),\n hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())), st.lists))\n def test_recursive_move_to_context_moves_all_elements(self, input):\n input = [input]\n self.assertNotIn('cpu(1)', str(input)) # Super hacky test...\n Batch.recurse_move_to_context(input, mx.cpu(1))\n self.assertNotIn('cpu(0)', str(input)) # Super hacky test...\n\n\nclass TestClosedVocabInput(unittest.TestCase):\n @given(edges=st.dictionaries(st.characters(), hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),\n dict_class=OrderedDict, min_size=1),\n node_types=st.builds(lambda v, l: PaddedArray(v, l),\n hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),\n hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())),\n node_names=st.builds(lambda v, l: PaddedArray(v, l),\n hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),\n hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())),\n batch_sizes=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()))\n def test_unpack_and_repack_are_inverses(self, edges, node_types, node_names, batch_sizes):\n inp = ClosedVocabInput(edges, node_types, node_names, batch_sizes, mx.cpu())\n originp = deepcopy(inp)\n inp.repack(*inp.unpack())\n inp.batch_sizes = inp.batch_sizes\n self.assertEqual(inp.edges.keys(), originp.edges.keys())\n for k in inp.edges.keys():\n np.testing.assert_equal(inp.edges[k], originp.edges[k])\n np.testing.assert_equal(inp.node_names.values, originp.node_names.values)\n np.testing.assert_equal(inp.node_names.value_lengths, originp.node_names.value_lengths)\n np.testing.assert_equal(inp.node_types.values, originp.node_types.values)\n np.testing.assert_equal(inp.node_types.value_lengths, originp.node_types.value_lengths)\n np.testing.assert_equal(inp.batch_sizes, originp.batch_sizes)\n\n\nclass TestCharCNNInput(unittest.TestCase):\n @given(edges=st.dictionaries(st.characters(), hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),\n dict_class=OrderedDict, min_size=1),\n node_types=st.builds(lambda v, l: PaddedArray(v, l),\n hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),\n hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())),\n node_names=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),\n batch_sizes=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()))\n def test_unpack_and_repack_are_inverses(self, edges, node_types, node_names, batch_sizes):\n inp = CharCNNInput(edges, node_types, node_names, batch_sizes, mx.cpu())\n originp = deepcopy(inp)\n inp.repack(*inp.unpack())\n inp.batch_sizes = inp.batch_sizes\n self.assertEqual(inp.edges.keys(), originp.edges.keys())\n for k in inp.edges.keys():\n np.testing.assert_equal(inp.edges[k], originp.edges[k])\n np.testing.assert_equal(inp.node_names, originp.node_names)\n np.testing.assert_equal(inp.node_types.values, 
originp.node_types.values)\n np.testing.assert_equal(inp.node_types.value_lengths, originp.node_types.value_lengths)\n np.testing.assert_equal(inp.batch_sizes, originp.batch_sizes)\n\n\nclass TestGSCVocabInput(unittest.TestCase):\n @given(edges=st.dictionaries(st.characters(), hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),\n dict_class=OrderedDict, min_size=1),\n node_types=st.builds(lambda v, l: PaddedArray(v, l),\n hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),\n hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes())),\n node_names=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()),\n batch_sizes=hpnp.arrays(dtype=np.dtype('float32'), shape=hpnp.array_shapes()))\n def test_unpack_and_repack_are_inverses(self, edges, node_types, node_names, batch_sizes):\n inp = GSCVocabInput(edges, node_types, node_names, batch_sizes, mx.cpu())\n originp = deepcopy(inp)\n inp.repack(*inp.unpack())\n inp.batch_sizes = inp.batch_sizes\n self.assertEqual(inp.edges.keys(), originp.edges.keys())\n for k in inp.edges.keys():\n np.testing.assert_equal(inp.edges[k], originp.edges[k])\n np.testing.assert_equal(inp.node_names, originp.node_names)\n np.testing.assert_equal(inp.node_types.values, originp.node_types.values)\n np.testing.assert_equal(inp.node_types.value_lengths, originp.node_types.value_lengths)\n np.testing.assert_equal(inp.batch_sizes, originp.batch_sizes)\n",
"# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nimport itertools\nimport logging\nimport re\nfrom collections import OrderedDict\nfrom typing import List\n\nimport mxnet as mx\nimport numpy as np\nimport scipy as sp\nfrom mxnet import nd, gluon\nfrom tqdm import tqdm\n\nfrom data.AugmentedAST import AugmentedAST\nfrom data.BaseDataEncoder import BaseDataEncoder\nfrom data.Batch import Batch, CharCNNInput\nfrom experiments.utils import tuple_of_tuples_to_padded_array\nfrom models.FITB.FITBModel import too_useful_edge_types, FITBModel, edge_types_to_rewire\n\nlogger = logging.getLogger()\n\n\nclass FITBCharCNNDataPoint:\n def __init__(self, subgraph: AugmentedAST,\n node_types: List[List[str]],\n node_names: List[str],\n label: List[int],\n origin_file: str,\n encoder_hash: int):\n self.subgraph = subgraph\n self.edges = None\n self.node_types = node_types\n self.node_names = node_names\n self.label = label\n self.origin_file = origin_file\n self.encoder_hash = encoder_hash\n\n\nclass FITBCharCNNDataEncoder(BaseDataEncoder):\n DataPoint = FITBCharCNNDataPoint\n\n def __init__(self, graphs_and_instances, max_name_encoding_length, **kwargs):\n \"\"\"\n Collects all relevant training-data-wide information and initializes the encoding based on it\n \"\"\"\n all_node_types = set()\n all_edge_types = set()\n logger.info('Initializing {}'.format(self.__class__))\n for graph, _ in tqdm(graphs_and_instances):\n for node, data in graph.nodes:\n if graph.is_variable_node(node):\n if data['parentType'] == 'ClassOrInterfaceDeclaration':\n all_node_types.update([data['parentType']])\n else:\n all_node_types.update(re.split(r'[,.]', data['reference']))\n else:\n all_node_types.add(data['type'])\n\n for _, _, _, data in graph.edges:\n all_edge_types.add(data['type'])\n\n self.fill_in_flag = '__FILL_ME_IN!__'\n self.internal_node_flag = '__INTERNAL_NODE__'\n self.unk_flag = '__UNK__'\n\n # Make sure __PAD__ is always first, since we use 0 as our padding value later\n all_node_types = ['__PAD__', self.unk_flag, self.fill_in_flag] + sorted(list(all_node_types))\n self.all_node_types = {all_node_types[i]: i for i in range(len(all_node_types))}\n self.all_edge_types = frozenset(all_edge_types)\n self.max_name_encoding_length = max_name_encoding_length\n super().__init__(**kwargs)\n\n def encode(self, dp: FITBCharCNNDataPoint) -> None:\n \"\"\"\n Converts (in place) a datapoint into a form the model can consume.\n We leave the names as characters to save space and for flexibility with the embedding size\n \"\"\"\n super().encode(dp)\n self.node_types_to_ints(dp)\n\n dp.node_names = tuple(dp.node_names)\n dp.label = tuple(dp.label)\n\n\nclass FITBCharCNN(FITBModel):\n \"\"\"\n Model that uses a CharCNN on variable names to do the FITB task\n \"\"\"\n\n DataEncoder = FITBCharCNNDataEncoder\n InputClass = CharCNNInput\n\n @staticmethod\n def instance_to_datapoint(graph: AugmentedAST,\n instance,\n data_encoder: FITBCharCNNDataEncoder,\n max_nodes_per_graph: int = None):\n var_use, other_uses = instance\n\n fill_in_flag = data_encoder.fill_in_flag\n internal_node_flag = data_encoder.internal_node_flag\n\n subgraph = graph.get_containing_subgraph((var_use,) + other_uses, max_nodes_per_graph)\n\n # Flag the variable to be filled in, and prune its subgraph\n subgraph.nodes[var_use]['identifier'] = fill_in_flag\n edges_to_prune = subgraph.all_adjacent_edges(var_use, too_useful_edge_types)\n simplified_edges_to_prune = [(e[0], e[1], e[3]['type']) for e in edges_to_prune]\n for edge_type in 
edge_types_to_rewire:\n rewirees_in = []\n rewirees_out = []\n for edge in simplified_edges_to_prune:\n if edge[2] == edge_type and edge[0] != edge[1]:\n if edge[0] == var_use:\n rewirees_out.append(edge)\n elif edge[1] == var_use:\n rewirees_in.append(edge)\n for e_in, e_out in itertools.product(rewirees_in, rewirees_out):\n subgraph.add_edge(e_in[0], e_out[1], type=edge_type)\n subgraph._graph.remove_edges_from(edges_to_prune)\n for node in other_uses:\n subgraph.nodes[node]['other_use'] = True\n\n # Assemble node types, node names, and label\n subgraph.node_ids_to_ints_from_0()\n node_types = []\n node_names = []\n label = []\n for node, data in sorted(subgraph.nodes):\n if 'other_use' in data.keys() and data['other_use'] is True:\n label.append(node)\n if subgraph.is_variable_node(node):\n if data['identifier'] == fill_in_flag:\n node_types.append([fill_in_flag])\n else:\n node_types.append(sorted(list(set(re.split(r'[,.]', data['reference'])))))\n node_names.append(data['identifier'])\n else:\n node_types.append([data['type']])\n node_names.append(internal_node_flag)\n\n return data_encoder.DataPoint(subgraph, node_types, node_names, label, graph.origin_file,\n data_encoder.encoder_hash)\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = kwargs['hidden_size']\n self.type_emb_size = kwargs['type_emb_size']\n self.name_emb_size = kwargs['name_emb_size']\n\n # Initializing input model components\n with self.name_scope():\n self.type_embedding = gluon.nn.Embedding(len(self.data_encoder.all_node_types), self.type_emb_size)\n self.name_emb_1 = gluon.nn.Conv1D(self.name_emb_size, 5, padding=2,\n in_channels=40) # Channels based on BaseDataEncoder.name_to_one_hot\n self.name_emb_pool = gluon.nn.MaxPool1D(pool_size=2, ceil_mode=True)\n self.name_emb_2 = gluon.nn.Conv1D(self.name_emb_size, 3, padding=1, in_channels=self.name_emb_size)\n self.node_init = gluon.nn.Dense(self.hidden_size, in_units=self.type_emb_size + self.name_emb_size)\n\n def batchify(self, data_filepaths: List[str], ctx: mx.context.Context):\n data = [self.data_encoder.load_datapoint(i) for i in data_filepaths]\n\n # Get the size of each graph\n batch_sizes = nd.array([len(dp.node_names) for dp in data], dtype='int32', ctx=ctx)\n\n combined_node_types = tuple(itertools.chain(*[dp.node_types for dp in data]))\n node_types = tuple_of_tuples_to_padded_array(combined_node_types, ctx)\n combined_node_names = tuple(itertools.chain(*[dp.node_names for dp in data]))\n node_names = []\n for name in combined_node_names:\n if name == self.data_encoder.internal_node_flag:\n node_names.append(self.data_encoder.name_to_1_hot('',\n embedding_size=self.data_encoder.max_name_encoding_length,\n mark_as_internal=True))\n elif name == self.data_encoder.fill_in_flag:\n node_names.append(\n self.data_encoder.name_to_1_hot('', embedding_size=self.data_encoder.max_name_encoding_length,\n mark_as_special=True))\n else:\n node_names.append(\n self.data_encoder.name_to_1_hot(name, embedding_size=self.data_encoder.max_name_encoding_length))\n node_names = nd.array(np.stack(node_names), dtype='float32', ctx=ctx)\n\n # Combine all the adjacency matrices into one big, disconnected graph\n edges = OrderedDict()\n for edge_type in self.data_encoder.all_edge_types:\n adj_mat = sp.sparse.block_diag([dp.edges[edge_type] for dp in data]).tocsr()\n adj_mat = nd.sparse.csr_matrix((adj_mat.data, adj_mat.indices, adj_mat.indptr), shape=adj_mat.shape,\n dtype='float32', ctx=ctx)\n edges[edge_type] = adj_mat\n\n # 1-hot whether a 
variable should have been indicated or not\n length = 0\n labels = []\n # Relabel the labels to match the indices in the batchified graph\n for dp in data:\n labels += [i + length for i in dp.label]\n length += len(dp.node_types)\n labels = nd.array(labels, dtype='int32', ctx=ctx)\n one_hot_labels = nd.zeros(length, dtype='float32', ctx=ctx)\n one_hot_labels[labels] = 1\n\n data = self.InputClass(edges, node_types, node_names, batch_sizes, ctx)\n return Batch(data, one_hot_labels)\n\n def init_hidden_states_and_edges(self, F, graph):\n # Get type and name embeddings\n type_emb = self.type_embedding(graph.node_types.values)\n type_emb = F.SequenceMask(type_emb, use_sequence_length=True, sequence_length=graph.node_types.value_lengths,\n axis=1)\n type_emb = F.max(type_emb, axis=1)\n name_emb = self.name_emb_1(graph.node_names)\n name_emb = self.name_emb_pool(name_emb)\n name_emb = self.name_emb_2(name_emb)\n name_emb = F.max(name_emb, axis=2)\n\n init_hidden_states = F.concat(type_emb, name_emb, dim=1)\n init_hidden_states = self.node_init(init_hidden_states)\n\n self.init_hidden_states = init_hidden_states\n return init_hidden_states, graph.edges\n"
] |
[
[
"numpy.testing.assert_equal",
"numpy.dtype"
],
[
"scipy.sparse.block_diag",
"numpy.stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
oscarhiggott/Cirq
|
[
"60a8392e899484d81be55f89f6940c9ae5c5a053"
] |
[
"cirq/ops/controlled_gate_test.py"
] |
[
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union, Tuple, cast\n\nimport numpy as np\nimport pytest\nimport sympy\n\nimport cirq\nfrom cirq.type_workarounds import NotImplementedType\n\n\nclass GateUsingWorkspaceForApplyUnitary(cirq.SingleQubitGate):\n def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs\n ) -> Union[np.ndarray, NotImplementedType]:\n args.available_buffer[...] = args.target_tensor\n args.target_tensor[...] = 0\n return args.available_buffer\n\n def _unitary_(self):\n return np.eye(2)\n\n def __eq__(self, other):\n return isinstance(other, type(self))\n\n def __repr__(self):\n return ('cirq.ops.controlled_gate_test.'\n 'GateUsingWorkspaceForApplyUnitary()')\n\n\nclass GateAllocatingNewSpaceForResult(cirq.SingleQubitGate):\n def _apply_unitary_(self, args: cirq.ApplyUnitaryArgs\n ) -> Union[np.ndarray, NotImplementedType]:\n assert len(args.axes) == 1\n a = args.axes[0]\n seed = cast(Tuple[Union[int, slice, 'ellipsis'], ...],\n (slice(None),))\n zero = seed*a + (0, Ellipsis)\n one = seed*a + (1, Ellipsis)\n result = np.zeros(args.target_tensor.shape, args.target_tensor.dtype)\n result[zero] = args.target_tensor[zero]*2 + args.target_tensor[one]*3\n result[one] = args.target_tensor[zero]*5 + args.target_tensor[one]*7\n return result\n\n def _unitary_(self):\n return np.array([[2, 3], [5, 7]])\n\n def __eq__(self, other):\n return isinstance(other, type(self))\n\n def __repr__(self):\n return ('cirq.ops.controlled_gate_test.'\n 'GateAllocatingNewSpaceForResult()')\n\nclass RestrictedGate(cirq.SingleQubitGate):\n pass\n\n\nCY = cirq.ControlledGate(cirq.Y)\nCCH = cirq.ControlledGate(cirq.ControlledGate(cirq.H))\nCRestricted = cirq.ControlledGate(RestrictedGate())\n\n\ndef test_init():\n gate = cirq.ControlledGate(cirq.Z)\n assert gate.sub_gate is cirq.Z\n assert gate.num_qubits() == 2\n\n\ndef test_validate_args():\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n c = cirq.NamedQubit('c')\n\n # Need a control qubit.\n with pytest.raises(ValueError):\n CRestricted.validate_args([a])\n CRestricted.validate_args([a, b])\n\n # CY is a two-qubit operation (control + single-qubit sub gate).\n with pytest.raises(ValueError):\n CY.validate_args([a])\n with pytest.raises(ValueError):\n CY.validate_args([a, b, c])\n CY.validate_args([a, b])\n\n # Applies when creating operations.\n with pytest.raises(ValueError):\n _ = CY.on(a)\n with pytest.raises(ValueError):\n _ = CY.on(a, b, c)\n _ = CY.on(a, b)\n\n\ndef test_eq():\n eq = cirq.testing.EqualsTester()\n eq.add_equality_group(CY, cirq.ControlledGate(cirq.Y))\n eq.add_equality_group(CCH)\n eq.add_equality_group(cirq.ControlledGate(cirq.H))\n eq.add_equality_group(cirq.ControlledGate(cirq.X))\n eq.add_equality_group(cirq.X)\n\n\ndef test_unitary():\n cxa = cirq.ControlledGate(cirq.X**sympy.Symbol('a'))\n assert not cirq.has_unitary(cxa)\n assert cirq.unitary(cxa, None) is None\n\n assert cirq.has_unitary(CY)\n assert cirq.has_unitary(CCH)\n 
np.testing.assert_allclose(\n cirq.unitary(CY),\n np.array([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, -1j],\n [0, 0, 1j, 0],\n ]),\n atol=1e-8)\n\n np.testing.assert_allclose(\n cirq.unitary(CCH),\n np.array([\n [1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, np.sqrt(0.5), np.sqrt(0.5)],\n [0, 0, 0, 0, 0, 0, np.sqrt(0.5), -np.sqrt(0.5)],\n ]),\n atol=1e-8)\n\n\n\[email protected]('gate', [\n cirq.X,\n cirq.X**0.5,\n cirq.Rx(np.pi),\n cirq.Rx(np.pi / 2),\n cirq.Z,\n cirq.H,\n cirq.CNOT,\n cirq.SWAP,\n cirq.CCZ,\n cirq.ControlledGate(cirq.ControlledGate(cirq.CCZ)),\n GateUsingWorkspaceForApplyUnitary(),\n GateAllocatingNewSpaceForResult(),\n])\ndef test_controlled_gate_is_consistent(gate: cirq.Gate):\n cgate = cirq.ControlledGate(gate)\n cirq.testing.assert_implements_consistent_protocols(cgate)\n\n\ndef test_pow_inverse():\n assert cirq.inverse(CRestricted, None) is None\n assert cirq.pow(CRestricted, 1.5, None) is None\n assert cirq.pow(CY, 1.5) == cirq.ControlledGate(cirq.Y**1.5)\n assert cirq.inverse(CY) == CY**-1 == CY\n\n\ndef test_extrapolatable_effect():\n a = cirq.NamedQubit('a')\n b = cirq.NamedQubit('b')\n\n assert cirq.ControlledGate(cirq.Z)**0.5 == cirq.ControlledGate(cirq.Z**0.5)\n\n assert (cirq.ControlledGate(cirq.Z).on(a, b)**0.5 ==\n cirq.ControlledGate(cirq.Z**0.5).on(a, b))\n\n\ndef test_reversible():\n assert (cirq.inverse(cirq.ControlledGate(cirq.S)) ==\n cirq.ControlledGate(cirq.S**-1))\n\n\nclass UnphaseableGate(cirq.SingleQubitGate):\n pass\n\n\ndef test_parameterizable():\n a = sympy.Symbol('a')\n cz = cirq.ControlledGate(cirq.Y)\n cza = cirq.ControlledGate(cirq.YPowGate(exponent=a))\n assert cirq.is_parameterized(cza)\n assert not cirq.is_parameterized(cz)\n assert cirq.resolve_parameters(cza, cirq.ParamResolver({'a': 1})) == cz\n\n\ndef test_circuit_diagram_info():\n assert cirq.circuit_diagram_info(CY) == cirq.CircuitDiagramInfo(\n wire_symbols=('@', 'Y'),\n exponent=1)\n\n assert cirq.circuit_diagram_info(cirq.ControlledGate(cirq.Y**0.5)\n ) == cirq.CircuitDiagramInfo(\n wire_symbols=('@', 'Y'),\n exponent=0.5)\n\n assert cirq.circuit_diagram_info(cirq.ControlledGate(cirq.S)\n ) == cirq.CircuitDiagramInfo(\n wire_symbols=('@', 'S'),\n exponent=1)\n\n class UndiagrammableGate(cirq.SingleQubitGate):\n pass\n\n assert cirq.circuit_diagram_info(cirq.ControlledGate(UndiagrammableGate()),\n default=None) is None\n\n\n# A contrived multiqubit Hadamard gate that asserts the consistency of\n# the passed in Args and puts an H on all qubits\n# displays them as 'H(qubit)' on the wire\nclass MultiH(cirq.MultiQubitGate):\n\n def _circuit_diagram_info_(self,\n args: cirq.CircuitDiagramInfoArgs\n ) -> cirq.CircuitDiagramInfo:\n assert args.known_qubit_count is not None\n assert args.known_qubits is not None\n\n return cirq.CircuitDiagramInfo(\n wire_symbols=tuple('H({})'.format(q) for q in args.known_qubits),\n connected=True\n )\n\n\ndef test_circuit_diagram():\n qubits = cirq.LineQubit.range(3)\n c = cirq.Circuit()\n c.append(cirq.ControlledGate(MultiH(2))(*qubits))\n\n cirq.testing.assert_has_diagram(c, \"\"\"\n0: ───@──────\n │\n1: ───H(1)───\n │\n2: ───H(2)───\n\"\"\")\n\n\nclass MockGate(cirq.TwoQubitGate):\n\n def _circuit_diagram_info_(self,\n args: cirq.CircuitDiagramInfoArgs\n ) -> cirq.CircuitDiagramInfo:\n self.captured_diagram_args = args\n return cirq.CircuitDiagramInfo(wire_symbols=tuple(['MOCK']), exponent=1,\n 
connected=True)\n\n\ndef test_uninformed_circuit_diagram_info():\n qbits = cirq.LineQubit.range(3)\n mock_gate = MockGate()\n cgate = cirq.ControlledGate(mock_gate)(*qbits)\n\n args = cirq.CircuitDiagramInfoArgs.UNINFORMED_DEFAULT\n\n assert (cirq.circuit_diagram_info(cgate, args) ==\n cirq.CircuitDiagramInfo(wire_symbols=('@', 'MOCK'), exponent=1,\n connected=True))\n assert mock_gate.captured_diagram_args == args\n\n\ndef test_bounded_effect():\n assert cirq.trace_distance_bound(CY**0.001) < 0.01\n\n\ndef test_repr():\n assert repr(\n cirq.ControlledGate(cirq.Z)) == 'cirq.ControlledGate(sub_gate=cirq.Z)'\n\n\ndef test_str():\n assert str(cirq.ControlledGate(cirq.X)) == 'CX'\n assert str(cirq.ControlledGate(cirq.Z)) == 'CZ'\n assert str(cirq.ControlledGate(cirq.S)) == 'CS'\n assert str(cirq.ControlledGate(cirq.Z**0.125)) == 'CZ**0.125'\n assert str(cirq.ControlledGate(cirq.ControlledGate(cirq.S))) == 'CCS'\n"
] |
[
[
"numpy.eye",
"numpy.array",
"numpy.zeros",
"numpy.sqrt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kolk/qa_factoid2natural
|
[
"ccdd0096217c8e88b148f353f0c89628b85f9c4d"
] |
[
"onmt/inputters/text_dataset.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom functools import partial\n\nimport torch\nfrom torchtext.data import Field, RawField\n\nfrom onmt.inputters.dataset_base import DatasetBase\n\n\nclass TextDataset(DatasetBase):\n \"\"\"\n Build `Example` objects, `Field` objects, and filter_pred function\n from text corpus.\n\n Args:\n fields (dict): a dictionary of `torchtext.data.Field`.\n Keys are like 'src', 'tgt', 'src_map', and 'alignment'.\n src_examples_iter (dict iter): preprocessed source example\n dictionary iterator.\n tgt_examples_iter (dict iter): preprocessed target example\n dictionary iterator.\n dynamic_dict (bool)\n \"\"\"\n\n @staticmethod\n def sort_key(ex):\n if hasattr(ex, \"tgt\"):\n if hasattr(ex, \"ans\"):\n return len(ex.src[0]), len(ex.ans[0]), len(ex.tgt[0])\n else:\n return len(ex.src[0]), len(ex.tgt[0])\n else:\n if hasattr(ex, \"ans\"):\n return len(ex.src[0]), len(ex.ans[0])\n else:\n return len(ex.src[0])\n\n\n @classmethod\n def make_examples(cls, sequences, side):\n \"\"\"\n Args:\n sequences: path to corpus file or iterable\n truncate (int): maximum sequence length (0 for unlimited).\n side (str): \"src\" or \"tgt\" or \"ans\". \n\n Yields:\n dictionaries whose keys are the names of fields and whose\n values are more or less the result of tokenizing with those\n fields.\n \"\"\"\n if isinstance(sequences, str):\n sequences = cls._read_file(sequences)\n for i, seq in enumerate(sequences):\n yield {side: seq, \"indices\": i}\n\n\n# mix this with partial\ndef _feature_tokenize(\n string, layer=0, tok_delim=None, feat_delim=None, truncate=None):\n tokens = string.split(tok_delim)\n if truncate is not None:\n tokens = tokens[:truncate]\n if feat_delim is not None:\n tokens = [t.split(feat_delim)[layer] for t in tokens]\n return tokens\n\n\nclass TextMultiField(RawField):\n def __init__(self, base_name, base_field, feats_fields):\n super(TextMultiField, self).__init__()\n self.fields = [(base_name, base_field)]\n for name, ff in sorted(feats_fields, key=lambda kv: kv[0]):\n self.fields.append((name, ff))\n\n @property\n def base_field(self):\n return self.fields[0][1]\n\n def process(self, batch, device=None):\n # batch (list(list(list))): batch_size x len(self.fields) x seq_len\n batch_by_feat = list(zip(*batch))\n base_data = self.base_field.process(batch_by_feat[0], device=device)\n if self.base_field.include_lengths:\n # lengths: batch_size\n base_data, lengths = base_data\n\n feats = [ff.process(batch_by_feat[i], device=device)\n for i, (_, ff) in enumerate(self.fields[1:], 1)]\n levels = [base_data] + feats\n # data: seq_len x batch_size x len(self.fields)\n data = torch.stack(levels, 2)\n if self.base_field.include_lengths:\n return data, lengths\n else:\n return data\n\n def preprocess(self, x):\n return [f.preprocess(x) for _, f in self.fields]\n\n def __getitem__(self, item):\n return self.fields[item]\n\n\ndef text_fields(base_name, **kwargs):\n \"\"\"Create text fields.\n Args:\n base_name (str)\n n_feats (int)\n include_lengths (bool)\n pad (str, optional): Defaults to <blank>.\n bos (str or NoneType, optional): Defaults to <s>\n eos (str or NoneType, optional): Defaults to </s>\n truncate (bool or NoneType, optional): Defaults to None.\n \"\"\"\n\n n_feats = kwargs[\"n_feats\"]\n include_lengths = kwargs[\"include_lengths\"]\n pad = kwargs.get(\"pad\", \"<blank>\")\n bos = kwargs.get(\"bos\", \"<s>\")\n eos = kwargs.get(\"eos\", \"</s>\")\n truncate = kwargs.get(\"truncate\", None)\n fields_ = []\n feat_delim = u\"│\" if n_feats > 0 else None\n for i in 
range(n_feats + 1):\n name = base_name + \"_feat_\" + str(i - 1) if i > 0 else base_name\n tokenize = partial(\n _feature_tokenize,\n layer=i,\n truncate=truncate,\n feat_delim=feat_delim)\n use_len = i == 0 and include_lengths\n feat = Field(\n init_token=bos, eos_token=eos,\n pad_token=pad, tokenize=tokenize,\n include_lengths=use_len)\n fields_.append((name, feat))\n assert fields_[0][0] == base_name # sanity check\n field = TextMultiField(fields_[0][0], fields_[0][1], fields_[1:])\n return [(base_name, field)]\n"
] |
[
[
"torch.stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
landdafku11/CogAlg
|
[
"a95ea498af3104893f92028f466a56ef3a211f10",
"a95ea498af3104893f92028f466a56ef3a211f10"
] |
[
"frame_2D_alg/alternative versions/frame_blobs_lists.py",
"frame_2D_alg/alternative versions/comp_slice_flip.py"
] |
[
"import numpy as np\nfrom time import time\nfrom collections import deque, namedtuple\n\n''' \n frame_blobs() defines blobs: contiguous areas of positive or negative deviation of gradient. Gradient is estimated \n as |dx| + |dy|, then selectively and more precisely as hypot(dx, dy), from cross-comparison among adjacent pixels.\n Complemented by intra_blob (recursive search within blobs), it will be a 2D version of first-level core algorithm.\n\n frame_blobs() performs several levels (Le) of encoding, incremental per scan line defined by vertical coordinate y.\n value of y per Le line is shown relative to y of current input line, incremented by top-down scan of input image:\n canopy \n\n 1Le, line y: comp_pixel (lateral and vertical comp) -> pixel + derivatives tuple: dert ) frame of derts: dert__ \n 2Le, line y-1: form_P(dert2) -> 1D pattern P\n 3Le, line y-2: scan_P_(P, hP)-> hP, roots: down-connections, fork_: up-connections between Ps \n 4Le, line y-3: form_segment(hP, seg) -> seg: merge vertically-connected _Ps in non-forking blob segments\n 5Le, line y-4+ seg depth: form_blob(seg, blob): merge connected segments in fork_ incomplete blobs, recursively \n\n All 2D functions (y_comp, scan_P_, form_segment, form_blob) input two lines: higher and lower, convert elements of \n lower line into elements of new higher line, then displace elements of old higher line into higher function.\n\n Higher-line elements include additional variables, derived while they were lower-line elements.\n Processing is mostly sequential because blobs are irregular and very difficult to map to matrices.\n\n prefix '_' denotes higher-line variable or pattern, vs. same-type lower-line variable or pattern,\n postfix '_' denotes array name, vs. same-name elements of that array\n'''\n\n# Structures:\n\nDert = namedtuple('Dert', 'G, A, Dy, Dx, L, Ly')\nPattern = namedtuple('Pattern', 'sign, x0, I, G, Dy, Dx, L, dert_')\nSegment = namedtuple('Segment', 'y, I, G, Dy, Dx, L, Ly, Py_')\nBlob = namedtuple('Blob', 'Dert, sign, rng, box, map, seg_, dert__, Layers, hDerts, root_blob')\nFrame = namedtuple('Frame', 'I, G, Dy, Dx, blob_, i__, dert__')\n\n# Adjustable parameters:\nkwidth = 3 # Declare initial kernel size. 
Tested values are 2 or 3.\nave = 20\nDEBUG = True\n\nif kwidth == 3:\n ave *= 4\n rng = 1\nelif kwidth == 2:\n rng = 0\nelse:\n print(\"kwidth must be 2 or 3!\")\n\n\n# ************ MODULE FUNCTIONS *****************************************************************************************\n# -image_to_blobs()\n# -comp_pixel()\n# -form_P_()\n# -scan_P_()\n# -form_seg_()\n# -form_blob()\n# ***********************************************************************************************************************\n\ndef image_to_blobs(image): # root function, postfix '_' denotes array vs element, prefix '_' denotes higher- vs lower- line variable\n\n i__, dert__ = comp_pixel(image) # vertically and horizontally bilateral comparison of adjacent pixels\n frame = Frame(0, 0, 0, 0, [], i__, dert__) # params, blob_, dert__\n seg_ = deque() # buffer of running segments\n\n height, width = image.shape\n\n for y in range(height - kwidth + 1): # first and last row are discarded\n P_ = form_P_(i__[y], dert__[:, y].T) # horizontal clustering\n P_ = scan_P_(P_, seg_, frame)\n seg_ = form_seg_(y, P_, frame)\n\n while seg_: form_blob(seg_.popleft(), frame) # frame ends, last-line segs are merged into their blobs\n return frame # frame of 2D patterns\n\n # ---------- image_to_blobs() end -----------------------------------------------------------------------------------\n\n\ndef comp_pixel(image): # comparison between pixel and its neighbours within kernel, for the whole image\n\n # Initialize variables:\n if kwidth == 2:\n\n # Compare:\n dy__ = (image[1:, 1:] + image[:-1, 1:]) + (image[1:, :-1] - image[:-1, :-1]) * 0.5\n dx__ = (image[1:, 1:] - image[1:, :-1]) + (image[:-1, 1:] - image[:-1, :-1]) * 0.5\n\n # Sum pixel values:\n p__ = (image[:-1, :-1]\n + image[:-1, 1:]\n + image[1:, :-1]\n + image[1:, 1:]) * 0.25\n\n else:\n ycoef = np.array([-0.5, -1, -0.5, 0, 0.5, 1, 0.5, 0])\n xcoef = np.array([-0.5, 0, 0.5, 1, 0.5, 0, -0.5, -1])\n\n # Compare by subtracting centered image from translated image:\n d___ = np.array(list(map(lambda trans_slices:\n image[trans_slices] - image[1:-1, 1:-1],\n [\n (slice(None, -2), slice(None, -2)),\n (slice(None, -2), slice(1, -1)),\n (slice(None, -2), slice(2, None)),\n (slice(1, -1), slice(2, None)),\n (slice(2, None), slice(2, None)),\n (slice(2, None), slice(1, -1)),\n (slice(2, None), slice(None, -2)),\n (slice(1, -1), slice(None, -2)),\n ]))).swapaxes(0, 2).swapaxes(0, 1)\n\n # Decompose differences:\n dy__ = (d___ * ycoef).sum(axis=2)\n dx__ = (d___ * xcoef).sum(axis=2)\n\n # Sum pixel values:\n p__ = image[1:-1, 1:-1]\n\n # Compute gradient magnitudes per kernel:\n g__ = np.hypot(dy__, dx__)\n\n return p__[np.newaxis, ...], np.around(np.stack((g__, dy__, dx__), axis=0))\n\n\ndef form_P_(i_, dert_): # horizontally cluster and sum consecutive pixels and their derivatives into Ps\n\n P_ = deque() # row of Ps\n i = i_[0]\n g, dy, dx = dert_[0] # first dert\n x0, I, G, Dy, Dx, L = 0, i, g, dy, dx, 1 # P params\n vg = g - ave\n _s = vg > 0 # sign\n\n for x, (i, (g, dy, dx)) in enumerate(zip(i_[1:], dert_[1:]), start=1):\n vg = g - ave\n s = vg > 0\n if s != _s: # P is terminated and new P is initialized\n P = Pattern(_s, x0, I, G, Dy, Dx, L, dert_[x0:x0 + L])\n P_.append(P)\n x0, I, G, Dy, Dx, L = x, 0, 0, 0, 0, 0\n\n # accumulate P params:\n I += i\n G += vg\n Dy += dy\n Dx += dx\n L += 1\n _s = s # prior sign\n\n P = Pattern(_s, x0, I, G, Dy, Dx, L, dert_[x0:x0 + L])\n P_.append(P) # last P in row\n return P_\n\n # ---------- form_P_() end 
------------------------------------------------------------------------------------------\n\n\ndef scan_P_(P_, seg_, frame): # integrate x overlaps (forks) between same-sign Ps and _Ps into blob segments\n\n new_P_ = deque()\n\n if P_ and seg_: # if both are not empty\n P = P_.popleft() # input-line Ps\n seg = seg_.popleft() # higher-line segments,\n _P = seg[-3][-1] # last element of each segment is higher-line P\n fork_ = []\n\n while True:\n x0 = P.x0 # first x in P\n xn = x0 + P.L # first x in next P\n _x0 = _P.x0 # first x in _P\n _xn = _x0 + _P.L # first x in next _P\n\n if P.sign == _P.sign and _x0 < xn and x0 < _xn: # test for sign match and x overlap\n seg[-1] += 1 # roots\n fork_.append(seg) # P-connected segments are buffered into fork_\n\n if xn < _xn: # _P overlaps next P in P_\n new_P_.append((P, fork_))\n fork_ = []\n if P_:\n P = P_.popleft() # load next P\n else: # terminate loop\n if seg[-1] != 1: # if roots != 1: terminate seg\n form_blob(seg, frame)\n break\n else: # no next-P overlap\n if seg[-1] != 1: # if roots != 1: terminate seg\n form_blob(seg, frame)\n\n if seg_: # load next _P\n seg = seg_.popleft()\n _P = seg[-3][-1]\n else: # if no seg left: terminate loop\n new_P_.append((P, fork_))\n break\n\n while P_: # terminate Ps and segs that continue at line's end\n new_P_.append((P_.popleft(), [])) # no fork\n while seg_:\n form_blob(seg_.popleft(), frame) # roots always == 0\n\n return new_P_\n\n # ---------- scan_P_() end ------------------------------------------------------------------------------------------\n\n\ndef form_seg_(y, P_, frame): # convert or merge every P into segment, merge blobs\n new_seg_ = deque()\n\n while P_:\n P, fork_ = P_.popleft()\n\n s, x0, I, G, Dy, Dx, L, dert_ = P\n xn = x0 + L # next-P x0\n\n if not fork_: # new_seg is initialized with initialized blob\n blob = [s, [0, 0, 0, 0, 0, 0], [], 1, [y, x0, xn]] # s, [I, G, Dy, Dx, L], seg_, open_segments, box\n new_seg = [y, I, G, Dy, Dx, L, 1, [P], blob, 0] # y0, I, G, Dy, Dx, N, L, Ly, Py_, blob, roots\n blob[2].append(new_seg)\n else:\n if len(fork_) == 1 and fork_[0][3] == 1: # P has one fork and that fork has one root\n new_seg = fork_[0]\n\n Is, Gs, Dys, Dxs, Ls, Ly = new_seg[1:-3] # fork segment params, P is merged into segment:\n new_seg[1:-3] = [Is + I, Gs + G, Dys + Dy, Dxs + Dx, Ls + L, Ly + 1]\n new_seg[-3].append(P) # Py_: vertical buffer of Ps\n new_seg[-1] = 0 # reset roots\n blob = new_seg[-2]\n\n else: # if > 1 forks, or 1 fork that has > 1 roots:\n blob = fork_[0][-2]\n new_seg = [y, I, G, Dy, Dx, L, 1, [P], blob, 0] # new_seg is initialized with fork blob\n blob[2].append(new_seg) # segment is buffered into blob\n\n if len(fork_) > 1: # merge blobs of all forks\n if fork_[0][-1] == 1: # if roots == 1: fork hasn't been terminated\n form_blob(fork_[0], frame) # merge seg of 1st fork into its blob\n\n for fork in fork_[1:len(fork_)]: # merge blobs of other forks into blob of 1st fork\n if fork[-1] == 1:\n form_blob(fork, frame)\n\n if not fork[-2] is blob:\n params, seg_, open_segs, box = fork[-2][1:] # merged blob, omit sign\n blob[1] = [par1 + par2 for par1, par2 in zip(blob[1], params)] # sum merging blobs\n blob[3] += open_segs\n blob[4][0] = min(blob[4][0], box[0]) # extend box y0\n blob[4][1] = min(blob[4][1], box[1]) # extend box x0\n blob[4][2] = max(blob[4][2], box[2]) # extend box xn\n for seg in seg_:\n if not seg is fork:\n seg[-2] = blob # blobs in other forks are references to blob in the first fork\n blob[2].append(seg) # buffer of merged root segments\n 
fork[-2] = blob\n blob[2].append(fork)\n blob[3] -= 1 # open_segments -= 1: shared with merged blob\n\n blob[4][1] = min(blob[4][1], x0) # extend box x0\n blob[4][2] = max(blob[4][2], xn) # extend box xn\n new_seg_.append(new_seg)\n\n return new_seg_\n\n # ---------- form_seg_() end --------------------------------------------------------------------------------------------\n\n\ndef form_blob(term_seg, frame): # terminated segment is merged into continued or initialized blob (all connected segments)\n\n y0s, Is, Gs, Dys, Dxs, Ls, Lys, Py_, blob, roots = term_seg\n I, G, Dy, Dx, L, Ly = blob[1]\n blob[1] = [I + Is, G + Gs, Dy + Dys, Dx + Dxs, L + Ls, Ly + Lys]\n blob[-2] += roots - 1 # number of open segments\n\n if blob[-2] == 0: # if open_segments == 0: blob is terminated and packed in frame\n s, [I, G, Dy, Dx, L, Ly], seg_, open_segs, [y0, x0, xn] = blob\n\n yn = y0s + Lys # yn from last segment\n mask = np.zeros((yn - y0, xn - x0), dtype=bool) # local map of blob\n new_seg_ = []\n for seg in seg_:\n y0s, Is, Gs, Dys, Dxs, Ls, Lys, Py_ = seg[:-2] # blob and roots are ignored\n seg = Segment(y0s, Is, Gs, Dys, Dxs, Ls, Lys, Py_) # convert to Segment namedtuple\n new_seg_.append(seg) # add segment to blob as namedtuple\n for y, P in enumerate(seg.Py_, start=seg.y):\n Pxn = P.x0 + P.L\n mask[y - y0, P.x0 - x0:Pxn - x0] = True\n\n del seg_\n\n frame[0] += I\n frame[1] += G\n frame[2] += Dy\n frame[3] += Dx\n frame[4].append(Blob(Dert=[G, None, Dy, Dx, L, Ly], # core Layer of current blob, A is None for g_Dert\n sign=s, # current g | ga sign\n rng=rng, # comp range\n map=map, # boolean map of blob to compute overlap\n box=(y0, yn, x0, xn), # boundary box\n seg_=new_seg_, # references down blob formation tree, in vertical (horizontal) order\n dert__=[],\n Layers=[], # summed reps of lower layers across sub_blob derivation tree\n hDerts=[I], # higher Dert params += higher-dert params, starting with I\n root_blob=blob # ref for feedback of all Derts params summed in sub_blobs\n ))\n del blob\n\n # ---------- form_blob() end ----------------------------------------------------------------------------------------\n\n\n# ************ PROGRAM BODY *********************************************************************************************\nif __name__ == '__main__':\n\n # Load inputs --------------------------------------------------------------------\n from utils import imread\n\n image = imread('./../images/raccoon_eye.jpg').astype(int)\n\n # Main ---------------------------------------------------------------------------\n\n start_time = time()\n frame_of_blobs = image_to_blobs(image)\n # frame_of_blobs = intra_blob(frame_of_blobs) # evaluate for deeper clustering inside each blob, recursively\n\n # DEBUG --------------------------------------------------------------------------\n if DEBUG:\n from utils import *\n\n draw('./../debug/root_blobs', map_frame(frame_of_blobs))\n\n f_angle = 0b01\n f_derive = 0b10\n # from intra_blob_test import intra_blob\n # intra_blob(frame_of_blobs[1])\n\n # END DEBUG -----------------------------------------------------------------------\n\n end_time = time() - start_time\n print(end_time)\n # ************ PROGRAM BODY END ******************************************************************************************",
"'''\nComp_slice is a terminal fork of intra_blob.\n-\nIt traces blob axis by cross-comparing vertically adjacent Ps: horizontal slices across an edge blob.\nThese low-M high-Ma blobs are vectorized into outlines of adjacent flat or high-M blobs.\n(high match: M / Ma, roughly corresponds to low gradient: G / Ga)\n-\nVectorization is clustering of Ps + their derivatives (derPs) into PPs: patterns of Ps that describe an edge.\nThis process is a reduced-dimensionality (2D->1D) version of cross-comp and clustering cycle, common across this project.\nAs we add higher dimensions (2D alg, 3D alg), this dimensionality reduction is done in salient high-aspect blobs\n(likely edges / contours in 2D or surfaces in 3D) to form more compressed skeletal representations of full-D patterns.\n-\nPlease see diagram:\nhttps://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/comp_slice_flip.drawio\n'''\n\nfrom collections import deque\nimport sys\nimport numpy as np\nfrom class_cluster import ClusterStructure, NoneType\nfrom slice_utils import draw_PP_\n\nimport warnings # to detect overflow issue, in case of infinity loop\nwarnings.filterwarnings('error')\n\nave = 30 # filter or hyper-parameter, set as a guess, latter adjusted by feedback, not needed here\naveG = 50 # filter for comp_g, assumed constant direction\nflip_ave = .1\nflip_ave_FPP = 0 # flip large FPPs only (change to 0 for debug purpose)\ndiv_ave = 200\nave_dX = 10 # difference between median x coords of consecutive Ps\nave_Dx = 10\nave_mP = 8 # just a random number right now.\nave_rmP = .7 # the rate of mP decay per relative dX (x shift) = 1: initial form of distance\nave_ortho = 20\n\nclass CDert(ClusterStructure):\n I = int\n Dy = int\n Dx = int\n G = int\n M = int\n Dyy = int\n Dyx = int\n Dxy = int\n Dxx = int\n Ga = int\n Ma = int\n Mdx = int\n Ddx = int\n flip_val = int\n\nclass CP(ClusterStructure):\n\n Dert = object # summed kernel parameters\n L = int\n x0 = int\n dX = int # shift of average x between P and _P, if any\n y = int # for visualization only\n sign = NoneType # sign of gradient deviation\n dert_ = list # array of pixel-level derts: (p, dy, dx, g, m), extended in intra_blob\n upconnect_ = list\n downconnect_cnt = int\n derP = object # derP object reference\n # only in Pd:\n Pm = object # reference to root P\n dxdert_ = list\n # only in Pm:\n Pd_ = list\n\nclass CderP(ClusterStructure):\n ## derDert\n mP = int\n dP = int\n mx = int\n dx = int\n mL = int\n dL = int\n mDx = int\n dDx = int\n mDy = int\n dDy = int\n P = object # lower comparand\n _P = object # higher comparand\n PP = object # FPP if flip_val, contains this derP\n fxflip = bool # flag: derP is a higher splicing | flipping point\n _fxflip = bool # flag: derP is a lower splicing | flipping point\n # from comp_dx\n fdx = NoneType\n # optional:\n dDdx = int\n mDdx = int\n dMdx = int\n mMdx = int\n\nclass CPP(ClusterStructure):\n\n Dert = object # set of P params accumulated in PP\n derPP = object # set of derP params accumulated in PP\n # between PPs:\n upconnect_ = list\n downconnect_cnt = int\n fPPm = NoneType # PPm if 1, else PPd; not needed if packed in PP_?\n fdiv = NoneType\n box = list # for visualization only, original box before flipping\n xflip_derP_ = list # derPs at potential splicing points\n xflip_PP_ = list # potentially spliced PPs in FPP\n # FPP params\n dert__ = list\n mask__ = bool\n # PP params\n derP__ = list\n P__ = list\n # PP FPP params\n derPf__ = list\n Pf__ = list\n PPmm_ = list\n PPdm_ = list\n PPmmf_ = list\n PPdmf_ = list\n 
# PPd params\n derPd__ = list\n Pd__ = list\n # PPd FPP params\n derPdf__ = list\n Pdf__ = list\n PPmd_ = list\n PPdd_ = list\n PPmdf_ = list\n PPddf_ = list # comp_dx params\n\n# Functions:\n'''\nleading '_' denotes higher-line variable or structure, vs. same-type lower-line variable or structure\ntrailing '_' denotes array name, vs. same-name elements of that array. '__' is a 2D array\nleading 'f' denotes flag\n-\nrough workflow:\n-\nintra_blob -> slice_blob(blob) -> derP_ -> PP,\nif flip_val(PP is FPP): pack FPP in blob.PP_ -> flip FPP.dert__ -> slice_blob(FPP) -> pack PP in FPP.PP_\nelse (PP is PP): pack PP in blob.PP_\n'''\n\ndef slice_blob(blob, verbose=False):\n '''\n Slice_blob converts selected smooth-edge blobs (high G, low Ga) into sliced blobs,\n adding horizontal blob slices: Ps or 1D patterns\n '''\n if not isinstance(blob, CPP): # input is blob, else FPP, no flipping\n flip_eval_blob(blob)\n\n dert__ = blob.dert__\n mask__ = blob.mask__\n height, width = dert__[0].shape\n if verbose: print(\"Converting to image...\")\n\n for fPPd in range(2): # run twice, 1st loop fPPd=0: form PPs, 2nd loop fPPd=1: form PPds\n\n P__ , derP__, Pd__, derPd__ = [], [], [], []\n zip_dert__ = zip(*dert__)\n _P_ = form_P_(list(zip(*next(zip_dert__))), mask__[0], 0) # 1st upper row\n P__ += _P_ # frame of Ps\n\n for y, dert_ in enumerate(zip_dert__, start=1): # scan top down\n if verbose: print(f\"\\rProcessing line {y + 1}/{height}, \", end=\"\"); sys.stdout.flush()\n\n P_ = form_P_(list(zip(*dert_)), mask__[y], y) # horizontal clustering - lower row\n derP_ = scan_P_(P_, _P_) # tests for x overlap between Ps, calls comp_slice\n\n Pd_ = form_Pd_(P_) # form Pds within Ps\n derPd_ = scan_Pd_(P_, _P_) # adds upconnect_ in Pds and calls derPd_2_PP_derPd_, same as derP_2_PP_\n\n derP__ += derP_; derPd__ += derPd_ # frame of derPs\n P__ += P_; Pd__ += Pd_\n _P_ = P_ # set current lower row P_ as next upper row _P_\n\n form_PP_shell(blob, derP__, P__, derPd__, Pd__, fPPd) # form PPs in blob or in FPP\n\n # draw PPs and FPPs\n if not isinstance(blob, CPP):\n draw_PP_(blob)\n\n\ndef form_P_(idert_, mask_, y): # segment dert__ into P__, in horizontal ) vertical order\n '''\n sums dert params within Ps and increments L: horizontal length.\n '''\n P_ = [] # rows of derPs\n dert_ = [list(idert_[0])] # get first dert from idert_ (generator/iterator)\n _mask = mask_[0] # mask bit per dert\n if ~_mask:\n I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = dert_[0]; L = 1; x0 = 0 # initialize P params with first dert\n\n for x, dert in enumerate(idert_[1:], start=1): # left to right in each row of derts\n mask = mask_[x] # pixel mask\n\n if mask: # masks: if 1,_0: P termination, if 0,_1: P initialization, if 0,_0: P accumulation:\n if ~_mask: # _dert is not masked, dert is masked, terminate P:\n P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma), L=L, x0=x0, dert_=dert_, y=y)\n P_.append(P)\n else: # dert is not masked\n if _mask: # _dert is masked, initialize P params:\n I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = dert; L = 1; x0 = x; dert_ = [dert]\n else:\n I += dert[0] # _dert is not masked, accumulate P params with (p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma) = dert\n Dy += dert[1]\n Dx += dert[2]\n G += dert[3]\n M += dert[4]\n Dyy += dert[5]\n Dyx += dert[6]\n Dxy += dert[7]\n Dxx += dert[8]\n Ga += dert[9]\n Ma += dert[10]\n L += 1\n dert_.append(dert)\n _mask = mask\n\n if ~_mask: # terminate last P in a row\n P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, 
Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma), L=L, x0=x0, dert_=dert_, y=y)\n P_.append(P)\n\n return P_\n\ndef form_Pd_(P_):\n '''\n form Pd s across P's derts using Dx sign\n '''\n Pd__ = []\n for iP in P_:\n if (iP.downconnect_cnt>0) or (iP.upconnect_): # form Pd s if at least one connect in P, else they won't be compared\n P_Ddx = 0 # sum of Ddx across Pd s\n P_Mdx = 0 # sum of Mdx across Pd s\n Pd_ = [] # Pds in P\n _dert = iP.dert_[0] # 1st dert\n dert_ = [_dert]\n I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = _dert; L = 1; x0 = iP.x0 # initialize P params with first dert\n _sign = _dert[2] > 0\n x = 1 # relative x within P\n\n for dert in iP.dert_[1:]:\n sign = dert[2] > 0\n if sign == _sign: # same Dx sign\n I += dert[0] # accumulate P params with (p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma) = dert\n Dy += dert[1]\n Dx += dert[2]\n G += dert[3]\n M += dert[4]\n Dyy += dert[5]\n Dyx += dert[6]\n Dxy += dert[7]\n Dxx += dert[8]\n Ga += dert[9]\n Ma += dert[10]\n L += 1\n dert_.append(dert)\n\n else: # sign change, terminate P\n P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma),\n L=L, x0=x0, dert_=dert_, y=iP.y, sign=_sign, Pm=iP)\n if Dx > ave_Dx:\n # cross-comp of dx in P.dert_\n comp_dx(P); P_Ddx += P.Dert.Ddx; P_Mdx += P.Dert.Mdx\n Pd_.append(P)\n # reinitialize params\n I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = dert; x0 = iP.x0+x; L = 1; dert_ = [dert]\n\n _sign = sign\n x += 1\n # terminate last P\n P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma),\n L=L, x0=x0, dert_=dert_, y=iP.y, sign=_sign, Pm=iP)\n if Dx > ave_Dx:\n comp_dx(P); P_Ddx += P.Dert.Ddx; P_Mdx += P.Dert.Mdx\n Pd_.append(P)\n # update Pd params in P\n iP.Pd_ = Pd_; iP.Dert.Ddx = P_Ddx; iP.Dert.Mdx = P_Mdx\n Pd__ += Pd_\n\n return Pd__\n\n\ndef scan_P_(P_, _P_): # test for x overlap between Ps, call comp_slice\n\n derP_ = []\n for P in P_: # lower row\n for _P in _P_: # upper row\n # test for x overlap between P and _P in 8 directions\n if (P.x0 - 1 < (_P.x0 + _P.L) and (P.x0 + P.L) + 1 > _P.x0): # all Ps here are positive\n\n fcomp = [1 for derP in P.upconnect_ if P is derP.P] # upconnect could be derP or dirP\n if not fcomp:\n derP = comp_slice(_P, P) # form vertical and directional derivatives\n derP_.append(derP)\n P.upconnect_.append(derP)\n _P.downconnect_cnt += 1\n\n elif (P.x0 + P.L) < _P.x0: # stop scanning the rest of lower P_ if there is no overlap\n break\n return derP_\n\n\ndef scan_Pd_(P_, _P_): # test for x overlap between Pds\n\n derPd_ = []\n for P in P_: # lower row\n for _P in _P_: # upper row\n for Pd in P.Pd_: # lower row Pds\n for _Pd in _P.Pd_: # upper row Pds\n # test for same sign & x overlap between Pd and _Pd in 8 directions\n if (Pd.x0 - 1 < (_Pd.x0 + _Pd.L) and (Pd.x0 + Pd.L) + 1 > _Pd.x0) and (Pd.sign == _Pd.sign):\n\n fcomp = [1 for derPd in Pd.upconnect_ if Pd is derPd.P] # upconnect could be derP or dirP\n if not fcomp:\n derPd = comp_slice(_Pd, Pd)\n derPd_.append(derPd)\n Pd.upconnect_.append(derPd)\n _Pd.downconnect_cnt += 1\n\n elif (Pd.x0 + Pd.L) < _Pd.x0: # stop scanning the rest of lower P_ if there is no overlap\n break\n return derPd_\n\n\ndef form_PP_shell(blob, derP__, P__, derPd__, Pd__, fPPd):\n '''\n form vertically contiguous patterns of patterns by the sign of derP, in blob or in FPP\n '''\n if not isinstance(blob, CPP): # input is blob\n\n blob.derP__ = derP__; blob.P__ = P__\n blob.derPd__ = derPd__; blob.Pd__ = Pd__\n if fPPd:\n derP_2_PP_(blob.derP__, 
blob.PPdm_, 1, 1) # cluster by derPm dP sign\n derP_2_PP_(blob.derPd__, blob.PPdd_, 1, 1) # cluster by derPd dP sign\n else:\n derP_2_PP_(blob.derP__, blob.PPmm_, 1, 0) # cluster by derPm mP sign\n derP_2_PP_(blob.derPd__, blob.PPmd_, 1, 0) # cluster by derPd mP sign\n\n # assign spliced_PP after forming all PPs and FPPs\n PPs_ = [blob.PPdm_, blob.PPdd_, blob.PPmm_, blob.PPmd_]\n for PP_ in PPs_:\n for PP in PP_: # splice FPP with connected PPs:\n for derP in PP.xflip_derP_: # not empty in FPPs bordering on PPs only\n _P = derP._P\n if (derP._fxflip) and isinstance(_P.derP, CderP) and (_P.derP.PP not in PP.xflip_PP_):\n PP.xflip_PP_.append(_P.derP.PP) # derP is a lower splice point\n if (derP.fxflip) and (derP.PP not in PP.xflip_PP_):\n PP.xflip_PP_.append(derP.PP) # derP is a higher splice point\n else:\n FPP = blob # reassign for clarity\n FPP.derPf__ = derP__; FPP.Pf__ = P__\n FPP.derPdf__ = derPd__; FPP.Pdf__ = Pd__\n if fPPd:\n derP_2_PP_(FPP.derPf__, FPP.PPdmf_, 0, 1) # cluster by derPmf dP sign\n derP_2_PP_(FPP.derPdf__, FPP.PPddf_, 0, 1) # cluster by derPdf dP sign\n else:\n derP_2_PP_(FPP.derPf__, FPP.PPmmf_, 0, 0) # cluster by derPmf mP sign\n derP_2_PP_(FPP.derPdf__, FPP.PPmdf_, 0, 0) # cluster by derPdf mP sign\n\n\ndef derP_2_PP_(derP_, PP_, fflip, fPPd):\n '''\n first row of derP_ has downconnect_cnt == 0, higher rows may also have them\n '''\n for derP in reversed(derP_): # bottom-up to follow upconnects, derP is stored top-down\n if not derP.P.downconnect_cnt and not isinstance(derP.PP, CPP): # root derP was not terminated in prior call\n PP = CPP(Dert=CDert(), derPP=CderP()) # init\n accum_PP(PP,derP)\n\n if derP._P.upconnect_: # derP has upconnects\n upconnect_2_PP_(derP, PP_, fflip, fPPd) # form PPs across _P upconnects\n else:\n if (derP.PP.Dert.flip_val > flip_ave_FPP) and fflip:\n flip_FPP(derP.PP)\n PP_.append(derP.PP)\n\n\ndef upconnect_2_PP_(iderP, PP_, fflip, fPPd):\n '''\n compare sign of lower-layer iderP to the sign of its upconnects to form contiguous same-sign PPs\n '''\n confirmed_upconnect_ = []\n\n for derP in iderP._P.upconnect_: # potential upconnects from previous call\n if derP not in iderP.PP.derP__: # derP should not in current iPP derP_ list, but this may occur after the PP merging\n\n if (derP.P.Dert.flip_val>0 and iderP.P.Dert.flip_val>0 and iderP.PP.Dert.flip_val>0):\n # upconnect derP has different FPP, merge them\n if isinstance(derP.PP, CPP) and (derP.PP is not iderP.PP):\n merge_PP(iderP.PP, derP.PP, PP_)\n else: # accumulate derP to current FPP\n accum_PP(iderP.PP, derP)\n confirmed_upconnect_.append(derP)\n # not FPP\n else:\n if fPPd: same_sign = (iderP.dP > 0) == (derP.dP > 0) # comp dP sign\n else: same_sign = (iderP.mP > 0) == (derP.mP > 0) # comp mP sign\n\n if same_sign and not (iderP.P.Dert.flip_val>0) and not (derP.P.Dert.flip_val>0): # upconnect derP has different PP, merge them\n if isinstance(derP.PP, CPP) and (derP.PP is not iderP.PP):\n merge_PP(iderP.PP, derP.PP, PP_)\n else: # accumulate derP in current PP\n accum_PP(iderP.PP, derP)\n confirmed_upconnect_.append(derP)\n elif not isinstance(derP.PP, CPP): # sign changed, derP is root derP unless it already has FPP/PP\n PP = CPP(Dert=CDert(), derPP=CderP())\n accum_PP(PP,derP)\n derP.P.downconnect_cnt = 0 # reset downconnect count for root derP\n\n if derP._P.upconnect_:\n upconnect_2_PP_(derP, PP_, fflip, fPPd) # recursive compare sign of next-layer upconnects\n\n elif derP.PP is not iderP.PP and derP.P.downconnect_cnt == 0:\n if (derP.PP.Dert.flip_val > flip_ave_FPP) and 
fflip:\n flip_FPP(derP.PP)\n PP_.append(derP.PP) # terminate PP (not iPP) at the sign change\n\n iderP._P.upconnect_ = confirmed_upconnect_\n\n if not iderP.P.downconnect_cnt:\n if (iderP.PP.Dert.flip_val > flip_ave_FPP) and fflip:\n flip_FPP(iderP.PP)\n PP_.append(iderP.PP) # iPP is terminated after all upconnects are checked\n\n\ndef merge_PP(_PP, PP, PP_): # merge PP into _PP\n\n for derP in PP.derP__:\n if derP not in _PP.derP__:\n _PP.derP__.append(derP)\n derP.PP = _PP # update reference\n\n Dert = derP.P.Dert\n # accumulate Dert param of derP\n _PP.Dert.accumulate(I=Dert.I, Dy=Dert.Dy, Dx=Dert.Dx, G=Dert.G, M=Dert.M, Dyy=Dert.Dyy, Dyx=Dert.Dyx, Dxy=Dert.Dxy, Dxx=Dert.Dxx,\n Ga=Dert.Ga, Ma=Dert.Ma, Mdx=Dert.Mdx, Ddx=Dert.Ddx, flip_val=Dert.flip_val)\n\n # accumulate if PP' derP not in _PP\n _PP.derPP.accumulate(mP=derP.mP, dP=derP.dP, mx=derP.mx, dx=derP.dx,\n mL=derP.mL, dL=derP.dL, mDx=derP.mDx, dDx=derP.dDx,\n mDy=derP.mDy, dDy=derP.dDy)\n\n for splice_derP in PP.xflip_derP_:\n if splice_derP not in _PP.xflip_derP_:\n _PP.xflip_derP_.append(splice_derP)\n\n if PP in PP_:\n PP_.remove(PP) # remove merged PP\n\n\ndef flip_FPP(FPP):\n '''\n flip derts of FPP and call again slice_blob to get PPs of FPP\n '''\n # get box from P and P\n x0 = min(min([derP.P.x0 for derP in FPP.derP__]), min([derP._P.x0 for derP in FPP.derP__]))\n xn = max(max([derP.P.x0+derP.P.L for derP in FPP.derP__]), max([derP._P.x0+derP._P.L for derP in FPP.derP__]))\n y0 = min(min([derP.P.y for derP in FPP.derP__]), min([derP._P.y for derP in FPP.derP__]))\n yn = max(max([derP.P.y for derP in FPP.derP__]), max([derP._P.y for derP in FPP.derP__])) +1 # +1 because yn is not inclusive\n FPP.box = [y0,yn,x0,xn]\n # init empty derts, 11 params each: p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma\n dert__ = [np.zeros((yn-y0, xn-x0)) for _ in range(11)]\n mask__ = np.ones((yn-y0, xn-x0)).astype('bool')\n\n # fill empty dert with current FPP derts\n for derP in FPP.derP__:\n # _P\n for _x, _dert in enumerate(derP._P.dert_):\n for i, _param in enumerate(_dert):\n dert__[i][derP._P.y-y0, derP._P.x0-x0+_x] = _param\n mask__[derP._P.y-y0, derP._P.x0-x0+_x] = False\n # P\n for x, dert in enumerate(derP.P.dert_):\n for j, param in enumerate(dert):\n dert__[j][derP.P.y-y0, derP.P.x0-x0+x] = param\n mask__[derP.P.y-y0, derP.P.x0-x0+x] = False\n # flip dert__\n flipped_dert__ = [np.rot90(dert) for dert in dert__]\n flipped_mask__ = np.rot90(mask__)\n flipped_dert__[1],flipped_dert__[2] = \\\n flipped_dert__[2],flipped_dert__[1] # swap dy and dx in derts, always flipped in FPP\n FPP.dert__ = flipped_dert__\n FPP.mask__ = flipped_mask__\n # form PP_ in flipped FPP\n slice_blob(FPP, verbose=True)\n\n\ndef flip_eval_blob(blob):\n\n # L_bias (Lx / Ly) * G_bias (Gy / Gx), blob.box = [y0,yn,x0,xn], ddirection: preferential comp over low G\n horizontal_bias = (blob.box[3] - blob.box[2]) / (blob.box[1] - blob.box[0]) \\\n * (abs(blob.Dy) / abs(blob.Dx))\n\n if horizontal_bias > 1 and (blob.G * blob.Ma * horizontal_bias > flip_ave / 10):\n blob.fflip = 1 # rotate 90 degrees for scanning in vertical direction\n # swap blob Dy and Dx:\n Dy=blob.Dy; blob.Dy = blob.Dx; blob.Dx = Dy\n # rotate dert__:\n blob.dert__ = tuple([np.rot90(dert) for dert in blob.dert__])\n blob.mask__ = np.rot90(blob.mask__)\n # swap dert dys and dxs:\n blob.dert__ = list(blob.dert__) # convert to list since param in tuple is immutable\n blob.dert__[1], blob.dert__[2] = \\\n blob.dert__[2], blob.dert__[1]\n\n\ndef accum_Dert(Dert: dict, **params) -> None:\n 
Dert.update({param: Dert[param] + value for param, value in params.items()})\n\ndef accum_PP(PP, derP): # accumulate derP params in PP\n\n Dert = derP.P.Dert\n # accumulate Dert params\n ''' use:\n for param, PP_param in zip(Dert, PP.Dert):\n PP_param+=param\n ? '''\n PP.Dert.accumulate(I=Dert.I, Dy=Dert.Dy, Dx=Dert.Dx, G=Dert.G, M=Dert.M, Dyy=Dert.Dyy, Dyx=Dert.Dyx, Dxy=Dert.Dxy, Dxx=Dert.Dxx,\n Ga=Dert.Ga, Ma=Dert.Ma, Mdx=Dert.Mdx, Ddx=Dert.Ddx, flip_val=Dert.flip_val)\n # accumulate derP params\n PP.derPP.accumulate(mP=derP.mP, dP=derP.dP, mx=derP.mx, dx=derP.dx, mL=derP.mL, dL=derP.dL, mDx=derP.mDx, dDx=derP.dDx,\n mDy=derP.mDy, dDy=derP.dDy)\n PP.derP__.append(derP)\n\n derP.PP = PP # update reference\n\n if derP.fxflip or derP._fxflip: # add splice point\n PP.xflip_derP_.append(derP)\n\n\ndef comp_dx(P): # cross-comp of dx s in P.dert_\n\n Ddx = 0\n Mdx = 0\n dxdert_ = []\n _dx = P.dert_[0][2] # first dx\n for dert in P.dert_[1:]:\n dx = dert[2]\n ddx = dx - _dx\n if dx > 0 == _dx > 0: mdx = min(dx, _dx)\n else: mdx = -min(abs(dx), abs(_dx))\n dxdert_.append((ddx, mdx)) # no dx: already in dert_\n Ddx += ddx # P-wide cross-sign, P.L is too short to form sub_Ps\n Mdx += mdx\n _dx = dx\n P.dxdert_ = dxdert_\n P.Dert.Ddx = Ddx\n P.Dert.Mdx = Mdx\n\n\ndef comp_slice(_P, P, _derP_): # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp\n\n s, x0, Dx, Dy, G, M, L, Ddx, Mdx = P.sign, P.x0, P.Dert.Dx, P.Dert.Dy, P.Dert.G, P.Dert.M, P.L, P.Dert.Ddx, P.Dert.Mdx # params per comp branch\n _s, _x0, _Dx, _Dy, _G, _M, _dX, _L, _Ddx, _Mdx = _P.sign, _P.x0, _P.Dert.Dx, _P.Dert.Dy, _P.Dert.G, _P.Dert.M, _P.dX, _P.L, _P.Dert.Ddx, _P.Dert.Mdx\n\n dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2) # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?\n\n ddX = dX - _dX # long axis curvature, if > ave: ortho eval per P, else per PP_dX?\n mdX = min(dX, _dX) # dX is inversely predictive of mP?\n hyp = np.hypot(dX, 1) # ratio of local segment of long (vertical) axis to dY = 1\n\n L /= hyp # orthogonal L is reduced by hyp\n dL = L - _L; mL = min(L, _L) # L: positions / sign, dderived: magnitude-proportional value\n M /= hyp # orthogonal M is reduced by hyp\n dM = M - _M; mM = min(M, _M) # use abs M? 
no Mx, My: non-core, lesser and redundant bias?\n\n dP = dL + dM # -> directional PPd, equal-weight params, no rdn?\n mP = mL + mM # -> complementary PPm, rdn *= Pd | Pm rolp?\n mP -= ave_mP * ave_rmP ** (dX / L) # dX / L is relative x-distance between P and _P,\n\n P.Dert.flip_val = (dX * (P.Dert.Dy / (P.Dert.Dx+.001)) - flip_ave) # +.001 to avoid division by zero\n\n derP = CderP(P=P, _P=_P, mP=mP, dP=dP, dX=dX, mL=mL, dL=dL)\n P.derP = derP\n\n if P.Dert.flip_val>0: # derP.PP is FPP and _derP.PP is PP\n if ~(_P.Dert.flip_val>0) and (derP.mP >0) and (isinstance(_P.derP, CderP)): # positive mP AND _P.derP is derP: exclude 1st row Ps\n derP.fxflip = 1 # derP is a lower splice point\n\n elif _P.Dert.flip_val>0: # derP.PP is PP and _derP.PP is FPP\n if (_P.derP.mP >0) and (isinstance(_P.derP, CderP)): # positive mP AND _P.derP is derP: exclude 1st row Ps\n _P.derP._fxflip = 1 # _derP is a higher splice point\n\n return derP\n\n\ndef comp_slice_full(_P, P): # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp\n\n s, x0, Dx, Dy, G, M, L, Ddx, Mdx = P.sign, P.x0, P.Dert.Dx, P.Dert.Dy, P.Dert.G, P.Dert.M, P.L, P.Dert.Ddx, P.Dert.Mdx\n # params per comp branch, add angle params\n _s, _x0, _Dx, _Dy, _G, _M, _dX, _L, _Ddx, _Mdx = _P.sign, _P.x0, _P.Dert.Dx, _P.Dert.Dy, _P.Dert.G, _P.Dert.M, _P.dX, _P.L, _P.Dert.Ddx, _P.Dert.Mdx\n\n dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2) # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?\n\n if dX > ave_dX: # internal comp is higher-power, else two-input comp not compressive?\n xn = x0 + L - 1\n _xn = _x0 + _L - 1\n mX = min(xn, _xn) - max(x0, _x0) # overlap = abs proximity: summed binary x match\n rX = dX / mX if mX else dX*2 # average dist / prox, | prox / dist, | mX / max_L?\n\n ddX = dX - _dX # long axis curvature, if > ave: ortho eval per P, else per PP_dX?\n mdX = min(dX, _dX) # dX is inversely predictive of mP?\n\n if dX * P.Dert.G > ave_ortho: # estimate params of P locally orthogonal to long axis, maximizing lateral diff and vertical match\n # diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/orthogonalization.png\n # Long axis is a curve of connections between ave_xs: mid-points of consecutive Ps.\n\n # Ortho virtually rotates P to connection-orthogonal direction:\n hyp = np.hypot(dX, 1) # ratio of local segment of long (vertical) axis to dY = 1\n L = L / hyp # orthogonal L\n # combine derivatives in proportion to the contribution of their axes to orthogonal axes:\n # contribution of Dx should increase with hyp(dX,dY=1), this is original direction of Dx:\n Dy = (Dy / hyp + Dx * hyp) / 2 # estimated along-axis D\n Dx = (Dy * hyp + Dx / hyp) / 2 # estimated cross-axis D\n '''\n alternatives:\n oDy = (Dy * hyp - Dx / hyp) / 2; oDx = (Dx / hyp + Dy * hyp) / 2; or:\n oDy = hypot( Dy / hyp, Dx * hyp); oDx = hypot( Dy * hyp, Dx / hyp)\n '''\n dL = L - _L; mL = min(L, _L) # L: positions / sign, dderived: magnitude-proportional value\n dM = M - _M; mM = min(M, _M) # use abs M? no Mx, My: non-core, lesser and redundant bias?\n # no comp G: Dy, Dx are more specific:\n dDx = Dx - _Dx # same-sign Dx if Pd\n mDx = min(abs(Dx), abs(_Dx))\n if Dx > 0 != _Dx > 0: mDx = -mDx\n # min is value distance for opposite-sign comparands, vs. value overlap for same-sign comparands\n dDy = Dy - _Dy # Dy per sub_P by intra_comp(dx), vs. 
less vertically specific dI\n mDy = min(abs(Dy), abs(_Dy))\n if (Dy > 0) != (_Dy > 0): mDy = -mDy\n\n dDdx, dMdx, mDdx, mMdx = 0, 0, 0, 0\n if P.dxdert_ and _P.dxdert_: # from comp_dx\n fdx = 1\n dDdx = Ddx - _Ddx\n mDdx = min( abs(Ddx), abs(_Ddx))\n if (Ddx > 0) != (_Ddx > 0): mDdx = -mDdx\n # Mdx is signed:\n dMdx = min( Mdx, _Mdx)\n mMdx = -min( abs(Mdx), abs(_Mdx))\n if (Mdx > 0) != (_Mdx > 0): mMdx = -mMdx\n else:\n fdx = 0\n # coeff = 0.7 for semi redundant parameters, 0.5 for fully redundant parameters:\n dP = ddX + dL + 0.7*(dM + dDx + dDy) # -> directional PPd, equal-weight params, no rdn?\n # correlation: dX -> L, oDy, !oDx, ddX -> dL, odDy ! odDx? dL -> dDx, dDy?\n if fdx: dP += 0.7*(dDdx + dMdx)\n\n mP = mdX + mL + 0.7*(mM + mDx + mDy) # -> complementary PPm, rdn *= Pd | Pm rolp?\n if fdx: mP += 0.7*(mDdx + mMdx)\n mP -= ave_mP * ave_rmP ** (dX / L) # dX / L is relative x-distance between P and _P,\n\n P.Dert.flip_val = (dX * (P.Dert.Dy / (P.Dert.Dx+.001)) - flip_ave) # avoid division by zero\n\n derP = CderP(P=P, _P=_P, mP=mP, dP=dP, dX=dX, mL=mL, dL=dL, mDx=mDx, dDx=dDx, mDy=mDy, dDy=dDy)\n P.derP = derP\n\n # Chee's version:\n # if flip value>0 AND positive mP (predictive value) AND flip_val sign changed AND _P.derP is derP: exclude 1st row Ps\n if (P.Dert.flip_val>0) and (derP.mP >0) and ((P.Dert.flip_val>0) != (_P.Dert.flip_val>0)):\n derP.fxflip = 1\n # if upper row derP xflip flag is true , add current lower row derP as lower spliced point\n if (isinstance(_P.derP, CderP)) and (_P.derP.fxflip == 1) and (_P.Dert.flip_val>0) and (_P.derP.mP >0):\n derP.fsplice= 1\n\n if fdx:\n derP.fdx=1; derP.dDdx=dDdx; derP.mDdx=mDdx; derP.dMdx=dMdx; derP.mMdx=mMdx\n\n '''\n min comp for rotation: L, Dy, Dx, no redundancy?\n mParam weighting by relative contribution to mP, /= redundancy?\n div_f, nvars: if abs dP per PPd, primary comp L, the rest is normalized?\n '''\n return derP\n\n''' radial comp extension for co-internal blobs:\n != sign comp x sum( adj_blob_) -> intra_comp value, isolation value, cross-sign merge if weak, else:\n == sign comp x ind( adj_adj_blob_) -> same-sign merge | composition:\n borrow = adj_G * rA: default sum div_comp S -> relative area and distance to adjj_blob_\n internal sum comp if mA: in thin lines only? comp_norm_G or div_comp_G -> rG?\n isolation = decay + contrast:\n G - G * (rA * ave_rG: decay) - (rA * adj_G: contrast, = lend | borrow, no need to compare vG?)\n if isolation: cross adjj_blob composition eval,\n else: cross adjj_blob merge eval:\n blob merger if internal match (~raG) - isolation, rdn external match:\n blob compos if external match (~rA?) + isolation,\n Also eval comp_slice over fork_?\n rng+ should preserve resolution: rng+_dert_ is dert layers,\n rng_sum-> rng+, der+: whole rng, rng_incr-> angle / past vs next g,\n rdn Rng | rng_ eval at rng term, Rng -= lost coord bits mag, always > discr?\n'''"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.hypot",
"numpy.stack"
],
[
"numpy.rot90",
"numpy.zeros",
"numpy.hypot",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sararob/adanet
|
[
"26388aeb67ec30c9e98635497e6b5b3476378db7"
] |
[
"adanet/core/tpu_estimator.py"
] |
[
"\"\"\"An AdaNet estimator implementation which can run on TPU.\n\nCopyright 2018 The AdaNet Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport functools\n\nfrom adanet.core.ensemble import MixtureWeightType\nfrom adanet.core.estimator import Estimator\nimport six\nimport tensorflow as tf\nfrom tensorflow.contrib.tpu.python.tpu import tpu_function\nfrom tensorflow.python import summary\n\n\n# TODO: support summaries on TPU during training.\[email protected]\ndef _rewire_summaries():\n \"\"\"Rewire Tensorflow summaries to be no-ops when running on TPU.\n\n Summaries are not currently supported on TPU.\n\n Yields:\n Context where summary functions are rewired to be no-ops when on TPU.\n \"\"\"\n\n if tpu_function.get_tpu_context().number_of_shards == 0:\n yield\n return\n\n tf.logging.log_first_n(\n tf.logging.WARN,\n \"Converting summaries to no-ops on TPU since they are not supported.\", 1)\n old_summary_audio = summary.audio\n old_summary_histogram = summary.histogram\n old_summary_image = summary.image\n old_summary_scalar = summary.scalar\n old_summary_tensor_summary = summary.tensor_summary\n old_summary_text = summary.text\n\n def _no_op(*args, **kwargs):\n del args, kwargs # Unused\n return tf.constant(\"\", name=\"summary_no_op\")\n\n # Monkey-patch global attributes.\n summary.audio = _no_op\n summary.histogram = _no_op\n summary.image = _no_op\n summary.scalar = _no_op\n summary.tensor_summary = _no_op\n summary.text = _no_op\n\n tf.summary.audio = _no_op\n tf.summary.histogram = _no_op\n tf.summary.image = _no_op\n tf.summary.scalar = _no_op\n tf.summary.tensor_summary = _no_op\n tf.summary.text = _no_op\n\n try:\n yield\n finally:\n # Revert monkey-patches.\n summary.audio = old_summary_audio\n summary.histogram = old_summary_histogram\n summary.image = old_summary_image\n summary.scalar = old_summary_scalar\n summary.tensor_summary = old_summary_tensor_summary\n summary.text = old_summary_text\n\n tf.summary.audio = old_summary_audio\n tf.summary.histogram = old_summary_histogram\n tf.summary.image = old_summary_image\n tf.summary.scalar = old_summary_scalar\n tf.summary.tensor_summary = old_summary_tensor_summary\n tf.summary.text = old_summary_text\n\n\nclass TPUEstimator(Estimator, tf.contrib.tpu.TPUEstimator):\n \"\"\"An adanet.Estimator capable of running on TPU.\n\n If running on TPU, all summary calls are rewired to be no-ops during training.\n\n WARNING: this API is highly experimental, unstable, and can change without\n warning.\n \"\"\"\n\n def __init__(self,\n head,\n subnetwork_generator,\n max_iteration_steps,\n mixture_weight_type=MixtureWeightType.SCALAR,\n mixture_weight_initializer=None,\n warm_start_mixture_weights=False,\n adanet_lambda=0.,\n adanet_beta=0.,\n evaluator=None,\n report_materializer=None,\n use_bias=False,\n metric_fn=None,\n force_grow=False,\n replicate_ensemble_in_training=False,\n 
adanet_loss_decay=.9,\n worker_wait_timeout_secs=7200,\n model_dir=None,\n report_dir=None,\n config=None,\n use_tpu=True,\n batch_size=None):\n if not use_tpu:\n tf.logging.warning(\n \"This adanet.TPUEstimator is meant to be used for running on TPU. \"\n \"If you want to run on CPU/GPU, use adanet.Estimator instead.\")\n\n super(TPUEstimator, self).__init__(\n head=head,\n subnetwork_generator=subnetwork_generator,\n max_iteration_steps=max_iteration_steps,\n mixture_weight_type=mixture_weight_type,\n mixture_weight_initializer=mixture_weight_initializer,\n warm_start_mixture_weights=warm_start_mixture_weights,\n adanet_lambda=adanet_lambda,\n adanet_beta=adanet_beta,\n evaluator=evaluator,\n report_materializer=report_materializer,\n use_bias=use_bias,\n metric_fn=metric_fn,\n force_grow=force_grow,\n replicate_ensemble_in_training=replicate_ensemble_in_training,\n adanet_loss_decay=adanet_loss_decay,\n worker_wait_timeout_secs=worker_wait_timeout_secs,\n model_dir=model_dir,\n report_dir=report_dir,\n config=config if config else tf.contrib.tpu.RunConfig(),\n use_tpu=use_tpu,\n eval_on_tpu=False,\n export_to_tpu=False,\n train_batch_size=batch_size or 0)\n\n def train(self,\n input_fn,\n hooks=None,\n steps=None,\n max_steps=None,\n saving_listeners=None):\n # Rewire summaries to be no-ops when running on TPU.\n # TODO: Rewire predict and eval when TPU support is added.\n with _rewire_summaries():\n return super(TPUEstimator, self).train(\n input_fn=input_fn,\n hooks=hooks,\n max_steps=max_steps,\n saving_listeners=saving_listeners)\n\n def _call_adanet_model_fn(self, input_fn, mode, params):\n \"\"\"See the `Estimator` base class for details.\"\"\"\n\n # Fakes TPU shard context before calling through to the parent to supress\n # warnings by CrossShardOptimizer when running on TPU. Warnings are issued\n # when `_adanet_model_fn` is called directly on CPU during the bookkeeping\n # phase. Since we rebuild the graph each time `_adanet_model_fn` is called,\n # this has no adverse effects.\n with tpu_function.tpu_shard_context(0):\n # Bind params to input_fn since the parent's input_fn is not expected to\n # have any arguments.\n input_fn = functools.partial(input_fn, params)\n super(TPUEstimator, self)._call_adanet_model_fn(input_fn, mode, params)\n\n def _adanet_model_fn(self, features, labels, mode, params):\n \"\"\"See the `Estimator` base class for details.\"\"\"\n\n estimator_spec = super(TPUEstimator, self)._adanet_model_fn(\n features, labels, mode, params)\n if \"use_tpu\" in params and mode == tf.estimator.ModeKeys.TRAIN:\n kwargs = {\n key: value\n for key, value in six.iteritems(estimator_spec._asdict())\n if key not in (\"eval_metric_ops\", \"scaffold\", \"training_chief_hooks\")\n }\n estimator_spec = tf.contrib.tpu.TPUEstimatorSpec(**kwargs)\n return estimator_spec\n"
] |
[
[
"tensorflow.logging.warning",
"tensorflow.constant",
"tensorflow.logging.log_first_n",
"tensorflow.contrib.tpu.RunConfig",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.tpu.python.tpu.tpu_function.get_tpu_context",
"tensorflow.contrib.tpu.python.tpu.tpu_function.tpu_shard_context"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
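The `_rewire_summaries` helper in the row above is a reusable pattern: temporarily monkey-patch module-level callables inside a `contextlib.contextmanager` and restore them in a `finally` block, so the patch cannot leak even if the body raises. Below is a minimal, dependency-free sketch of that pattern; the `fake_summary` namespace and the `rewire_to_noops` name are illustrative stand-ins, not part of the AdaNet code.

import contextlib
import types

# Stand-in for a module whose functions we want to silence (illustrative only).
fake_summary = types.SimpleNamespace(
    scalar=lambda name, value: "scalar(%s=%s)" % (name, value),
    image=lambda name, img: "image(%s)" % name,
)

@contextlib.contextmanager
def rewire_to_noops(module, names):
    """Temporarily replace the named attributes on `module` with no-ops."""
    originals = {name: getattr(module, name) for name in names}

    def _no_op(*args, **kwargs):
        del args, kwargs  # Unused.
        return None

    for name in names:
        setattr(module, name, _no_op)
    try:
        yield
    finally:
        # Restore the originals even if the body raised.
        for name, fn in originals.items():
            setattr(module, name, fn)

print(fake_summary.scalar("loss", 0.1))      # normal call
with rewire_to_noops(fake_summary, ["scalar", "image"]):
    print(fake_summary.scalar("loss", 0.1))  # None: rewired to a no-op
print(fake_summary.scalar("loss", 0.1))      # restored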
mickare/Robust-Reconstruction-of-Watertight-3D-Models
|
[
"c3afd98a8732c0447c153d38bfcefb5c4441bc7b"
] |
[
"reconstruction/filters/dilate.py"
] |
[
"from typing import Optional, Union\n\nimport numpy as np\nfrom scipy import ndimage\n\nfrom reconstruction.data.chunks import ChunkGrid, Chunk\nfrom reconstruction.data.faces import ChunkFace\n\nbool_t = Union[bool, np.bool8]\n\n\ndef dilate(image: ChunkGrid[bool_t], steps=1, structure: Optional[np.ndarray] = None, mask: ChunkGrid[bool] = None) \\\n -> ChunkGrid[bool_t]:\n if mask is None:\n return dilate_no_mask(image, steps, structure)\n # if steps < image.chunk_size:\n # return dilate_no_mask(image, structure, steps)\n # else:\n # return dilate_no_mask_fast(image, structure, steps)\n else:\n raise NotImplementedError\n\n\ndef dilate_no_mask(image: ChunkGrid[bool_t], steps=1, structure: Optional[np.ndarray] = None) -> ChunkGrid[bool_t]:\n if structure is not None:\n assert structure.ndim == 2 and structure.shape == (3, 3)\n\n __pad_slice = slice(1, -1)\n\n result = image.astype(np.bool8)\n for step in range(steps):\n # Temporary result between each step\n tmp = result.copy(empty=True)\n # Dilate inner chunk\n # result.pad_chunks(1)\n\n for index, r in result.chunks.items():\n if r.is_filled() and r.value:\n tmp.ensure_chunk_at_index(index).set_fill(r.value)\n for f, ni in ChunkGrid.iter_neighbors_indices(r.index):\n ch = tmp.ensure_chunk_at_index(ni)\n if not (ch.is_filled() and ch.value):\n arr = ch.to_array()\n arr[f.flip().slice()] = True\n ch.set_array(arr)\n ch.cleanup()\n continue\n\n padded = result.padding_at(index, 1, corners=False, edges=False)\n if (not np.any(padded)): # Skip, nothing to do\n continue\n\n # Do dilation\n dilated = ndimage.binary_dilation(padded, structure=structure)\n\n # Copy result to tmp\n ch = tmp.ensure_chunk_at_index(index)\n ch.set_array(dilated[1:-1, 1:-1, 1:-1])\n ch.cleanup()\n\n # Propagate to the next chunks\n for f in ChunkFace: # type: ChunkFace\n s = dilated[f.slice(other=__pad_slice)]\n if np.any(s):\n ch: Chunk = tmp.ensure_chunk_at_index(f.direction() + index)\n arr = ch.to_array()\n arr[f.flip().slice()] |= s\n ch.set_array(arr)\n\n # Set result\n result = tmp\n result.cleanup(remove=True)\n return result\n\n# def dilate_no_mask_fast(image: ChunkGrid[bool_t], structure: Optional[np.ndarray] = None, steps=1) -> ChunkGrid[bool_t]:\n# if structure is not None:\n# assert structure.ndim == 2 and structure.shape == (3, 3)\n#\n# # Only allow dilation on not fully filled spaces\n# assert not image.fill_value\n#\n# # Method cache (prevent lookup in loop)\n# __ndimage_binary_dilation = ndimage.binary_dilation\n# __grid_ensure_chunk_at_index = ChunkGrid.ensure_chunk_at_index\n# __chunk_set_array = Chunk.set_array\n#\n# result = image\n# size = image.chunk_size\n#\n# remaining = steps\n# step = 0\n# while remaining > 0:\n# # Binary dilation iterations in this step\n# iterations = min(remaining, image.chunk_size)\n# if iterations <= 0:\n# break\n#\n# # Temporary result between each major step\n# tmp = result.copy(empty=True)\n#\n# # Dilate inner chunk\n# result.pad_chunks(1)\n# for r in result.chunks:\n# if r.is_filled() and r.value: # Skip, nothing to do\n# continue\n#\n# src = result.get_block_at(r.index, (3, 3, 3), edges=True, corners=True)\n# if all(b.is_filled() and not b.value for b in np.flatiter(src)): # Skip, nothing to do\n# continue\n#\n# # Do dilation\n# padded = result.block_to_array(src)\n# dil = __ndimage_binary_dilation(padded, structure=structure, iterations=iterations)\n#\n# # Copy result to tmp\n# tmp.set_block_at(r.index, dil, replace=False)\n# tmp.ensure_chunk_at_index(r.index).set_array(dil[size:size + size, size:size + size, 
size:size + size])\n#\n# # Set result\n# result = tmp\n# result.cleanup(remove=True)\n# step += 1\n#\n# return result\n"
] |
[
[
"numpy.any",
"scipy.ndimage.binary_dilation"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
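The core of `dilate_no_mask` in the row above is a pad, dilate, crop cycle per chunk: dilate a block padded by one voxel, write the interior back, then push whatever spilled over each face into the neighbouring chunk. The snippet below isolates just that pad/dilate/crop step on a single toy 5x5x5 block; the array and sizes are invented for illustration.

import numpy as np
from scipy import ndimage

volume = np.zeros((5, 5, 5), dtype=bool)
volume[2, 2, 2] = True  # a single seed voxel

# Pad by one voxel so the dilation can spill over the block edge...
padded = np.pad(volume, 1)
dilated = ndimage.binary_dilation(padded)  # default structure: 6-connectivity in 3D

# ...then crop back to the original block, as dilated[1:-1, 1:-1, 1:-1] does above.
core = dilated[1:-1, 1:-1, 1:-1]
print(core.sum())  # 7: the seed plus its six face neighbours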
JohanpG/tf-object-counting
|
[
"511c69385f9443b34ba7ea1a39417a90627c1ca2"
] |
[
"utils/visualization_utils.py"
] |
[
"#----------------------------------------------\n#--- Author : Ahmet Ozlu\n#--- Mail : [email protected]\n#--- Date : 27th January 2018\n#----------------------------------------------\n\n\"\"\"A set of functions that are used for visualization.\n\nThese functions often receive an image, perform some visualization on the image.\nThe functions do not return a value, instead they modify the image itself.\n\n\"\"\"\n\n# Imports\nimport collections\nimport functools\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport PIL.Image as Image\nimport PIL.ImageColor as ImageColor\nimport PIL.ImageDraw as ImageDraw\nimport PIL.ImageFont as ImageFont\nimport six\nimport tensorflow as tf\nimport cv2\nimport numpy\nimport os\n\n# string utils - import\nfrom utils.string_utils import custom_string_util\n\n# image utils - image saver import\nfrom utils.image_utils import image_saver\n\n# predicted_speed predicted_color module - import\nfrom utils.object_counting_module import object_counter\n# predicted_speed predicted_color module - import\nfrom utils.object_counting_module import object_counter_x_axis\n\n# color recognition module - import\nfrom utils.color_recognition_module import color_recognition_api\n\n# Variables\nis_vehicle_detected = [0]\nROI_POSITION = [0]\nROI_Area=[]\nDEVIATION = [0]\nis_color_recognition_enable = [0]\nmode_number = [0]\nx_axis = [0]\n\n_TITLE_LEFT_MARGIN = 10\n_TITLE_TOP_MARGIN = 10\n\nSTANDARD_COLORS = [\n 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',\n 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',\n 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',\n 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',\n 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',\n 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',\n 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',\n 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',\n 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',\n 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',\n 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',\n 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',\n 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',\n 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',\n 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',\n 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',\n 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',\n 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',\n 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',\n 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',\n 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',\n 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',\n 'WhiteSmoke', 'Yellow', 'YellowGreen'\n]\n\ncurrent_path = os.getcwd()\n\ndef save_image_array_as_png(image, output_path):\n \"\"\"Saves an image (represented as a numpy array) to PNG.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n output_path: path to which image should be written.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n with tf.gfile.Open(output_path, 'w') as fid:\n image_pil.save(fid, 
'PNG')\n\ndef encode_image_array_as_png_str(image):\n \"\"\"Encodes a numpy array into a PNG string.\n\n Args:\n image: a numpy array with shape [height, width, 3].\n\n Returns:\n PNG encoded image string.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image))\n output = six.BytesIO()\n image_pil.save(output, format='PNG')\n png_string = output.getvalue()\n output.close()\n return png_string\n\ndef draw_bounding_box_on_image_array(current_frame_number, image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=(),\n use_normalized_coordinates=True):\n \"\"\"Adds a bounding box to an image (numpy array).\n\n Args:\n image: a numpy array with shape [height, width, 3].\n ymin: ymin of bounding box in normalized coordinates (same below).\n xmin: xmin of bounding box.\n ymax: ymax of bounding box.\n xmax: xmax of bounding box.\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list: list of strings to display in box\n (each to be shown on its own line).\n use_normalized_coordinates: If True (default), treat coordinates\n ymin, xmin, ymax, xmax as relative to the image. Otherwise treat\n coordinates as absolute.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n is_vehicle_detected, csv_line, update_csv = draw_bounding_box_on_image(current_frame_number,image_pil, ymin, xmin, ymax, xmax, color,\n thickness, display_str_list,\n use_normalized_coordinates)\n np.copyto(image, np.array(image_pil))\n return is_vehicle_detected, csv_line, update_csv\n\ndef draw_bounding_box_on_image(current_frame_number,image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=(),\n use_normalized_coordinates=True):\n \"\"\"Adds a bounding box to an image.\n\n Each string in display_str_list is displayed on a separate line above the\n bounding box in black text on a rectangle filled with the input 'color'.\n If the top of the bounding box extends to the edge of the image, the strings\n are displayed below the bounding box.\n\n Args:\n image: a PIL.Image object.\n ymin: ymin of bounding box.\n xmin: xmin of bounding box.\n ymax: ymax of bounding box.\n xmax: xmax of bounding box.\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list: list of strings to display in box\n (each to be shown on its own line).\n use_normalized_coordinates: If True (default), treat coordinates\n ymin, xmin, ymax, xmax as relative to the image. 
Otherwise treat\n coordinates as absolute.\n \"\"\"\n csv_line = \"\" # to create new csv line consists of vehicle type, predicted_speed, color and predicted_direction\n update_csv = False # update csv for a new vehicle that are passed from ROI - just one new line for each vehicles\n is_vehicle_detected = [0]\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n if use_normalized_coordinates:\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n else:\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n draw.line([(left, top), (left, bottom), (right, bottom),\n (right, top), (left, top)], width=thickness, fill=color)\n\n predicted_direction = \"n.a.\" # means not available, it is just initialization\n\n image_temp = numpy.array(image)\n detected_vehicle_image = image_temp[int(top):int(bottom), int(left):int(right)]\n\n '''if(bottom > ROI_POSITION): # if the vehicle get in ROI area, vehicle predicted_speed predicted_color algorithms are called - 200 is an arbitrary value, for my case it looks very well to set position of ROI line at y pixel 200'''\n if(x_axis[0] == 1):\n predicted_direction, is_vehicle_detected, update_csv = object_counter_x_axis.count_objects_x_axis(top, bottom, right, left, detected_vehicle_image, ROI_POSITION[0], ROI_POSITION[0]+DEVIATION[0], ROI_POSITION[0]+(DEVIATION[0]*2), DEVIATION[0],ROI_Area)\n elif(mode_number[0] == 2):\n predicted_direction, is_vehicle_detected, update_csv = object_counter.count_objects(top, bottom, right, left, detected_vehicle_image, ROI_POSITION[0], ROI_POSITION[0]+DEVIATION[0], ROI_POSITION[0]+(DEVIATION[0]*2), DEVIATION[0])\n\n if(1 in is_color_recognition_enable):\n predicted_color = color_recognition_api.color_recognition(detected_vehicle_image) \n \n try:\n font = ImageFont.truetype('arial.ttf', 16)\n except IOError:\n font = ImageFont.load_default()\n\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n if(1 in is_color_recognition_enable):\n display_str_list[0] = predicted_color + \" \" + display_str_list[0]\n csv_line = predicted_color + \",\" + str (predicted_direction) # csv line created\n else:\n display_str_list[0] = display_str_list[0]\n csv_line = str (predicted_direction) # csv line created\n \n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle(\n [(left, text_bottom - text_height - 2 * margin), (left + text_width,\n text_bottom)],\n fill=color)\n draw.text(\n (left + margin, text_bottom - text_height - margin),\n display_str,\n fill='black',\n font=font)\n text_bottom -= text_height - 2 * margin\n return is_vehicle_detected, csv_line, update_csv\n\n\ndef draw_bounding_boxes_on_image_array(image,\n boxes,\n color='red',\n thickness=4,\n display_str_list_list=()):\n \"\"\"Draws bounding boxes on image (numpy array).\n\n Args:\n image: a numpy array object.\n boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).\n The coordinates are in normalized 
format between [0, 1].\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list_list: list of list of strings.\n a list of strings for each bounding box.\n The reason to pass a list of strings for a\n bounding box is that it might contain\n multiple labels.\n\n Raises:\n ValueError: if boxes is not a [N, 4] array\n \"\"\"\n image_pil = Image.fromarray(image)\n draw_bounding_boxes_on_image(image_pil, boxes, color, thickness, display_str_list_list)\n np.copyto(image, np.array(image_pil))\n\n\ndef draw_bounding_boxes_on_image(image,\n boxes,\n color='red',\n thickness=4,\n display_str_list_list=()):\n \"\"\"Draws bounding boxes on image.\n\n Args:\n image: a PIL.Image object.\n boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).\n The coordinates are in normalized format between [0, 1].\n color: color to draw bounding box. Default is red.\n thickness: line thickness. Default value is 4.\n display_str_list_list: list of list of strings.\n a list of strings for each bounding box.\n The reason to pass a list of strings for a\n bounding box is that it might contain\n multiple labels.\n\n Raises:\n ValueError: if boxes is not a [N, 4] array\n \"\"\"\n boxes_shape = boxes.shape\n if not boxes_shape:\n return\n if len(boxes_shape) != 2 or boxes_shape[1] != 4:\n raise ValueError('Input must be of size [N, 4]')\n for i in range(boxes_shape[0]):\n display_str_list = ()\n if display_str_list_list:\n display_str_list = display_str_list_list[i]\n \n draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],\n boxes[i, 3], color, thickness, display_str_list)\n\ndef draw_bounding_boxes_on_image_tensors(images,\n boxes,\n classes,\n scores,\n category_index,\n max_boxes_to_draw=20,\n min_score_thresh=0.2):\n \"\"\"Draws bounding boxes on batch of image tensors.\n\n Args:\n images: A 4D uint8 image tensor of shape [N, H, W, C].\n boxes: [N, max_detections, 4] float32 tensor of detection boxes.\n classes: [N, max_detections] int tensor of detection classes. Note that\n classes are 1-indexed.\n scores: [N, max_detections] float32 tensor of detection scores.\n category_index: a dict that maps integer ids to category dicts. e.g.\n {1: {1: 'dog'}, 2: {2: 'cat'}, ...}\n max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.\n min_score_thresh: Minimum score threshold for visualization. 
Default 0.2.\n\n Returns:\n 4D image tensor of type uint8, with boxes drawn on top.\n \"\"\"\n visualize_boxes_fn = functools.partial(\n visualize_boxes_and_labels_on_image_array,\n category_index=category_index,\n instance_masks=None,\n keypoints=None,\n use_normalized_coordinates=True,\n max_boxes_to_draw=max_boxes_to_draw,\n min_score_thresh=min_score_thresh,\n agnostic_mode=False,\n line_thickness=4)\n\n def draw_boxes(image_boxes_classes_scores):\n \"\"\"Draws boxes on image.\"\"\"\n (image, boxes, classes, scores) = image_boxes_classes_scores\n image_with_boxes = tf.py_func(visualize_boxes_fn,\n [image, boxes, classes, scores], tf.uint8)\n return image_with_boxes\n\n images = tf.map_fn(\n draw_boxes, (images, boxes, classes, scores),\n dtype=tf.uint8,\n back_prop=False)\n return images\n\n\ndef draw_keypoints_on_image_array(image,\n keypoints,\n color='red',\n radius=2,\n use_normalized_coordinates=True):\n \"\"\"Draws keypoints on an image (numpy array).\n\n Args:\n image: a numpy array with shape [height, width, 3].\n keypoints: a numpy array with shape [num_keypoints, 2].\n color: color to draw the keypoints with. Default is red.\n radius: keypoint radius. Default value is 2.\n use_normalized_coordinates: if True (default), treat keypoint values as\n relative to the image. Otherwise treat them as absolute.\n \"\"\"\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n draw_keypoints_on_image(image_pil, keypoints, color, radius,\n use_normalized_coordinates)\n np.copyto(image, np.array(image_pil))\n\n\ndef draw_keypoints_on_image(image,\n keypoints,\n color='red',\n radius=2,\n use_normalized_coordinates=True):\n \"\"\"Draws keypoints on an image.\n\n Args:\n image: a PIL.Image object.\n keypoints: a numpy array with shape [num_keypoints, 2].\n color: color to draw the keypoints with. Default is red.\n radius: keypoint radius. Default value is 2.\n use_normalized_coordinates: if True (default), treat keypoint values as\n relative to the image. Otherwise treat them as absolute.\n \"\"\"\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n keypoints_x = [k[1] for k in keypoints]\n keypoints_y = [k[0] for k in keypoints]\n if use_normalized_coordinates:\n keypoints_x = tuple([im_width * x for x in keypoints_x])\n keypoints_y = tuple([im_height * y for y in keypoints_y])\n for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):\n draw.ellipse([(keypoint_x - radius, keypoint_y - radius),\n (keypoint_x + radius, keypoint_y + radius)],\n outline=color, fill=color)\n\ndef draw_mask_on_image_array(image, mask, color='red', alpha=0.7):\n \"\"\"Draws mask on an image.\n\n Args:\n image: uint8 numpy array with shape (img_height, img_height, 3)\n mask: a uint8 numpy array of shape (img_height, img_height) with\n values between either 0 or 1.\n color: color to draw the keypoints with. Default is red.\n alpha: transparency value between 0 and 1. 
(default: 0.7)\n\n Raises:\n ValueError: On incorrect data type for image or masks.\n \"\"\"\n if image.dtype != np.uint8:\n raise ValueError('`image` not of type np.uint8')\n if mask.dtype != np.uint8:\n raise ValueError('`mask` not of type np.uint8')\n if np.any(np.logical_and(mask != 1, mask != 0)):\n raise ValueError('`mask` elements should be in [0, 1]')\n rgb = ImageColor.getrgb(color)\n pil_image = Image.fromarray(image)\n\n solid_color = np.expand_dims(\n np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])\n pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')\n pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')\n pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)\n np.copyto(image, np.array(pil_image.convert('RGB')))\n\n\ndef visualize_boxes_and_labels_on_image_array(current_frame_number,\n image,\n mode,\n color_recognition_status,\n boxes,\n classes,\n scores,\n category_index,\n\t\t\t\t\t targeted_objects=None,\n y_reference=None,\n deviation=None,\n instance_masks=None,\n keypoints=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=.5,\n agnostic_mode=False,\n line_thickness=4):\n \"\"\"Overlay labeled boxes on an image with formatted scores and label names.\n\n This function groups boxes that correspond to the same location\n and creates a display string for each detection and overlays these\n on the image. Note that this function modifies the image in place, and returns\n that same image.\n\n Args:\n image: uint8 numpy array with shape (img_height, img_width, 3)\n boxes: a numpy array of shape [N, 4]\n classes: a numpy array of shape [N]. Note that class indices are 1-based,\n and match the keys in the label map.\n scores: a numpy array of shape [N] or None. If scores=None, then\n this function assumes that the boxes to be plotted are groundtruth\n boxes and plot all boxes as black with no classes or scores.\n category_index: a dict containing category dictionaries (each holding\n category index `id` and category name `name`) keyed by category indices.\n instance_masks: a numpy array of shape [N, image_height, image_width], can\n be None\n keypoints: a numpy array of shape [N, num_keypoints, 2], can\n be None\n use_normalized_coordinates: whether boxes is to be interpreted as\n normalized coordinates or not.\n max_boxes_to_draw: maximum number of boxes to visualize. If None, draw\n all boxes.\n min_score_thresh: minimum score threshold for a box to be visualized\n agnostic_mode: boolean (default: False) controlling whether to evaluate in\n class-agnostic mode or not. 
This mode will display scores but ignore\n classes.\n line_thickness: integer (default: 4) controlling line width of the boxes.\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.\n \"\"\"\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n csv_line_util = \"not_available\"\n counter = 0\n ROI_POSITION.insert(0,y_reference)\n DEVIATION.insert(0,deviation)\n is_vehicle_detected = []\n mode_number.insert(0,mode)\n is_color_recognition_enable.insert(0,color_recognition_status)\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n box_to_keypoints_map = collections.defaultdict(list)\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n if keypoints is not None:\n box_to_keypoints_map[box].extend(keypoints[i])\n if scores is None:\n box_to_color_map[box] = 'black'\n else:\n if not agnostic_mode:\n if classes[i] in category_index.keys():\n class_name = category_index[classes[i]]['name'] \n else:\n class_name = 'N/A' \n display_str = '{}: {}%'.format(class_name,int(100*scores[i]))\n else:\n display_str = 'score: {}%'.format(int(100 * scores[i])) \n\n box_to_display_str_map[box].append(display_str)\n if agnostic_mode:\n box_to_color_map[box] = 'DarkOrange'\n else:\n box_to_color_map[box] = STANDARD_COLORS[\n classes[i] % len(STANDARD_COLORS)]\n\n if(mode == 1):\n counting_mode = \"\"\n # Draw all boxes onto image.\n for box, color in box_to_color_map.items():\n ymin, xmin, ymax, xmax = box\n '''if instance_masks is not None:\n draw_mask_on_image_array(\n image,\n box_to_instance_masks_map[box],\n color=color\n )'''\n \n display_str_list=box_to_display_str_map[box]\n\n if(mode == 1 and targeted_objects == None):\n counting_mode = counting_mode + str(display_str_list)\n\n elif(mode == 1 and targeted_objects in display_str_list[0]):\n counting_mode = counting_mode + str(display_str_list)\n #Debug\n print(display_str_list)\n print(targeted_objects)\n if ((targeted_objects != None) and (targeted_objects in display_str_list[0])):\n if instance_masks is not None:\n draw_mask_on_image_array(image, box_to_instance_masks_map[box], color=color)\n \n is_vehicle_detected, csv_line, update_csv = draw_bounding_box_on_image_array(current_frame_number,\n image,\n ymin,\n xmin,\n ymax,\n xmax,\n color=color,\n thickness=line_thickness,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates) \n \n if keypoints is not None:\n draw_keypoints_on_image_array(\n image,\n box_to_keypoints_map[box],\n color=color,\n radius=line_thickness / 2,\n use_normalized_coordinates=use_normalized_coordinates)\n\n elif (targeted_objects == None):\n if instance_masks is not None:\n draw_mask_on_image_array(image, box_to_instance_masks_map[box], color=color)\n\n is_vehicle_detected, csv_line, update_csv = draw_bounding_box_on_image_array(current_frame_number,\n image,\n ymin,\n xmin,\n ymax,\n xmax,\n color=color,\n thickness=line_thickness,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates) \n \n if keypoints is not None:\n draw_keypoints_on_image_array(\n image,\n box_to_keypoints_map[box],\n 
color=color,\n radius=line_thickness / 2,\n use_normalized_coordinates=use_normalized_coordinates)\n\n if(1 in is_vehicle_detected):\n counter = 1\n del is_vehicle_detected[:]\n is_vehicle_detected = [] \n csv_line_util = class_name + \",\" + csv_line \n\n if(mode == 1):\n counting_mode = counting_mode.replace(\"['\", \" \").replace(\"']\", \" \").replace(\"%\", \"\")\n counting_mode = ''.join([i for i in counting_mode.replace(\"['\", \" \").replace(\"']\", \" \").replace(\"%\", \"\") if not i.isdigit()])\n counting_mode = str(custom_string_util.word_count(counting_mode))\n counting_mode = counting_mode.replace(\"{\", \"\").replace(\"}\", \"\")\n\n return counter, csv_line_util, counting_mode\n\n else:\n return counter, csv_line_util\n\ndef visualize_boxes_and_labels_on_image_array_x_axis(current_frame_number,\n image,\n mode,\n color_recognition_status,\n boxes,\n classes,\n scores,\n category_index,\n\t\t\t\t\t targeted_objects=None,\n x_reference=None,\n deviation=None,\n instance_masks=None,\n keypoints=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=.5,\n agnostic_mode=False,\n line_thickness=4,\n roiArea=None):\n \"\"\"Overlay labeled boxes on an image with formatted scores and label names.\n\n This function groups boxes that correspond to the same location\n and creates a display string for each detection and overlays these\n on the image. Note that this function modifies the image in place, and returns\n that same image.\n\n Args:\n image: uint8 numpy array with shape (img_height, img_width, 3)\n boxes: a numpy array of shape [N, 4]\n classes: a numpy array of shape [N]. Note that class indices are 1-based,\n and match the keys in the label map.\n scores: a numpy array of shape [N] or None. If scores=None, then\n this function assumes that the boxes to be plotted are groundtruth\n boxes and plot all boxes as black with no classes or scores.\n category_index: a dict containing category dictionaries (each holding\n category index `id` and category name `name`) keyed by category indices.\n instance_masks: a numpy array of shape [N, image_height, image_width], can\n be None\n keypoints: a numpy array of shape [N, num_keypoints, 2], can\n be None\n use_normalized_coordinates: whether boxes is to be interpreted as\n normalized coordinates or not.\n max_boxes_to_draw: maximum number of boxes to visualize. If None, draw\n all boxes.\n min_score_thresh: minimum score threshold for a box to be visualized\n agnostic_mode: boolean (default: False) controlling whether to evaluate in\n class-agnostic mode or not. 
This mode will display scores but ignore\n classes.\n line_thickness: integer (default: 4) controlling line width of the boxes.\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.\n \"\"\"\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n csv_line_util = \"not_available\"\n counter = 0\n ROI_POSITION.insert(0,x_reference)\n DEVIATION.insert(0,deviation)\n x_axis.insert(0,1)\n is_vehicle_detected = []\n mode_number.insert(0,mode)\n is_color_recognition_enable.insert(0,color_recognition_status)\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n box_to_keypoints_map = collections.defaultdict(list)\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n if keypoints is not None:\n box_to_keypoints_map[box].extend(keypoints[i])\n if scores is None:\n box_to_color_map[box] = 'black'\n else:\n if not agnostic_mode:\n if classes[i] in category_index.keys():\n class_name = category_index[classes[i]]['name'] \n else:\n class_name = 'N/A' \n display_str = '{}: {}%'.format(class_name,int(100*scores[i]))\n else:\n display_str = 'score: {}%'.format(int(100 * scores[i])) \n\n box_to_display_str_map[box].append(display_str)\n if agnostic_mode:\n box_to_color_map[box] = 'DarkOrange'\n else:\n box_to_color_map[box] = STANDARD_COLORS[\n classes[i] % len(STANDARD_COLORS)]\n\n if(mode == 1):\n counting_mode = \"\"\n # Draw all boxes onto image.\n for box, color in box_to_color_map.items():\n ymin, xmin, ymax, xmax = box\n '''if instance_masks is not None:\n draw_mask_on_image_array(\n image,\n box_to_instance_masks_map[box],\n color=color\n )'''\n \n display_str_list=box_to_display_str_map[box]\n\n # Added to support multiple classes\n result = any(elem in display_str_list[0] for elem in targeted_objects)\n\n if(mode == 1 and targeted_objects == None):\n counting_mode = counting_mode + str(display_str_list)\n\n #elif(mode == 1 and targeted_objects in display_str_list[0]):\n elif(mode == 1 and display_str_list[0].split(\":\")[0] in targeted_objects):\n counting_mode = counting_mode + str(display_str_list)\n print(display_str_list)\n #if ((targeted_objects != None) and (targeted_objects in display_str_list[0])):\n if ((targeted_objects != None) and display_str_list[0].split(\":\")[0] in targeted_objects):\n if instance_masks is not None:\n draw_mask_on_image_array(image, box_to_instance_masks_map[box], color=color)\n \n is_vehicle_detected, csv_line, update_csv = draw_bounding_box_on_image_array(current_frame_number,\n image,\n ymin,\n xmin,\n ymax,\n xmax,\n color=color,\n thickness=line_thickness,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates) \n \n if keypoints is not None:\n draw_keypoints_on_image_array(\n image,\n box_to_keypoints_map[box],\n color=color,\n radius=line_thickness / 2,\n use_normalized_coordinates=use_normalized_coordinates)\n\n elif (targeted_objects == None):\n if instance_masks is not None:\n draw_mask_on_image_array(image, box_to_instance_masks_map[box], color=color)\n\n is_vehicle_detected, csv_line, update_csv = draw_bounding_box_on_image_array(current_frame_number,\n image,\n ymin,\n 
xmin,\n ymax,\n xmax,\n color=color,\n thickness=line_thickness,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates) \n \n if keypoints is not None:\n draw_keypoints_on_image_array(\n image,\n box_to_keypoints_map[box],\n color=color,\n radius=line_thickness / 2,\n use_normalized_coordinates=use_normalized_coordinates)\n\n if(1 in is_vehicle_detected):\n counter = 1\n del is_vehicle_detected[:]\n is_vehicle_detected = [] \n csv_line_util = class_name + \",\" + csv_line \n\n if(mode == 1):\n counting_mode = counting_mode.replace(\"['\", \" \").replace(\"']\", \" \").replace(\"%\", \"\")\n counting_mode = ''.join([i for i in counting_mode.replace(\"['\", \" \").replace(\"']\", \" \").replace(\"%\", \"\") if not i.isdigit()])\n counting_mode = str(custom_string_util.word_count(counting_mode))\n counting_mode = counting_mode.replace(\"{\", \"\").replace(\"}\", \"\")\n\n return counter, csv_line_util, counting_mode\n\n else:\n return counter, csv_line_util\n\ndef visualize_boxes_and_labels_on_image_array_y_axis(current_frame_number,\n image,\n mode,\n color_recognition_status,\n boxes,\n classes,\n scores,\n category_index,\n\t\t\t\t\t targeted_objects=None,\n y_reference=None,\n deviation=None,\n instance_masks=None,\n keypoints=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=.5,\n agnostic_mode=False,\n line_thickness=4):\n \"\"\"Overlay labeled boxes on an image with formatted scores and label names.\n\n This function groups boxes that correspond to the same location\n and creates a display string for each detection and overlays these\n on the image. Note that this function modifies the image in place, and returns\n that same image.\n\n Args:\n image: uint8 numpy array with shape (img_height, img_width, 3)\n boxes: a numpy array of shape [N, 4]\n classes: a numpy array of shape [N]. Note that class indices are 1-based,\n and match the keys in the label map.\n scores: a numpy array of shape [N] or None. If scores=None, then\n this function assumes that the boxes to be plotted are groundtruth\n boxes and plot all boxes as black with no classes or scores.\n category_index: a dict containing category dictionaries (each holding\n category index `id` and category name `name`) keyed by category indices.\n instance_masks: a numpy array of shape [N, image_height, image_width], can\n be None\n keypoints: a numpy array of shape [N, num_keypoints, 2], can\n be None\n use_normalized_coordinates: whether boxes is to be interpreted as\n normalized coordinates or not.\n max_boxes_to_draw: maximum number of boxes to visualize. If None, draw\n all boxes.\n min_score_thresh: minimum score threshold for a box to be visualized\n agnostic_mode: boolean (default: False) controlling whether to evaluate in\n class-agnostic mode or not. 
This mode will display scores but ignore\n classes.\n line_thickness: integer (default: 4) controlling line width of the boxes.\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.\n \"\"\"\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n csv_line_util = \"not_available\"\n counter = 0\n ROI_POSITION.insert(0,y_reference)\n DEVIATION.insert(0,deviation)\n is_vehicle_detected = []\n mode_number.insert(0,mode)\n is_color_recognition_enable.insert(0,color_recognition_status)\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n box_to_keypoints_map = collections.defaultdict(list)\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n if keypoints is not None:\n box_to_keypoints_map[box].extend(keypoints[i])\n if scores is None:\n box_to_color_map[box] = 'black'\n else:\n if not agnostic_mode:\n if classes[i] in category_index.keys():\n class_name = category_index[classes[i]]['name'] \n else:\n class_name = 'N/A' \n display_str = '{}: {}%'.format(class_name,int(100*scores[i]))\n else:\n display_str = 'score: {}%'.format(int(100 * scores[i])) \n\n box_to_display_str_map[box].append(display_str)\n if agnostic_mode:\n box_to_color_map[box] = 'DarkOrange'\n else:\n box_to_color_map[box] = STANDARD_COLORS[\n classes[i] % len(STANDARD_COLORS)]\n\n if(mode == 2):\n counting_mode = \"\"\n # Draw all boxes onto image.\n for box, color in box_to_color_map.items():\n ymin, xmin, ymax, xmax = box\n '''if instance_masks is not None:\n draw_mask_on_image_array(\n image,\n box_to_instance_masks_map[box],\n color=color\n )'''\n \n display_str_list=box_to_display_str_map[box]\n\n if(mode == 2 and targeted_objects == None):\n counting_mode = counting_mode + str(display_str_list)\n\n elif(mode == 2 and targeted_objects in display_str_list[0]):\n counting_mode = counting_mode + str(display_str_list)\n\n if ((targeted_objects != None) and (targeted_objects in display_str_list[0])):\n\t if instance_masks is not None:\n\t draw_mask_on_image_array(image, box_to_instance_masks_map[box], color=color)\n\t\n\t is_vehicle_detected, csv_line, update_csv = draw_bounding_box_on_image_array(current_frame_number,\n\t image,\n\t ymin,\n\t xmin,\n\t ymax,\n\t xmax,\n\t color=color,\n\t thickness=line_thickness,\n\t display_str_list=box_to_display_str_map[box],\n\t use_normalized_coordinates=use_normalized_coordinates) \n \n\t if keypoints is not None:\n\t draw_keypoints_on_image_array(\n\t image,\n\t box_to_keypoints_map[box],\n\t color=color,\n\t radius=line_thickness / 2,\n\t use_normalized_coordinates=use_normalized_coordinates)\n\n elif (targeted_objects == None):\n\t if instance_masks is not None:\n\t draw_mask_on_image_array(image, box_to_instance_masks_map[box], color=color)\n\n\t is_vehicle_detected, csv_line, update_csv = draw_bounding_box_on_image_array(current_frame_number,\n\t image,\n\t ymin,\n\t xmin,\n\t ymax,\n\t xmax,\n\t color=color,\n\t thickness=line_thickness,\n\t display_str_list=box_to_display_str_map[box],\n\t use_normalized_coordinates=use_normalized_coordinates) \n \n\t if keypoints is not None:\n\t draw_keypoints_on_image_array(\n\t image,\n\t 
box_to_keypoints_map[box],\n\t color=color,\n\t radius=line_thickness / 2,\n\t use_normalized_coordinates=use_normalized_coordinates)\n\n if(1 in is_vehicle_detected):\n counter = 1\n del is_vehicle_detected[:]\n is_vehicle_detected = [] \n csv_line_util = class_name + \",\" + csv_line \n\n if(mode == 2):\n counting_mode = counting_mode.replace(\"['\", \" \").replace(\"']\", \" \").replace(\"%\", \"\")\n counting_mode = ''.join([i for i in counting_mode.replace(\"['\", \" \").replace(\"']\", \" \").replace(\"%\", \"\") if not i.isdigit()])\n counting_mode = str(custom_string_util.word_count(counting_mode))\n counting_mode = counting_mode.replace(\"{\", \"\").replace(\"}\", \"\")\n\n return counter, csv_line_util, counting_mode\n\n else:\n return counter, csv_line_util\n\ndef visualize_boxes_and_labels_on_single_image_array(current_frame_number,\n image,\n mode,\n color_recognition_status,\n boxes,\n classes,\n scores,\n category_index,\n\t\t\t\t\t targeted_objects=None,\n y_reference=None,\n deviation=None,\n instance_masks=None,\n keypoints=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=.5,\n agnostic_mode=False,\n line_thickness=4):\n \"\"\"Overlay labeled boxes on an image with formatted scores and label names.\n\n This function groups boxes that correspond to the same location\n and creates a display string for each detection and overlays these\n on the image. Note that this function modifies the image in place, and returns\n that same image.\n\n Args:\n image: uint8 numpy array with shape (img_height, img_width, 3)\n boxes: a numpy array of shape [N, 4]\n classes: a numpy array of shape [N]. Note that class indices are 1-based,\n and match the keys in the label map.\n scores: a numpy array of shape [N] or None. If scores=None, then\n this function assumes that the boxes to be plotted are groundtruth\n boxes and plot all boxes as black with no classes or scores.\n category_index: a dict containing category dictionaries (each holding\n category index `id` and category name `name`) keyed by category indices.\n instance_masks: a numpy array of shape [N, image_height, image_width], can\n be None\n keypoints: a numpy array of shape [N, num_keypoints, 2], can\n be None\n use_normalized_coordinates: whether boxes is to be interpreted as\n normalized coordinates or not.\n max_boxes_to_draw: maximum number of boxes to visualize. If None, draw\n all boxes.\n min_score_thresh: minimum score threshold for a box to be visualized\n agnostic_mode: boolean (default: False) controlling whether to evaluate in\n class-agnostic mode or not. 
This mode will display scores but ignore\n classes.\n line_thickness: integer (default: 4) controlling line width of the boxes.\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.\n \"\"\"\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n csv_line_util = \"not_available\"\n counter = 0\n ROI_POSITION.insert(0,y_reference)\n DEVIATION.insert(0,deviation)\n is_vehicle_detected = []\n mode_number.insert(0,mode)\n is_color_recognition_enable.insert(0,color_recognition_status)\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n box_to_keypoints_map = collections.defaultdict(list)\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n if keypoints is not None:\n box_to_keypoints_map[box].extend(keypoints[i])\n if scores is None:\n box_to_color_map[box] = 'black'\n else:\n if not agnostic_mode:\n if classes[i] in category_index.keys():\n class_name = category_index[classes[i]]['name'] \n else:\n class_name = 'N/A' \n display_str = '{}: {}%'.format(class_name,int(100*scores[i]))\n else:\n display_str = 'score: {}%'.format(int(100 * scores[i])) \n\n box_to_display_str_map[box].append(display_str)\n if agnostic_mode:\n box_to_color_map[box] = 'DarkOrange'\n else:\n box_to_color_map[box] = STANDARD_COLORS[\n classes[i] % len(STANDARD_COLORS)]\n\n if(mode == 1):\n counting_mode = \"\"\n # Draw all boxes onto image.\n for box, color in box_to_color_map.items():\n ymin, xmin, ymax, xmax = box\n '''if instance_masks is not None:\n draw_mask_on_image_array(\n image,\n box_to_instance_masks_map[box],\n color=color\n )'''\n \n display_str_list=box_to_display_str_map[box]\n\n if(mode == 1 and targeted_objects == None):\n counting_mode = counting_mode + str(display_str_list)\n\n elif(mode == 1 and targeted_objects in display_str_list[0]):\n counting_mode = counting_mode + str(display_str_list)\n\n if ((targeted_objects != None) and (targeted_objects in display_str_list[0])):\n if instance_masks is not None:\n draw_mask_on_image_array(image, box_to_instance_masks_map[box], color=color)\n \n is_vehicle_detected, csv_line, update_csv = draw_bounding_box_on_image_array(current_frame_number,\n image,\n ymin,\n xmin,\n ymax,\n xmax,\n color=color,\n thickness=line_thickness,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates) \n \n if keypoints is not None:\n draw_keypoints_on_image_array(\n image,\n box_to_keypoints_map[box],\n color=color,\n radius=line_thickness / 2,\n use_normalized_coordinates=use_normalized_coordinates)\n\n elif (targeted_objects == None):\n if instance_masks is not None:\n draw_mask_on_image_array(image, box_to_instance_masks_map[box], color=color)\n\n is_vehicle_detected, csv_line, update_csv = draw_bounding_box_on_image_array(current_frame_number,\n image,\n ymin,\n xmin,\n ymax,\n xmax,\n color=color,\n thickness=line_thickness,\n display_str_list=box_to_display_str_map[box],\n use_normalized_coordinates=use_normalized_coordinates) \n \n if keypoints is not None:\n draw_keypoints_on_image_array(\n image,\n box_to_keypoints_map[box],\n color=color,\n radius=line_thickness / 2,\n 
use_normalized_coordinates=use_normalized_coordinates)\n\n if(1 in is_vehicle_detected):\n counter = 1\n del is_vehicle_detected[:]\n is_vehicle_detected = [] \n csv_line_util = class_name + \",\" + csv_line \n\n if(mode == 1):\n counting_mode = counting_mode.replace(\"['\", \" \").replace(\"']\", \" \").replace(\"%\", \"\")\n counting_mode = ''.join([i for i in counting_mode.replace(\"['\", \" \").replace(\"']\", \" \").replace(\"%\", \"\") if not i.isdigit()])\n counting_mode = str(custom_string_util.word_count(counting_mode))\n counting_mode = counting_mode.replace(\"{\", \"\").replace(\"}\", \"\")\n\n return counter, csv_line_util, counting_mode\n\n else:\n return counter, csv_line_util\n\ndef add_cdf_image_summary(values, name):\n \"\"\"Adds a tf.summary.image for a CDF plot of the values.\n\n Normalizes `values` such that they sum to 1, plots the cumulative distribution\n function and creates a tf image summary.\n\n Args:\n values: a 1-D float32 tensor containing the values.\n name: name for the image summary.\n \"\"\"\n def cdf_plot(values):\n \"\"\"Numpy function to plot CDF.\"\"\"\n normalized_values = values / np.sum(values)\n sorted_values = np.sort(normalized_values)\n cumulative_values = np.cumsum(sorted_values)\n fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)\n / cumulative_values.size)\n fig = plt.figure(frameon=False)\n ax = fig.add_subplot('111')\n ax.plot(fraction_of_examples, cumulative_values)\n ax.set_ylabel('cumulative normalized values')\n ax.set_xlabel('fraction of examples')\n fig.canvas.draw()\n width, height = fig.get_size_inches() * fig.get_dpi()\n image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(\n 1, height, width, 3)\n return image\n cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)\n tf.summary.image(name, cdf_plot)\n\n"
] |
[
[
"numpy.ones_like",
"numpy.logical_and",
"tensorflow.gfile.Open",
"tensorflow.summary.image",
"numpy.uint8",
"numpy.arange",
"numpy.cumsum",
"numpy.sort",
"numpy.ceil",
"tensorflow.map_fn",
"numpy.array",
"numpy.sum",
"tensorflow.py_func",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
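Stripped of the counting, CSV, and colour-recognition logic, the drawing core of `draw_bounding_box_on_image` in the row above is a coordinate conversion plus a single `ImageDraw.line` call that traces the four box edges. A self-contained sketch follows; the blank image and box coordinates are made up for the demo.

import numpy as np
from PIL import Image, ImageDraw

image = Image.fromarray(np.zeros((200, 300, 3), dtype=np.uint8))
ymin, xmin, ymax, xmax = 0.25, 0.25, 0.75, 0.75  # normalized box coordinates

im_width, im_height = image.size
left, right = xmin * im_width, xmax * im_width
top, bottom = ymin * im_height, ymax * im_height

draw = ImageDraw.Draw(image)
# Closing the polyline back at (left, top) draws all four edges of the box.
draw.line(
    [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
    width=4,
    fill='red',
)
image.save('box_demo.png')  # arbitrary output name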
granatb/dtu_mlops
|
[
"8e23bb1aac6b5850e9a7b1ddbe43cf64db619950"
] |
[
"s7_scalable_applications/exercise_files/lfw_dataset.py"
] |
[
"\"\"\"\nLFW dataloading\n\"\"\"\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms\nimport glob\nimport os\nimport pandas as pd\nfrom torchvision.utils import make_grid\n\nimport matplotlib.pyplot as plt\nimport torchvision.transforms.functional as F\n\nplt.rcParams[\"savefig.bbox\"] = 'tight'\n\nclass LFWDataset(Dataset):\n def __init__(self, path_to_folder: str, transform) -> None:\n\n self.transform = transform\n self.paths = []\n for path in glob.iglob(os.path.join(path_to_folder, \"**\", \"*.jpg\")):\n self.paths.append(path)\n \n def __len__(self):\n print(f'images_count: {len(self.paths)}')\n return len(self.paths) # TODO: fill out\n \n def __getitem__(self, index: int) -> torch.Tensor:\n # TODO: fill out\n img = Image.open(self.paths[index])\n # print(torch.from_numpy(np.array(self.transform(img))).shape)\n return self.transform(img)\n\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-path_to_folder', default='.\\data\\lfw', type=str)\n parser.add_argument('-batch_size', default=512, type=int)\n parser.add_argument('-num_workers', default=1, type=int)\n parser.add_argument('-visualize_batch', action='store_true')\n parser.add_argument('-get_timing', action='store_true')\n parser.add_argument('-batches_to_check', default=100, type=int)\n \n args = parser.parse_args()\n \n lfw_trans = transforms.Compose([\n transforms.RandomAffine(5, (0.1, 0.1), (0.5, 2.0)),\n transforms.ToTensor()\n ])\n \n # Define dataset\n dataset = LFWDataset(args.path_to_folder, lfw_trans)\n \n # Define dataloader\n dataloader = DataLoader(\n dataset, \n batch_size=args.batch_size, \n shuffle=False,\n num_workers=args.num_workers\n )\n \n def show(imgs):\n if not isinstance(imgs, list):\n imgs = [imgs]\n fig, axs = plt.subplots(ncols=len(imgs), squeeze=False)\n for i, img in enumerate(imgs):\n img = img.detach()\n img = np.asarray(img).transpose(1,2,0)\n axs[0, i].imshow(img)\n axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])\n fig.savefig('figures\\image_grid.png')\n\n # for batch_idx, batch in enumerate(dataloader):\n # print(batch)\n\n if args.visualize_batch:\n # TODO: visualize a batch of images\n grid = make_grid(next(iter(dataloader)))\n show(grid)\n \n \n if args.get_timing:\n # lets do some repetitions\n res = [ ]\n for _ in range(5):\n start = time.time()\n for batch_idx, batch in enumerate(dataloader):\n if batch_idx > args.batches_to_check:\n break\n end = time.time()\n\n res.append(end - start)\n \n res = np.array(res)\n print('Timing: {np.mean(res)}+-{np.std(res)}')\n"
] |
[
[
"numpy.asarray",
"numpy.array",
"torch.utils.data.DataLoader"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
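The timing loop at the bottom of `lfw_dataset.py` above generalises to any `DataLoader`; a common exercise is to rerun it for several `num_workers` values and compare. The sketch below reproduces the loop with a synthetic `TensorDataset` so it runs without the LFW images; the tensor shape, batch size, and repetition counts are arbitrary.

import time

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset

# Synthetic stand-in for the LFW image folder.
dataset = TensorDataset(torch.randn(2048, 3, 64, 64))
dataloader = DataLoader(dataset, batch_size=128, num_workers=0)

res = []
for _ in range(5):  # a few repetitions to average out noise
    start = time.time()
    for batch_idx, batch in enumerate(dataloader):
        if batch_idx > 10:  # only time the first few batches
            break
    res.append(time.time() - start)

res = np.array(res)
print(f'Timing: {np.mean(res)}+-{np.std(res)}')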
msilvestro/dupin
|
[
"db06432cab6910c6965b9c35baaef96eb84f0d81"
] |
[
"start_clustering.py"
] |
[
"\"\"\"Start the clustering.\"\"\"\n# pylint: disable=C0103\nfrom time import time\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_samples\nfrom clustering.kmedoids import pam_npass\nfrom clustering.metrics import (dissimilarity_matrix, _dissimilarity_matrix,\n euclidean_distance, manhattan_distance,\n supremum_distance)\n\n# parameters to be changed\nK_MIN = 2 # minimum number of clusters to test\nK_MAX = 50 # maximum number of clusters to test\nMETHOD = 'kmedoids' # method of clustering\nDISTANCE = 'manhattan' # distance for the clustering\n\n# load the data\ndata = np.loadtxt('data/warped_curves.gz')\n\n# initialize the vectors\nk_range = np.arange(K_MIN, K_MAX)\nsil_scores = np.zeros(K_MAX - K_MIN)\nall_labels = np.empty((K_MAX - K_MIN, data.shape[0]), dtype=np.uint32)\nsil_samples = np.empty((K_MAX - K_MIN, data.shape[0]))\n\n# compute the dissimilarity matrix based on the chosen distance\nif DISTANCE == 'manhattan':\n dm = _dissimilarity_matrix(manhattan_distance)\ndiss = dm(data)\n\n# perform the clustering according to the parameters\nfor k in k_range:\n # keep track of the ongoing process\n print(\"## {:} ##\".format(k))\n\n # start the clustering and time it\n start = time()\n if METHOD == 'kmedoids':\n labels = pam_npass(diss, k, npass=10)[0]\n print(\"Elapsed time: {:.4f}\".format(time() - start))\n\n # compute the silhouettes for the clustering\n sil = silhouette_samples(diss, labels, metric='precomputed')\n sil_score = sil.mean()\n print(\"Silhouette score: {:.6f}\".format(sil_score))\n sil_scores[k - K_MIN] = sil_score\n all_labels[k - K_MIN] = labels\n sil_samples[k - K_MIN] = sil\n\n# save the results\nnp.savetxt(\n 'results/clustering/{:}_{:}_k_range.gz'.format(METHOD, DISTANCE),\n k_range\n)\nnp.savetxt(\n 'results/clustering/{:}_{:}_sil_scores.gz'.format(METHOD, DISTANCE),\n sil_scores\n)\nnp.savetxt(\n 'results/clustering/{:}_{:}_all_labels.gz'.format(METHOD, DISTANCE),\n all_labels\n)\nnp.savetxt(\n 'results/clustering/{:}_{:}_sil_samples.gz'.format(METHOD, DISTANCE),\n sil_samples\n)\n"
] |
[
[
"sklearn.metrics.silhouette_samples",
"numpy.arange",
"numpy.loadtxt",
"numpy.zeros",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
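The script in the row above is a standard model-selection sweep: cluster at each k, score with the mean silhouette on a precomputed dissimilarity matrix, and keep every k's labels and per-sample silhouettes. The sketch below keeps that structure but swaps the repository's `pam_npass` k-medoids for scikit-learn's `KMeans`, since `clustering.kmedoids` is the repo's own package; the random data is for illustration only.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances, silhouette_samples

rng = np.random.default_rng(0)
data = rng.normal(size=(200, 5))
diss = pairwise_distances(data, metric='manhattan')  # precomputed dissimilarities

for k in range(2, 6):
    labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(data)
    sil = silhouette_samples(diss, labels, metric='precomputed')
    print(k, sil.mean())  # pick the k with the highest mean silhouette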
leoozy/cite-rewrite
|
[
"98dbc1fe8eb27c83c71e6dd5248c539a046b299f"
] |
[
"torch_data_loader.py"
] |
[
"import numpy as np\r\nimport h5py\r\nimport os\r\nimport torch\r\nimport pdb\r\nfrom torch.utils.data import Dataset\r\nclass TorchDataLoader(Dataset):\r\n \"\"\"Class minibatches from data on disk in HDF5 format\"\"\"\r\n def __init__(self, args, region_dim, phrase_dim, plh, split):\r\n \"\"\"Constructor\r\n\r\n Arguments:\r\n args -- command line arguments passed into the main function\r\n region_dim -- dimensions of the region features\r\n phrase_dim -- dimensions of the phrase features\r\n plh -- placeholder dictory containing the tensor inputs\r\n split -- the data split (i.e. 'train', 'test', 'val')\r\n \"\"\"\r\n\r\n self.dataset = None\r\n self.datafn = os.path.join('../', '%s_imfeats.h5' % split)\r\n with h5py.File(self.datafn, 'r', swmr = True) as dataset:\r\n\t\r\n vecs = np.array(dataset['phrase_features'], np.float32)\r\n phrases = list(dataset['phrases'])\r\n assert(vecs.shape[0] == len(phrases))\r\n\r\n w2v_dict = {}\r\n for index, phrase in enumerate(phrases):\r\n \tw2v_dict[phrase] = vecs[index, :]\r\n\r\n # mapping from uniquePhrase to w2v\r\n self.w2v_dict = w2v_dict\r\n self.pairs = list(dataset['pairs'])\r\n self.n_pairs = len(self.pairs[0])\r\n self.pair_index = range(self.n_pairs)\r\n\r\n self.split = split\r\n self.plh = plh\r\n self.is_train = split == 'train'\r\n self.neg_to_pos_ratio = args.neg_to_pos_ratio\r\n self.batch_size = args.batch_size\r\n self.max_boxes = args.max_boxes\r\n if self.is_train:\r\n self.success_thresh = args.train_success_thresh\r\n else:\r\n self.success_thresh = args.test_success_thresh\r\n\r\n self.region_feature_dim = region_dim\r\n self.phrase_feature_dim = phrase_dim\r\n\r\n def __len__(self):\r\n return self.n_pairs\r\n\r\n def __getitem__(self, index):\r\n \"\"\"Returns a minibatch given a valid id for it\r\n\r\n Arguments:\r\n batch_id -- number between 0 and self.num_batches()\r\n\r\n Returns:\r\n feed_dict -- dictionary containing minibatch data\r\n gt_labels -- indicates positive/negative regions\r\n num_pairs -- number of pairs without padding\r\n \"\"\"\r\n with h5py.File(self.datafn, 'r', swmr = True) as dataset:\r\n\r\n region_features = np.zeros((self.max_boxes,\r\n self.region_feature_dim), dtype=np.float32)\r\n \r\n\r\n gt_labels = np.zeros((self.max_boxes),\r\n dtype=np.float32)\r\n phrase_features = np.zeros((self.phrase_feature_dim),\r\n dtype=np.float32)\r\n\r\n\r\n\r\n\t#print(\"index\", index)\r\n # paired image\r\n im_id = self.pairs[0][index]\r\n \r\n # paired phrase\r\n phrase = self.pairs[1][index]\r\n\r\n # phrase instance identifier\r\n p_id = self.pairs[2][index]\r\n\r\n # gets region features\r\n features = np.array(dataset[im_id], np.float32)\r\n num_boxes = min(len(features), self.max_boxes)\r\n features = features[:num_boxes, :self.region_feature_dim]\r\n overlaps = np.array(dataset['%s_%s_%s' % (im_id, phrase, p_id)])\r\n\r\n # last 4 dimensions of overlaps are ground truth box coordinates\r\n assert(num_boxes <= len(overlaps) - 4)\r\n overlaps = overlaps[:num_boxes]\r\n maxboxes_id = np.argmax(overlaps)\r\n region_features[:num_boxes,:] = features\r\n maxboxes_region_feature = region_features[maxboxes_id]\r\n phrase_features[:] = self.w2v_dict[phrase]\r\n gt_labels[ :num_boxes] = overlaps >= self.success_thresh\r\n if self.is_train:\r\n \tnum_pos = int(np.sum(gt_labels[:]))\r\n \tnum_neg = num_pos * self.neg_to_pos_ratio\r\n \tnegs = np.random.permutation(np.where(overlaps < 0.3)[0])\r\n\r\n \tif len(negs) < num_neg: # if not enough negatives\r\n \tnegs = np.random.permutation(np.where(overlaps < 
0.4)[0])\r\n\r\n # logistic loss only counts a region labeled as -1 negative\r\n \tgt_labels[negs[:num_neg]] = -1\r\n\r\n\r\n return phrase_features, region_features, self.is_train, self.max_boxes, gt_labels\r\n\r\n"
] |
[
[
"numpy.argmax",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
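The trickiest part of `__getitem__` in the row above is the label construction: regions whose overlap clears `success_thresh` become positives, and a random subset of low-overlap regions is relabelled -1 so the logistic loss treats them as negatives, relaxing the overlap cutoff when too few hard negatives exist. Isolated below with invented overlap values and thresholds:

import numpy as np

rng = np.random.default_rng(0)
overlaps = rng.uniform(size=20)           # invented box/phrase overlap scores
success_thresh, neg_to_pos_ratio = 0.7, 2

gt_labels = (overlaps >= success_thresh).astype(np.float32)
num_neg = int(gt_labels.sum()) * neg_to_pos_ratio

negs = rng.permutation(np.where(overlaps < 0.3)[0])
if len(negs) < num_neg:  # not enough hard negatives: relax the threshold
    negs = rng.permutation(np.where(overlaps < 0.4)[0])
gt_labels[negs[:num_neg]] = -1            # -1 marks logistic-loss negatives

print(gt_labels)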
SiyangJ/COMP562
|
[
"297422599d7de752f16d1ec231d15f866dc4d2ab"
] |
[
"FinalProject/Model/Building/config.py"
] |
[
"import os\nimport sys\nimport tensorflow as tf\nimport configparser\nFLAGS = tf.app.flags.FLAGS\n\n## Configuration File Parse\nCONFIG_DIR = './config.ini'\nif len(sys.argv)>1 and sys.argv[1][-4:]=='.ini':\n CONFIG_DIR = sys.argv[1]\nCFP = configparser.ConfigParser()\nCFP.read(CONFIG_DIR)\n\nARGS = CFP['Default']\n\n'''\n[Default]\nseq_length=388 #seq length\nbatch_size=4 #batch_size\nfeature_num=1940 #dim of a seq\ny_size=50\nlstm_size=64 #hidden layer units\nlstm_layers=6\nkeep_prob=0.5\nlr=0.0001 #initial learn rate\nsep=0.8 #train and test sep\nepoch_size=10000 #train number\nsave_path=/pine/scr/s/i/siyangj/DeepStock/FinalProject/Model/Building/ckpt/\ndata_path=/pine/scr/s/i/siyangj/DeepStock/FinalProject/Data/XY_sequence.h5\nX_ID = X\nY_ID = Y\n'''\n\n################### Train Data################\ntf.app.flags.DEFINE_string('data_path', ARGS['data_path'],\n \"Where to read data.\")\ntf.app.flags.DEFINE_string('X_ID', ARGS['X_ID'],\n \"ID of X in data.\")\ntf.app.flags.DEFINE_string('Y_ID', ARGS['Y_ID'],\n \"ID of Y in data.\")\ntf.app.flags.DEFINE_float('sep', ARGS.getfloat('sep'), \n \"Data split ratio\")\n\n####################################################\n\ntf.app.flags.DEFINE_integer('seq_length', ARGS.getint('seq_length'), \n \"Sequence length of one sample\")\ntf.app.flags.DEFINE_integer('batch_size', ARGS.getint('batch_size'), \n \"Number of samples per batch.\")\ntf.app.flags.DEFINE_integer('feature_num', ARGS.getint('feature_num'), \n \"Number of features in one time step of one sample.\")\ntf.app.flags.DEFINE_integer('y_size', ARGS.getint('y_size'), \n \"Output size in one time step of one sample.\")\n\n####################################################\n\ntf.app.flags.DEFINE_integer('lstm_size', ARGS.getint('lstm_size'), \n \"Hidden layer size.\")\ntf.app.flags.DEFINE_integer('lstm_layers', ARGS.getint('lstm_layers'), \n \"Number of lstm hidden layers.\")\n\n############### Training and Learning rate decay ##################################\ntf.app.flags.DEFINE_float('lr', ARGS.getfloat('lr'), \n \"Start learning rate.\")\ntf.app.flags.DEFINE_float('keep_prob', ARGS.getfloat('keep_prob'), \n \"Keeping probability for dropout.\")\n\n###################################################################################\ntf.app.flags.DEFINE_integer('epoch_size', ARGS.getint('epoch_size'),\n \"Epochs for training.\")\ntf.app.flags.DEFINE_string('save_path', ARGS['save_path'], \n \"Output folder where training logs and models are dumped.\")\ntf.app.flags.DEFINE_integer('random_seed', ARGS.getint('random_seed'), \n \"Seed used to initialize rng.\")\ntf.app.flags.DEFINE_bool('xavier_init', ARGS.getboolean('xavier_init'),\n \"Xavier initialization or truncated normal.\")\n\ntf.app.flags.DEFINE_bool('l2_reg', ARGS.getboolean('l2_reg'),\n \"Whether to do l2 regularization.\")\nif FLAGS.l2_reg:\n tf.app.flags.DEFINE_float('l2_reg_weight', ARGS.getfloat('l2_reg_weight'),\n \"L2 regularization weight.\")\n## TODO\n## Need to implement\n\ntf.app.flags.DEFINE_integer('learning_rate_reduce_life', ARGS.getint('learning_rate_reduce_life'), \n \"Number of batches until learning rate is reduced. lr *= 0.1\")\ntf.app.flags.DEFINE_float('learning_rate_percentage', ARGS.getfloat('learning_rate_percentage'), \n \"Number of batches until learning rate is reduced. 
lr *= 0.1\")\ntf.app.flags.DEFINE_integer('checkpoint_period', ARGS.getint('checkpoint_period'), \n \"Number of batches in between checkpoints\")\ntf.app.flags.DEFINE_string('last_trained_checkpoint', ARGS['last_trained_checkpoint'], \n \"The model used for testing\")\ntf.app.flags.DEFINE_bool('restore_from_last', ARGS.getboolean('restore_from_last'), \n \"whether start training from last trained checkpoint\")\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"tensorflow.app.flags.DEFINE_string"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
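A minimal sketch of the configuration pattern in the config.py cell above, using only the standard-library half. The INI keys and the [Default] section name mirror the file's own docstring; the TensorFlow 1.x flag registration is shown only as a comment, since it needs a TF1 runtime:

import configparser

# Hypothetical INI snippet modelled on the [Default] section documented in config.py.
cfg = configparser.ConfigParser()
cfg.read_string("""
[Default]
batch_size = 4
lr = 0.0001
l2_reg = yes
""")
args = cfg['Default']
# The typed accessors (getint/getfloat/getboolean) are what config.py feeds
# into the flag definitions.
print(args.getint('batch_size'), args.getfloat('lr'), args.getboolean('l2_reg'))
# Under TensorFlow 1.x each value would then be registered as a flag, e.g.:
# tf.app.flags.DEFINE_integer('batch_size', args.getint('batch_size'), "Samples per batch.")
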
METASPACE2020/METASPACE
|
[
"e1acd9a409f84a78eed7ca9713258c09b0e137ca",
"e1acd9a409f84a78eed7ca9713258c09b0e137ca",
"e1acd9a409f84a78eed7ca9713258c09b0e137ca",
"e1acd9a409f84a78eed7ca9713258c09b0e137ca",
"e1acd9a409f84a78eed7ca9713258c09b0e137ca"
] |
[
"metaspace/python-client/metaspace/tests/test_sm_dataset.py",
"metaspace/engine/sm/engine/annotation/metrics.py",
"metaspace/engine/sm/engine/annotation/fdr.py",
"metaspace/engine/sm/engine/tests/annotation_spark/test_segmenter.py",
"metaspace/python-client/metaspace/image_processing.py"
] |
[
"import json\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\nfrom unittest.mock import patch\n\nimport pytest\nimport numpy as np\n\nfrom metaspace.sm_annotation_utils import (\n IsotopeImages,\n SMDataset,\n GraphQLClient,\n SMInstance,\n MolecularDB,\n)\nfrom metaspace.tests.utils import sm, my_ds_id, advanced_ds_id\n\nEXPECTED_RESULTS_COLS = [\n 'msm',\n 'moc',\n 'rhoSpatial',\n 'rhoSpectral',\n 'fdr',\n 'mz',\n 'moleculeNames',\n 'moleculeIds',\n 'intensity',\n 'colocCoeff',\n]\n\n\[email protected]()\ndef dataset(sm, my_ds_id):\n return sm.dataset(id=my_ds_id)\n\n\[email protected]()\ndef advanced_dataset(sm, advanced_ds_id):\n return sm.dataset(id=advanced_ds_id)\n\n\[email protected]()\ndef downloadable_dataset_id(sm: SMInstance):\n OLD_DATASET_FIELDS = GraphQLClient.DATASET_FIELDS\n GraphQLClient.DATASET_FIELDS += ' canDownload'\n datasets = sm.datasets()\n GraphQLClient.DATASET_FIELDS = OLD_DATASET_FIELDS\n\n for ds in datasets:\n if ds._info['canDownload'] and ds._info['inputPath'].startswith('s3a:'):\n return ds.id\n\n\ndef test_annotations(dataset: SMDataset):\n annotations = dataset.annotations()\n\n assert len(annotations) > 0\n assert len(annotations[0]) == 2 # sf, adduct tuple\n\n\ndef test_results(dataset: SMDataset):\n annotations = dataset.results(database=('HMDB', 'v4'), fdr=0.5)\n\n assert len(annotations) > 0\n assert all(col in annotations.columns for col in EXPECTED_RESULTS_COLS)\n assert list(annotations.index.names) == ['formula', 'adduct']\n\n\ndef test_results_with_coloc(dataset: SMDataset):\n coloc_with = dataset.results(database=('HMDB', 'v4'), fdr=0.5).ion[0]\n coloc_annotations = dataset.results(database=('HMDB', 'v4'), fdr=0.5, coloc_with=coloc_with)\n\n assert len(coloc_annotations) > 0\n assert coloc_annotations.colocCoeff.all()\n\n\ndef test_results_with_int_database_id(dataset: SMDataset):\n annotations = dataset.results(22, fdr=0.5)\n\n assert len(annotations) > 0\n\n\ndef test_results_with_str_database_id(dataset: SMDataset):\n # The type of database IDs was up in the air for a while. 
Both ints and int-strings are accepted\n # and are converted to the correct form internally\n annotations = dataset.results('22', fdr=0.5)\n\n assert len(annotations) > 0\n\n\n@patch(\n 'metaspace.sm_annotation_utils.GraphQLClient.get_visible_databases',\n return_value=[{'id': '22', 'name': 'HMDB', 'version': 'v4'}],\n)\n@patch('metaspace.sm_annotation_utils.GraphQLClient.getAnnotations', return_value=[])\ndef test_map_database_works_handles_strs_ids_from_api(\n mock_getAnnotations, mock_get_databases, dataset: SMDataset\n):\n # This test is just to ensure that the forward-compatibility with string IDs has the correct behavior\n dataset.results()\n\n print(mock_getAnnotations.call_args)\n annot_filter = mock_getAnnotations.call_args[1]['annotationFilter']\n assert annot_filter['databaseId'] == '22'\n\n\ndef test_results_neutral_loss_chem_mod(advanced_dataset: SMDataset):\n \"\"\"\n Test setup: Create a dataset with a -H2O neutral loss and a -H+C chem mod.\n \"\"\"\n annotations = advanced_dataset.results(database=('HMDB', 'v4'), fdr=0.5)\n annotations_cm = advanced_dataset.results(\n database=('HMDB', 'v4'), fdr=0.5, include_chem_mods=True\n )\n annotations_nl = advanced_dataset.results(\n database=('HMDB', 'v4'), fdr=0.5, include_neutral_losses=True\n )\n annotations_cm_nl = advanced_dataset.results(\n database=('HMDB', 'v4'), fdr=0.5, include_chem_mods=True, include_neutral_losses=True\n )\n\n # Check expected columns\n assert list(annotations_cm.index.names) == ['formula', 'adduct', 'chemMod']\n assert list(annotations_nl.index.names) == ['formula', 'adduct', 'neutralLoss']\n assert list(annotations_cm_nl.index.names) == ['formula', 'adduct', 'chemMod', 'neutralLoss']\n\n # Check CMs / NLs are present when explicitly included\n assert len(annotations_cm[annotations_cm.index.get_level_values('chemMod') != '']) > 0\n assert len(annotations_nl[annotations_nl.index.get_level_values('neutralLoss') != '']) > 0\n assert len(annotations_cm_nl[annotations_cm_nl.index.get_level_values('chemMod') != '']) > 0\n assert len(annotations_cm_nl[annotations_cm_nl.index.get_level_values('neutralLoss') != '']) > 0\n\n # Check CMs / NLs are excluded if they're not explicitly included\n assert annotations.index.is_unique\n assert annotations_cm.index.is_unique\n assert annotations_nl.index.is_unique\n assert annotations_cm_nl.index.is_unique\n assert len(annotations) < len(annotations_cm) < len(annotations_cm_nl)\n assert len(annotations) < len(annotations_nl) < len(annotations_cm_nl)\n plain_annotations = set(\n annotations_cm_nl.reset_index(['chemMod', 'neutralLoss'])[\n lambda df: (df.chemMod == '') & (df.neutralLoss == '')\n ].index\n )\n assert set(annotations.index) == plain_annotations\n\n\ndef test_isotope_images(dataset: SMDataset):\n sf, adduct = dataset.annotations(neutralLoss='', chemMod='')[0]\n\n images = dataset.isotope_images(sf, adduct)\n\n assert len(images) > 1\n assert isinstance(images[0], np.ndarray)\n\n\ndef test_isotope_images_advanced(advanced_dataset: SMDataset):\n sf, cm, nl, adduct = advanced_dataset.annotations(\n return_vals=('sumFormula', 'chemMod', 'neutralLoss', 'adduct'),\n neutralLoss='-H2O',\n chemMod='-H+C',\n )[0]\n\n images = advanced_dataset.isotope_images(sf, adduct, chem_mod=cm, neutral_loss=nl)\n\n assert len(images) > 1\n assert isinstance(images[0], np.ndarray)\n\n\ndef test_isotope_images_scaling(dataset: SMDataset):\n ann = dataset.results(neutralLoss='', chemMod='').iloc[0]\n formula, adduct = ann.name\n\n scaled_img = dataset.isotope_images(formula, 
adduct)[0]\n unscaled_img = dataset.isotope_images(formula, adduct, scale_intensity=False)[0]\n clipped_img = dataset.isotope_images(formula, adduct, hotspot_clipping=True)[0]\n clipped_unscaled_img = dataset.isotope_images(\n formula, adduct, scale_intensity=False, hotspot_clipping=True\n )[0]\n\n assert np.max(scaled_img) == pytest.approx(ann.intensity)\n assert np.max(unscaled_img) == pytest.approx(1)\n assert np.max(clipped_img) < ann.intensity\n assert np.max(clipped_img) > ann.intensity / 2 # Somewhat arbitrary, but generally holds true\n assert np.max(clipped_unscaled_img) == pytest.approx(1)\n\n\ndef test_all_annotation_images(dataset: SMDataset):\n image_list = dataset.all_annotation_images(only_first_isotope=True)\n\n assert isinstance(image_list[0], IsotopeImages)\n assert len(image_list) > 0\n assert all(len(isotope_images) == 1 for isotope_images in image_list)\n assert isinstance(image_list[0][0], np.ndarray)\n\n\ndef test_all_annotation_images_tic(dataset: SMDataset):\n image_list = dataset.all_annotation_images(\n only_first_isotope=True, scale_intensity='TIC', fdr=0.5\n )\n\n all_images = np.stack(images[0] for images in image_list if images[0] is not None)\n pixel_sums = np.sum(all_images, axis=0)\n pixel_sums = pixel_sums[~np.isnan(all_images[0])]\n # The sum of annotations generally shouldn't substantially exceed the TIC\n assert (pixel_sums < 1.5).all()\n assert (pixel_sums >= 0).all() # There should be no negative values\n assert (pixel_sums > 0).any() # There should be positive values\n\n\ndef test_all_annotation_images_advanced(advanced_dataset: SMDataset):\n image_list = advanced_dataset.all_annotation_images(only_first_isotope=True)\n\n # Assert images were returned for annotations with and without CMs / NLs\n assert any(isotope_images.chem_mod for isotope_images in image_list)\n assert any(not isotope_images.chem_mod for isotope_images in image_list)\n assert any(isotope_images.neutral_loss for isotope_images in image_list)\n assert any(not isotope_images.neutral_loss for isotope_images in image_list)\n\n\ndef test_download(sm: SMInstance, downloadable_dataset_id: str):\n # NOTE: In order to get a downloadable dataset, you will need to set your local installation\n # to upload to S3 and upload a dataset.\n dataset = sm.dataset(id=downloadable_dataset_id)\n\n with TemporaryDirectory() as tmpdir:\n dataset.download_to_dir(tmpdir, 'base_name')\n\n files = [f.name for f in Path(tmpdir).iterdir()]\n assert 'base_name.imzML' in files\n assert 'base_name.ibd' in files\n\n\ndef test_metadata(dataset: SMDataset):\n metadata = dataset.metadata\n\n # Make sure it behaves like a Dict\n assert 'Sample_Information' in metadata\n assert 'Sample_Preparation' in metadata\n assert 'MS_Analysis' in metadata\n\n assert len(metadata) > 0\n assert len(list(metadata.keys())) > 0\n\n # Make sure nested items work\n assert 'Organism' in metadata['Sample_Information']\n assert 'Xaxis' in metadata['MS_Analysis']['Pixel_Size']\n\n # Make sure it re-serializes in a way that matches the original JSON.\n # Use sort_keys to ensure they're both ordered the same way\n serialized = json.dumps(dataset.metadata, sort_keys=True)\n original_json = dataset.metadata.json # type: ignore\n sorted_json = json.dumps(json.loads(original_json), sort_keys=True)\n\n assert serialized == sorted_json\n\n\ndef test_dataset_info_fields(dataset: SMDataset):\n # Ensures that the graphql query includes all fields required for these properties,\n # and that the TypedDicts have the right keys\n\n assert 
isinstance(dataset.id, str)\n assert isinstance(dataset.name, str)\n assert isinstance(dataset.s3dir, str)\n\n assert isinstance(dataset.config['database_ids'][0], int)\n assert isinstance(dataset.config['analysis_version'], int)\n assert isinstance(dataset.config['isotope_generation']['adducts'][0], str)\n assert isinstance(dataset.config['isotope_generation']['charge'], int)\n assert isinstance(dataset.config['isotope_generation']['isocalc_sigma'], float)\n assert isinstance(dataset.config['isotope_generation']['instrument'], str)\n assert isinstance(dataset.config['isotope_generation']['n_peaks'], int)\n assert isinstance(dataset.config['isotope_generation']['neutral_losses'], list)\n assert isinstance(dataset.config['isotope_generation']['chem_mods'], list)\n assert isinstance(dataset.config['fdr']['decoy_sample_size'], int)\n assert isinstance(dataset.config['image_generation']['ppm'], (int, float))\n assert isinstance(dataset.config['image_generation']['n_levels'], int)\n assert isinstance(dataset.config['image_generation']['min_px'], int)\n\n assert isinstance(dataset.adducts[0], str)\n\n assert dataset.polarity in ('Positive', 'Negative')\n\n assert isinstance(dataset.submitter['id'], str)\n assert isinstance(dataset.submitter['name'], str)\n\n assert isinstance(dataset.database_details[0], MolecularDB)\n assert isinstance(dataset.database_details[0].id, int)\n assert isinstance(dataset.database_details[0].name, str)\n assert isinstance(dataset.database_details[0].version, str)\n assert isinstance(dataset.database_details[0].is_public, bool)\n assert isinstance(dataset.database_details[0].archived, bool)\n\n # Accessing by the dict interface is deprecated, but still probably relied upon\n assert isinstance(dataset.database_details[0]['id'], int)\n assert isinstance(dataset.database_details[0]['name'], str)\n assert isinstance(dataset.database_details[0]['version'], str)\n assert isinstance(dataset.database_details[0]['isPublic'], bool)\n assert isinstance(dataset.database_details[0]['archived'], bool)\n\n assert isinstance(dataset.status, str)\n\n if len(dataset.projects) > 0:\n assert isinstance(dataset.projects[0]['id'], str)\n assert isinstance(dataset.projects[0]['name'], str)\n assert isinstance(dataset.projects[0]['publicationStatus'], str)\n else:\n print('Skipping check for dataset.projects fields as dataset has no projects')\n\n assert isinstance(dataset.group['id'], str)\n assert isinstance(dataset.group['name'], str)\n assert isinstance(dataset.group['shortName'], str)\n\n assert isinstance(dataset.principal_investigator, (str, type(None)))\n\n\ndef test_diagnostics(dataset: SMDataset):\n diagnostics = dataset.diagnostics()\n tic_diag = dataset.diagnostic('TIC')\n imzml_diag = dataset.diagnostic('IMZML_METADATA')\n tic_image = dataset.tic_image()\n\n assert any(diag['type'] == 'TIC' for diag in diagnostics)\n assert isinstance(tic_diag['images'][0]['image'], np.ndarray)\n assert imzml_diag is not None\n assert isinstance(tic_image, np.ndarray)\n",
"import numpy as np\nfrom cpyImagingMSpec import measure_of_chaos\nfrom pyImagingMSpec.image_measures import isotope_pattern_match, isotope_image_correlation\nfrom scipy.ndimage import maximum_filter, minimum_filter, grey_closing\n\n\ndef spectral_metric(iso_imgs_flat, formula_ints):\n # Ignore div-by-zero / NaN errors - they're handled internally\n with np.errstate(divide='ignore', invalid='ignore'):\n return np.nan_to_num(isotope_pattern_match(iso_imgs_flat, formula_ints))\n\n\ndef spatial_metric(iso_imgs_flat, n_spectra, intensities, v1_impl=False):\n \"\"\"Reimplementation of pyImagingMSpec.image_measures.isotope_image_correlation supporting\n a variable denominator when calculating the corrcoef (to compensate for the removed zero-valued\n pixels). This allows it to work on images that have had empty areas removed, without impacting\n the results, which can improve speed significantly.\n\n This returns values that can be very slightly different from the original pyImagingMSpec due to\n floating point imprecision, but the results never seemed to differ by more than 0.0000001.\n Specify v1_impl=True to use the original pyImagingMSpec implementation.\n \"\"\"\n\n if v1_impl:\n # Ignore div-by-zero / NaN errors - they're handled internally\n with np.errstate(divide='ignore', invalid='ignore'):\n if np.sum(intensities[1:]) == 0:\n return 0\n else:\n return isotope_image_correlation(iso_imgs_flat, weights=intensities[1:])\n\n if (\n len(iso_imgs_flat) < 2\n or np.count_nonzero(iso_imgs_flat[0]) < 2\n or np.sum(intensities[1:]) == 0\n ):\n return 0\n\n iso_imgs_flat = iso_imgs_flat[:, iso_imgs_flat.any(axis=0)]\n\n iso_correlation = spatial_corr(iso_imgs_flat, n_spectra, None)\n\n try:\n # coerce between [0 1]\n return np.clip(np.average(iso_correlation, weights=intensities[1:]), 0, 1)\n except TypeError as exc:\n raise ValueError(\"Number of images is not equal to the number of weights + 1\") from exc\n\n\ndef spatial_corr(iso_imgs_flat, n_spectra, weights=None):\n \"\"\"Reimplementation of pyImagingMSpec.image_measures.isotope_image_correlation supporting\n a variable denominator when calculating the corrcoef (to compensate for the removed zero-valued\n pixels).\n \"\"\"\n if weights is not None:\n # Calculate np.cov (with weights)\n weights = weights / np.sum(weights)\n # iso_imgs_weighted = iso_imgs_flat * weights[np.newaxis, :]\n # iso_imgs_flat = iso_imgs_flat / np.sum(iso_imgs_flat, axis=0)[:, np.newaxis]\n avg = np.sum(iso_imgs_flat * weights[np.newaxis, :], axis=1, keepdims=True)\n X = iso_imgs_flat - avg\n # Only the diagonal and left column of the covariance matrix are needed\n covdiag = np.sum(X * X * weights, axis=1)\n covleft = np.sum(X[0:1] * X * weights, axis=1)\n else:\n # Calculate np.cov (with custom denominator)\n avg = np.sum(iso_imgs_flat, axis=1) / n_spectra\n X = iso_imgs_flat - avg[:, np.newaxis]\n padding = n_spectra - iso_imgs_flat.shape[1]\n # Only the diagonal and left column of the covariance matrix are needed\n covdiag = (np.sum(X * X, axis=1) + (avg * avg * padding)) / (n_spectra - 1)\n covleft = (np.sum(X[0:1] * X, axis=1) + (avg[0] * avg * padding)) / (n_spectra - 1)\n # Calculate np.corrcoef from np.cof results\n # iso_correlation = np.corrcoef(flt_images_flat)[1:, 0]\n with np.errstate(divide='ignore', invalid='ignore'):\n iso_correlation = covleft[1:] / np.sqrt(covdiag[0] * covdiag[1:])\n # When all values are the same (e.g. 
zeros) then the covariance matrix can have zeros or nans.\n # Replace these with 0s to avoid downstream errors\n iso_correlation[np.isinf(iso_correlation) | np.isnan(iso_correlation)] = 0\n return iso_correlation\n\n\ndef _chaos_dilate(arr):\n \"\"\"NOTE: This mutates the input. It's equivalent to\n scipy.ndimage.binary_dilation(arr, iterations=2), but it's about 10x faster.\n It dilates a row/col mask such that the masked image will get the same measure of chaos score,\n as measure-of-chaos does its own dilation on the image which can cause connected regions\n to merge if gaps in the image are made too small.\n \"\"\"\n arr = arr.copy()\n arr[1:] |= arr[:-1]\n arr[1:] |= arr[:-1]\n arr[:-2] |= arr[2:]\n if np.count_nonzero(arr) >= 0.9 * len(arr):\n # Not sparse enough to justify compaction - return a slice so that numpy can skip copying\n return slice(None)\n else:\n return arr\n\n\ndef chaos_metric(iso_img, n_levels):\n # Shrink image if possible, as chaos performance is highly resolution-dependent\n iso_img = iso_img[_chaos_dilate(np.any(iso_img, axis=1)), :]\n iso_img = iso_img[:, _chaos_dilate(np.any(iso_img, axis=0))]\n\n if iso_img.size == 0:\n # measure_of_chaos segfaults if the image has no elements - in Lithops this appears as a\n # MemoryError. Skip empty images.\n return 0\n\n # measure_of_chaos behaves weirdly on Fortran-ordered arrays, which happen sometimes due to the\n # above slicing operations. this makes it a C-ordered, aligned copy if needed.\n iso_img = np.require(iso_img, requirements=['A', 'C'])\n\n moc = measure_of_chaos(iso_img, n_levels)\n return 0 if np.isclose(moc, 1.0) else moc\n\n\ndef v2_chaos(iso_img, n_levels=30, geom_scaling=False, full_dilate=False):\n # GCOVR_EXCL_START # Disable code coverage for this function as it's not prod code\n \"\"\"\n WIP code for experimenting with improvements to the measure of chaos metric.\n\n Arguments:\n iso_img: first isotopic image\n n_levels: number of intensity thresholds to sample at.\n geom_scaling: whether to use geometric scaling instead of linear scaling for selecting\n the intensity thresholds\n full_dilate: whether to do an 8-way dilation instead of a 4-way dilation, which causes\n small islands of 1-2 pixels to be kept\n\n This returns 2 candidate metric values: the ratio-based metric and the count-based metric\n Only one metric value is expected to actually be used in the end. They're just all calculated in\n parallel because it's faster to do them in parallel the optimal method hasn't been decided yet.\n\n The \"count-based\" values match the old algorithm - it's the mean number of connected components\n divided by the total number of pixels with non-zero intensity.\n The \"ratio-based\" values are the potentially improved version - it calculates the ratio of\n connected components to pixels at every intensity threshold. This should work better on\n images that have a several regions of different intensities.\n Linear vs log-scaling are just different ways to choose the intensity thresholds where connected\n components are counted. n_levels directly affects performance, so choosing better thresholds\n would allow the n_levels to be reduced. Log-scaling usually makes more sense with mass spec\n intensities.\n\n Findings:\n * This python reimplementation is faster than the C++ implementation as long as numba is\n used to accelerate count_connected_components\n * full_dilate doesn't seem to help. 
It's also much slower\n * n_levels=10, geom_scaling=True is much faster than n_levels=30, geom_scaling=False\n and gives very similar results. It's probably a worthwhile change\n\n\n\n Original implementation:\n https://github.com/alexandrovteam/ims-cpp/blob/dcc12b4c50dbfdcde3f765af85fb8b3bb5cd7ec3/ims/image_measures.cpp#L89\n Old way: level-thresholding -> dilation -> erosion -> connected component count\n New way: \"dilation\" via maximum-filter -> \"erosion\" via minimum-filter -> level-thresholding\n -> connected component count\n\n \"\"\"\n\n # Local import of image_manip because numba isn't included in the Lithops Docker image\n # as it adds ~25MB. If this metric is ever used again, numba will need to be added to the image.\n # pylint: disable=import-outside-toplevel # Avoid pulling numba into lithops\n from sm.engine.annotation.image_manip import count_connected_components\n\n if full_dilate:\n iso_img = grey_closing(iso_img, size=(3, 3))\n else:\n dilated = maximum_filter(iso_img, footprint=np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))\n # Old way: mode='nearest', new way: mode='constant'\n iso_img = minimum_filter(dilated, size=(3, 3), mode='constant')\n\n if not iso_img.any():\n # Old way - detect this case when the return value is exactly 1.0\n return [0.0, 0.0]\n\n # Old way: np.linspace(0, max_ints, n_levels)\n mask = np.empty(iso_img.shape, dtype='bool')\n\n def calc_chaos_metrics(thresholds):\n if len(thresholds):\n pixel_counts = np.ones(len(thresholds), 'i')\n component_counts = np.zeros(len(thresholds), 'i')\n for i, threshold in enumerate(thresholds):\n np.greater(iso_img, threshold, out=mask)\n if mask.any():\n pixel_counts[i] = np.count_nonzero(mask)\n component_counts[i] = count_connected_components(mask)\n\n mean_ratio = 1 - np.mean((component_counts / pixel_counts)[component_counts > 0])\n mean_count = 1 - np.mean(component_counts) / np.count_nonzero(iso_img)\n\n return [mean_ratio, mean_count]\n else:\n return [-1.0, -1.0]\n\n max_ints = np.max(iso_img)\n if geom_scaling:\n levels = np.geomspace(max_ints / 1000, max_ints, n_levels, endpoint=False)\n else:\n levels = np.linspace(0, max_ints, n_levels, endpoint=False)\n return calc_chaos_metrics(levels)\n # GCOVR_EXCL_STOP\n\n\ndef v2_chaos_orig(iso_img, n_levels=30):\n # GCOVR_EXCL_START # Disable code coverage for this function as it's not prod code\n \"\"\"Reimplementation of the chaos metric. I didn't manage to get it to exactly match the original\n implementation. This one seems to have significantly better dynamic range - it often produces\n values like 0.6 when the original implementation rarely produces values below 0.95\n\n Original implementation:\n https://github.com/alexandrovteam/ims-cpp/blob/dcc12b4c50dbfdcde3f765af85fb8b3bb5cd7ec3/ims/image_measures.cpp#L89\n Old way: level-thresholding -> dilation -> erosion -> connected component count\n New way: \"dilation\" via maximum-filter -> \"erosion\" via minimum-filter -> level-thresholding\n -> connected component count\n \"\"\"\n # Local import of image_manip because numba isn't included in the Lithops Docker image\n # as it adds ~25MB. 
If this metric is ever used again, numba will need to be added to the image.\n # pylint: disable=import-outside-toplevel # Avoid pulling numba into lithops\n from sm.engine.annotation.image_manip import count_connected_components\n\n dilated = maximum_filter(iso_img, footprint=np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))\n # Old way: mode='nearest', new way: mode='constant'\n iso_img = minimum_filter(dilated, size=(3, 3), mode='nearest')\n\n if not iso_img.any():\n # Old way - detect this case when the return value is exactly 1.0\n return 0.0\n\n # Old way: np.linspace(0, max_ints, n_levels)\n mask = np.empty(iso_img.shape, dtype='bool')\n\n thresholds = np.linspace(0, np.max(iso_img), n_levels)\n\n pixel_counts = np.ones(len(thresholds), 'i')\n component_counts = np.zeros(len(thresholds), 'i')\n for i, threshold in enumerate(thresholds):\n np.greater(iso_img, threshold, out=mask)\n if mask.any():\n pixel_counts[i] = np.count_nonzero(mask)\n component_counts[i] = count_connected_components(mask)\n\n mean_count = 1 - np.mean(component_counts) / np.count_nonzero(iso_img)\n\n return mean_count\n # GCOVR_EXCL_STOP\n\n\ndef weighted_stddev(values, weights):\n # Numpy's weighted average is extremely dependent on the order of items, and gives inconsistent\n # results even with 64-bit precision. np.longdouble (80-bit precision on x86 platforms)\n # significantly reduces the order-dependent error, but beware that results may still differ\n # depending on how the input values are ordered.\n # The Spark and Lithops pipelines often collect pixels into a coo_matrix in a different order.\n values = values.astype(np.longdouble)\n weights = weights.astype(np.longdouble)\n\n average = np.average(values, weights=weights)\n stddev = np.sqrt(np.average((values - average) ** 2, weights=weights))\n return average.astype(np.float64), stddev.astype(np.float64)\n\n\ndef calc_mz_stddev(iso_images_sparse, iso_mzs_sparse, formula_mzs):\n mz_mean = []\n mz_stddev = []\n for ints_img, mzs_img, theo_mz in zip(iso_images_sparse, iso_mzs_sparse, formula_mzs):\n if mzs_img is not None and mzs_img.nnz > 0:\n mz, stddev = weighted_stddev(mzs_img.data, ints_img.data)\n mz_mean.append(mz)\n mz_stddev.append(stddev)\n else:\n mz_mean.append(theo_mz)\n mz_stddev.append(0.0)\n return mz_mean, mz_stddev\n\n\ndef calc_mass_errs(mz_mean, formula_mzs, formula_ints):\n mz_err_abs = mz_mean[0] - formula_mzs[0]\n if formula_ints[1:].sum() > 0:\n mz_err_rel = np.average(\n (mz_mean[1:] - formula_mzs[1:] - mz_err_abs), weights=formula_ints[1:]\n )\n else:\n mz_err_rel = 0\n\n return mz_err_abs, mz_err_rel\n\n\ndef mass_metrics(iso_images_sparse, iso_mzs_sparse, formula_mzs, formula_ints):\n formula_mzs = np.asarray(formula_mzs)\n formula_ints = np.asarray(formula_ints)\n mz_mean, mz_stddev = calc_mz_stddev(iso_images_sparse, iso_mzs_sparse, formula_mzs)\n mz_err_abs, mz_err_rel = calc_mass_errs(mz_mean, formula_mzs, formula_ints)\n\n return mz_mean, mz_stddev, mz_err_abs, mz_err_rel\n",
"import logging\nfrom itertools import product\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\n\nfrom sm.engine.annotation.scoring_model import ScoringModel, MsmScoringModel\nfrom sm.engine.formula_parser import format_modifiers\n\nlogger = logging.getLogger('engine')\n\nDECOY_ADDUCTS = [\n # fmt: off\n '+He', '+Li', '+Be', '+B', '+C', '+N', '+O', '+F', '+Ne', '+Mg',\n '+Al', '+Si', '+P', '+S', '+Cl', '+Ar', '+Ca', '+Sc', '+Ti', '+V',\n '+Cr', '+Mn', '+Fe', '+Co', '+Ni', '+Cu', '+Zn', '+Ga', '+Ge', '+As',\n '+Se', '+Br', '+Kr', '+Rb', '+Sr', '+Y', '+Zr', '+Nb', '+Mo', '+Ru',\n '+Rh', '+Pd', '+Ag', '+Cd', '+In', '+Sn', '+Sb', '+Te', '+I', '+Xe',\n '+Cs', '+Ba', '+La', '+Ce', '+Pr', '+Nd', '+Sm', '+Eu', '+Gd', '+Tb',\n '+Dy', '+Ho', '+Ir', '+Th', '+Pt', '+Os', '+Yb', '+Lu', '+Bi', '+Pb',\n '+Re', '+Tl', '+Tm', '+U', '+W', '+Au', '+Er', '+Hf', '+Hg', '+Ta',\n # fmt: on\n]\n\n\ndef _make_target_modifiers_df(chem_mods, neutral_losses, target_adducts):\n \"\"\"\n All combinations of chemical modification, neutral loss or target adduct.\n Note that the combination order matters as these target modifiers are used later\n to map back to separated chemical modification, neutral loss and target adduct fields.\n \"\"\"\n rows = [\n (cm, nl, ta, format_modifiers(cm, nl, ta), format_modifiers(cm, nl))\n for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)\n ]\n df = pd.DataFrame(\n rows,\n columns=['chem_mod', 'neutral_loss', 'adduct', 'tm', 'decoy_modifier_prefix'],\n dtype='O',\n )\n df = df.set_index('tm')\n return df\n\n\ndef score_to_fdr_map(\n target_scores: np.ndarray,\n decoy_scores: np.ndarray,\n decoy_ratio: float,\n rule_of_succession: bool,\n monotonic: bool,\n) -> pd.Series:\n \"\"\"Returns a Series where the index is the target/decoy scores and the value is the FDR.\n Scores can have any magnitude, but must be floating point numbers where higher values indicate\n higher confidence.\n\n target/decoy scores can have any finite values, but it's assumed that higher values indicate\n higher confidence.\n\n Args:\n target_scores: scores for all targets\n decoy_scores: scores for all decoys\n decoy_ratio: ratio of decoys to targets for the given ranking. This has to be provided\n because `target_scores` and `decoy_scores` usually exclude zero-scored annotations,\n but those excluded values need to be taken into account for the FDR calculation.\n In analysis_version=1, many rankings with matched target/decoy sizes are used,\n so this should be 1\n In analysis_version=3, a single ranking with many decoys is done per target,\n so this should be the decoy_sample_size\n rule_of_succession: When true, starts the sequence with 1 target and 1 decoy,\n which improves stability and solves the overoptimistic \"0% FDR\" problem.\n monotonic: When true, ensures that there are no cases where having a lower score would\n have a lower FDR. This is generally preferred - false only makes sense if the FDRs\n are going to be somehow manipulated (e.g. 
averaged over several rankings) before being\n made monotonic.\n \"\"\"\n target_hits = pd.Series(target_scores, name='target').value_counts()\n decoy_hits = pd.Series(decoy_scores, name='decoy').value_counts()\n counts_df = pd.concat([target_hits, decoy_hits], axis=1).fillna(0).sort_index(ascending=False)\n cumulative_targets = counts_df.target.cumsum()\n cumulative_decoys = counts_df.decoy.cumsum()\n if rule_of_succession:\n # Per the Rule of Succession, to find the best estimate of a\n # Bernoulli distribution's mean, add one to the number of observations of each class.\n # Other FDR algorithms don't seem to do this, and technically this isn't actually a\n # Bernoulli distribution, but it's the best approach I could find to integrate\n # uncertainty into the FDR values to avoid producing misleading 0% FDR values\n # (which likely have a large-but-unreported margin of error).\n cumulative_targets = cumulative_targets + 1\n cumulative_decoys = cumulative_decoys + 1\n\n fdrs = cumulative_decoys / decoy_ratio / cumulative_targets\n fdrs[cumulative_targets == 0] = 1 # Fix NaNs when decoys come before targets\n\n if monotonic:\n # FDRs is already sorted by score descending, so reverse it, take the running-minimum,\n # then reverse it again to get the original order.\n fdrs = pd.Series(np.minimum.accumulate(fdrs.values[::-1])[::-1], index=fdrs.index)\n\n return fdrs\n\n\ndef run_fdr_ranking(\n target_scores: pd.Series,\n decoy_scores: pd.Series,\n decoy_ratio: float,\n rule_of_succession: bool,\n monotonic: bool,\n):\n fdr_map = score_to_fdr_map(\n target_scores.values, decoy_scores.values, decoy_ratio, rule_of_succession, monotonic\n )\n\n fdrs = fdr_map.loc[target_scores.values].values\n\n return pd.Series(fdrs, index=target_scores.index)\n\n\ndef run_fdr_ranking_labeled(\n scores: pd.Series,\n target: pd.Series,\n decoy_ratio: float,\n rule_of_succession: bool,\n monotonic: bool,\n):\n \"\"\"Runs an FDR ranking for a list of scores with a separate target/decoy flag.\n Returns calculated FDRs for both targets and decoys.\"\"\"\n fdr_map = score_to_fdr_map(\n scores[target].values, scores[~target].values, decoy_ratio, rule_of_succession, monotonic\n )\n\n return pd.Series(fdr_map.loc[scores.values].values, index=scores.index)\n\n\nclass FDR:\n fdr_levels = [0.05, 0.1, 0.2, 0.5]\n\n def __init__(self, fdr_config, chem_mods, neutral_losses, target_adducts, analysis_version):\n self.decoy_adduct_cand = [ad for ad in DECOY_ADDUCTS if ad not in target_adducts]\n self.decoy_sample_size = min(fdr_config['decoy_sample_size'], len(self.decoy_adduct_cand))\n\n self.chem_mods = chem_mods\n self.neutral_losses = neutral_losses\n self.target_adducts = target_adducts\n self.analysis_version = analysis_version\n self.td_df = None\n self.random_seed = 42\n self.target_modifiers_df = _make_target_modifiers_df(\n chem_mods, neutral_losses, target_adducts\n )\n\n def _choose_decoys(self):\n copy = self.decoy_adduct_cand.copy()\n np.random.shuffle(copy)\n return copy[: self.decoy_sample_size]\n\n def _decoy_adduct_gen(self, target_formulas):\n np.random.seed(self.random_seed)\n target_modifiers = list(self.target_modifiers_df.decoy_modifier_prefix.items())\n if self.analysis_version < 3:\n # NOTE: These are later selected by index % decoy_sample_size. 
Generation order matters.\n # pylint: disable=invalid-name\n for formula, (target_modifier, decoy_prefix) in product(\n target_formulas, target_modifiers\n ):\n for decoy_adduct in self._choose_decoys():\n yield formula, target_modifier, decoy_prefix + decoy_adduct\n else:\n # In v3, share the decoy adducts, as there's no benefit to re-sampling decoys\n # for each target modifier, but it's significantly expensive to do so.\n for formula in target_formulas:\n decoys = self._choose_decoys()\n for target_modifier, decoy_prefix in target_modifiers:\n for decoy_adduct in decoys:\n yield formula, target_modifier, decoy_prefix + decoy_adduct\n\n def decoy_adducts_selection(self, target_formulas):\n self.td_df = pd.DataFrame(\n self._decoy_adduct_gen(target_formulas),\n columns=['formula', 'tm', 'dm'],\n )\n\n def ion_tuples(self):\n \"\"\"Returns list of tuples in List[(formula, modifier)] form.\n\n All ions needed for FDR calculation as a list of (formula, modifier),\n where modifier is a combination of chemical modification, neutral loss and adduct\n \"\"\"\n d_ions = self.td_df[['formula', 'dm']].drop_duplicates().values.tolist()\n t_ions = self.td_df[['formula', 'tm']].drop_duplicates().values.tolist()\n return list(map(tuple, t_ions + d_ions))\n\n def target_modifiers(self):\n \"\"\" List of possible modifier values for target ions \"\"\"\n return self.target_modifiers_df.index.tolist()\n\n @classmethod\n def nearest_fdr_level(cls, fdr):\n for level in cls.fdr_levels:\n if round(fdr, 2) <= level:\n return level\n return 1.0\n\n def _digitize_fdr(self, fdr_df):\n # Bin annotations by predefined FDR thresholds, also making them monotonic\n # This is only used in analysis_version==1\n df = fdr_df.sort_values(by='msm', ascending=False)\n msm_levels = [df[df.fdr < fdr_thr].msm.min() for fdr_thr in self.fdr_levels]\n df['fdr_d'] = 1.0\n for msm_thr, fdr_thr in zip(msm_levels, self.fdr_levels):\n row_mask = np.isclose(df.fdr_d, 1.0) & np.greater_equal(df.msm, msm_thr)\n df.loc[row_mask, 'fdr_d'] = fdr_thr\n df['fdr'] = df.fdr_d\n return df.drop('fdr_d', axis=1)\n\n def estimate_fdr(\n self, formula_msm: pd.DataFrame, scoring_model: Optional[ScoringModel]\n ) -> pd.DataFrame:\n logger.info('Estimating FDR')\n\n if scoring_model is None:\n scoring_model = MsmScoringModel()\n\n td_df = self.td_df.set_index('tm')\n\n target_fdr_df_list = []\n for tm in self.target_modifiers_df.index.drop_duplicates(): # pylint: disable=invalid-name\n target_msm = formula_msm[formula_msm.modifier == tm]\n full_decoy_df = td_df.loc[tm, ['formula', 'dm']].rename(columns={'dm': 'modifier'})\n\n if self.analysis_version >= 3:\n # Do a single big ranking with all the decoys, numerically compensating for the\n # imbalanced sets sizes. 
This is equivalent to averaging across the different random\n # sets of decoys.\n decoy_msm = pd.merge(formula_msm, full_decoy_df, on=['formula', 'modifier'])\n target_df, decoy_df = scoring_model.score(\n target_msm, decoy_msm, self.decoy_sample_size\n )\n\n fdr_vals = run_fdr_ranking(\n target_df.msm, decoy_df.msm, self.decoy_sample_size, True, True\n )\n target_fdr = target_df.assign(fdr=fdr_vals)\n else:\n # Do a separate ranking for each of the 20 target:decoy pairings, then take the\n # median FDR for each target\n fdr_vals_list = []\n for i in range(self.decoy_sample_size):\n decoy_subset_df = full_decoy_df[i :: self.decoy_sample_size]\n decoy_msm = pd.merge(formula_msm, decoy_subset_df, on=['formula', 'modifier'])\n\n # Extra columns added by scoring_model are discarded for simplicity,\n # as it's unlikely anyone will use this codepath with a CatBoost model\n target_df, decoy_df = scoring_model.score(target_msm, decoy_msm, 1)\n\n fdr_vals = run_fdr_ranking(target_df.msm, decoy_df.msm, 1, False, False)\n fdr_vals_list.append(fdr_vals)\n\n msm_to_fdr = pd.Series(pd.concat(fdr_vals_list, axis=1).median(axis=1), name='fdr')\n target_fdr = self._digitize_fdr(target_msm.join(msm_to_fdr))\n target_fdr_df_list.append(target_fdr)\n\n return pd.concat(target_fdr_df_list)\n",
"from pathlib import Path\nfrom unittest.mock import Mock, patch\nimport numpy as np\nimport pandas as pd\nfrom itertools import product\nfrom numpy.testing import assert_array_almost_equal\n\nfrom sm.engine.annotation.imzml_reader import FSImzMLReader\nfrom sm.engine.annotation_spark.segmenter import (\n segment_centroids,\n define_ds_segments,\n segment_ds,\n calculate_chunk_sp_n,\n fetch_chunk_spectra_data,\n)\nfrom tests.conftest import make_imzml_reader_mock\n\n\ndef test_calculate_chunk_sp_n():\n sample_mzs_bytes = 25 * 2 ** 20\n sample_sp_n = 10\n max_chunk_size_mb = 500\n\n chunk_sp_n = calculate_chunk_sp_n(sample_mzs_bytes, sample_sp_n, max_chunk_size_mb)\n\n assert chunk_sp_n == 50\n\n\ndef test_fetch_chunk_spectra_data():\n mz_n = 10\n imzml_reader = make_imzml_reader_mock(\n [(1, 1, 1), (2, 1, 1)], (np.linspace(0, 90, num=mz_n), np.ones(mz_n))\n )\n\n sp_chunk_df = fetch_chunk_spectra_data(sp_ids=[0, 1], imzml_reader=imzml_reader)\n\n exp_mzs, exp_ints = [\n np.sort([mz for mz in np.linspace(0, 90, num=mz_n) for _ in range(2)]),\n np.ones(2 * mz_n),\n ]\n\n assert sp_chunk_df.mz.dtype == 'f'\n assert_array_almost_equal(sp_chunk_df.mz, exp_mzs)\n assert_array_almost_equal(sp_chunk_df.int, exp_ints)\n\n\ndef test_define_ds_segments():\n imzml_reader = make_imzml_reader_mock(mz_precision='d')\n\n mz_max = 100\n sample_mzs = np.linspace(0, mz_max, 100)\n ds_segm_size_mb = 800 / (2 ** 20) # 1600 b total data size / 2 segments, converted to MB\n ds_segments = define_ds_segments(\n sample_mzs, sample_ratio=1, imzml_reader=imzml_reader, ds_segm_size_mb=ds_segm_size_mb\n )\n\n exp_ds_segm_n = 8\n exp_bounds = [i * mz_max / exp_ds_segm_n for i in range(exp_ds_segm_n + 1)]\n exp_ds_segments = np.array(list(zip(exp_bounds[:-1], exp_bounds[1:])))\n assert ds_segments.shape == exp_ds_segments.shape\n assert np.allclose(ds_segments, exp_ds_segments)\n\n\n@patch('sm.engine.annotation_spark.segmenter.pickle.dump')\ndef test_segment_ds(dump_mock):\n imzml_reader = make_imzml_reader_mock(\n list(product([0], range(10))), (np.linspace(0, 90, num=10), np.ones(10))\n )\n ds_segments = np.array([[0, 50], [50, 90.0]])\n\n chunk_sp_n = 1000\n segment_ds(imzml_reader, chunk_sp_n, ds_segments, Path('/tmp/abc'))\n\n for segm_i, ((sp_chunk_df, f), _) in enumerate(dump_mock.call_args_list):\n min_mz, max_mz = ds_segments[segm_i]\n\n assert sp_chunk_df.shape == (50, 3)\n assert np.all(min_mz <= sp_chunk_df.mz)\n assert np.all(sp_chunk_df.mz <= max_mz)\n\n\n@patch('sm.engine.annotation_spark.segmenter.pickle.dump')\ndef test_segment_centroids(dump_mock):\n centr_df = pd.DataFrame(\n [\n (0, 0, 90),\n (0, 1, 100),\n (0, 2, 110),\n (1, 0, 100),\n (1, 1, 110),\n (1, 2, 120),\n (2, 0, 110),\n (2, 1, 120),\n (2, 2, 130),\n ],\n columns=['formula_i', 'peak_i', 'mz'],\n )\n segm_n = 3\n segment_centroids(centr_df, segm_n, Path('/tmp/abc'))\n\n for segm_i in range(segm_n):\n args, _ = dump_mock.call_args_list[segm_i]\n df, _ = args\n\n assert df.shape == (3, 4)\n assert set(df.formula_i) == {segm_i}\n",
"from typing import List, overload\n\nimport numpy as np\nimport pandas as pd\n\n\ndef clip_hotspots(img: np.ndarray):\n \"\"\"\n Performs hotspot removal on an ion image to match the METASPACE website's ion image rendering\n \"\"\"\n min_visible = np.max(img) / 256\n if min_visible > 0:\n hotspot_threshold = np.quantile(img[img > min_visible], 0.99)\n return np.clip(img, None, hotspot_threshold)\n else:\n return img\n\n\ndef colocalization(img_a: np.ndarray, img_b: np.ndarray):\n \"\"\"\n Calculates degree of colocalization between two ion images, using the same algorithm METASPACE uses.\n Returns a float between 0 (no colocalization) and 1 (full colocalization).\n\n Citation: Ovchinnikova et al. (2020) ColocML. https://doi.org/10.1093/bioinformatics/btaa085\n\n Requires additional packages to be installed: scipy, scikit-learn\n \"\"\"\n from scipy.ndimage import median_filter\n from sklearn.metrics.pairwise import cosine_similarity\n\n h, w = img_a.shape\n\n def preprocess(img):\n img = img.copy().reshape((h, w))\n img[img < np.quantile(img, 0.5)] = 0\n return median_filter(img, (3, 3)).reshape([1, h * w])\n\n return cosine_similarity(preprocess(img_a), preprocess(img_b))[0, 0]\n\n\n@overload\ndef colocalization_matrix(images: List[np.ndarray], labels: None = None) -> np.ndarray:\n ...\n\n\n@overload\ndef colocalization_matrix(images: List[np.ndarray], labels: List[str]) -> pd.DataFrame:\n ...\n\n\ndef colocalization_matrix(images: List[np.ndarray], labels=None):\n \"\"\"\n Calculates level of colocalization between all pairs of images in a list of ion images.\n If many checks are needed, it is usually faster to generate the entire matrix than to do\n many separate calls to \"colocalization\".\n\n Citation: Ovchinnikova et al. (2020) ColocML. https://doi.org/10.1093/bioinformatics/btaa085\n\n Requires additional packages to be installed: scipy, scikit-learn\n\n :param images: A list of ion images\n :param labels: If supplied, output will be a pandas DataFrame where the labels are used to define\n the index and columns. It can be useful to pass ion formulas\n or (formula, adduct) pairs here, to facilitate easy lookup of colocalization values\n If not supplied, the output will be a numpy ndarray\n :return:\n \"\"\"\n from scipy.ndimage import median_filter\n from sklearn.metrics.pairwise import pairwise_kernels\n\n count = len(images)\n if count == 0:\n similarity_matrix = np.ones((0, 0))\n elif count == 1:\n similarity_matrix = np.ones((1, 1))\n else:\n h, w = images[0].shape\n flat_images = np.vstack([i.flatten() for i in images])\n flat_images[flat_images < np.quantile(flat_images, 0.5, axis=1, keepdims=True)] = 0\n filtered_images = median_filter(flat_images.reshape((count, h, w)), (1, 3, 3)).reshape(\n (count, h * w)\n )\n similarity_matrix = pairwise_kernels(filtered_images, metric='cosine')\n\n if labels is None:\n return similarity_matrix\n else:\n return pd.DataFrame(similarity_matrix, index=labels, columns=labels)\n"
] |
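The padded-covariance trick in spatial_corr (metrics.py, in the code cell above) can be sanity-checked in isolation: dropping all-zero pixels while padding the covariance denominator must reproduce np.corrcoef over the full images. A self-contained sketch with made-up data, inlining the math rather than importing the engine:

import numpy as np

# Made-up data: 3 isotope images with 6 retained (non-empty) pixels each,
# pretending 4 all-zero pixels were stripped out beforehand.
rng = np.random.default_rng(0)
flat = rng.random((3, 6))
n_spectra = 10
avg = flat.sum(axis=1) / n_spectra            # mean over the *full* pixel count
X = flat - avg[:, None]
pad = n_spectra - flat.shape[1]               # number of removed zero pixels
covdiag = (np.sum(X * X, axis=1) + avg * avg * pad) / (n_spectra - 1)
covleft = (np.sum(X[0:1] * X, axis=1) + avg[0] * avg * pad) / (n_spectra - 1)
corr = covleft[1:] / np.sqrt(covdiag[0] * covdiag[1:])
full = np.hstack([flat, np.zeros((3, pad))])  # restore the zero pixels
assert np.allclose(corr, np.corrcoef(full)[1:, 0])
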
[
[
"numpy.isnan",
"numpy.max",
"numpy.sum",
"numpy.stack"
],
[
"numpy.sqrt",
"numpy.linspace",
"numpy.asarray",
"scipy.ndimage.grey_closing",
"numpy.max",
"numpy.mean",
"numpy.any",
"numpy.greater",
"numpy.count_nonzero",
"numpy.isclose",
"numpy.isnan",
"numpy.require",
"numpy.errstate",
"numpy.array",
"numpy.sum",
"scipy.ndimage.minimum_filter",
"numpy.geomspace",
"numpy.average",
"numpy.isinf",
"numpy.empty"
],
[
"pandas.concat",
"pandas.merge",
"pandas.Series",
"numpy.random.seed",
"numpy.minimum.accumulate",
"numpy.random.shuffle",
"pandas.DataFrame",
"numpy.greater_equal",
"numpy.isclose"
],
[
"numpy.allclose",
"numpy.linspace",
"pandas.DataFrame",
"numpy.ones",
"numpy.all",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.clip",
"numpy.quantile",
"scipy.ndimage.median_filter",
"pandas.DataFrame",
"numpy.ones",
"numpy.max",
"sklearn.metrics.pairwise.pairwise_kernels"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
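To make the score_to_fdr_map logic in fdr.py (in the code cell above) concrete, here is a tiny worked ranking with made-up scores, decoy_ratio = 1 and the rule-of-succession term omitted: walk the scores from high to low, report cumulative decoys over cumulative targets, then enforce monotonicity with a reversed running minimum:

import numpy as np
import pandas as pd

# Made-up scores: four targets, two decoys.
targets = pd.Series([0.9, 0.8, 0.6, 0.3]).value_counts().rename('target')
decoys = pd.Series([0.7, 0.2]).value_counts().rename('decoy')
counts = pd.concat([targets, decoys], axis=1).fillna(0).sort_index(ascending=False)
fdrs = counts.decoy.cumsum() / counts.target.cumsum()
# Reverse, take the running minimum, reverse back - as in score_to_fdr_map.
fdrs = pd.Series(np.minimum.accumulate(fdrs.values[::-1])[::-1], index=fdrs.index)
print(fdrs)  # 0.9 -> 0.0, 0.8 -> 0.0, 0.7 -> 0.25, 0.6 -> 0.25, 0.3 -> 0.25, 0.2 -> 0.5

FDR stays at 0 until the first decoy outranks a target, then rises as decoys accumulate in the ranking.
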
tedunderwood/measureperspective
|
[
"46ba16e6ef3d4e17626a6366bcb98ec554f7bc83",
"46ba16e6ef3d4e17626a6366bcb98ec554f7bc83"
] |
[
"mungedata/simple_author_dedup.py",
"mungedata/dedup2earliest.py"
] |
[
"# simple_author_dedup.py\n\n# the goal here is simply to go\n# through a list of author names\n# and find close matches\n# that ought to be treated\n# as the same person\n\nimport sys, csv\nimport pandas as pd\nfrom difflib import SequenceMatcher\n\nargs = sys.argv[1:]\n\nauthorset = set()\n\nfor path in args:\n df = pd.read_csv('../metadata/' + path)\n for auth in df.author:\n if type(auth) == str and len(auth) > 1:\n authorset.add(auth)\n\ndelim = \"), \"\n\nauthorgroups = dict()\n\nfor auth in authorset:\n if len(auth) < 2:\n continue\n if auth.startswith('('):\n # a nasty thing sometimes happens\n # like (Herbert George), Wells, H. G.\n if delim in auth:\n parts = auth.split(delim)\n alternate = parts[1] + ' ' + parts[0] + ')'\n print(auth, alternate)\n lastname = alternate.split(',')[0].lower()\n else:\n print(\"ERROR\", auth)\n lastname = auth.split(',')[0].lower()\n else:\n lastname = auth.split(',')[0].lower()\n\n if lastname in authorgroups:\n authorgroups[lastname].append(auth)\n else:\n authorgroups[lastname] = [auth]\n\ntranslations = dict()\n\nwith open('translationtable.tsv', encoding = 'utf-8') as f:\n reader = csv.DictReader(f, delimiter = '\\t')\n for row in reader:\n translations[row['name']] = row['bettername']\n\nnotamatch = dict()\n\n# with open('notmatches.tsv', encoding = 'utf-8') as f:\n# for line in f:\n# parts = line.strip().split('\\t')\n# notamatch[parts[0]] = parts[1]\n\nfor lastname, group in authorgroups.items():\n for authA in group:\n if authA in translations:\n continue\n for authB in group:\n if authA == authB:\n continue\n # literal equality\n if authB in notamatch and authA in notamatch[authB]:\n continue\n\n a = authA.strip(\",. 0123456789\")\n b = authB.strip(\",. 0123456789\")\n\n if a == b:\n translations[authA] = a\n translations[authB] = b\n matched = True\n continue\n\n setA = set(a.replace(',', ' ').replace('(', ' ').lower().split())\n setB = set(b.replace(',', ' ').replace('(', ' ').lower().split())\n unionlen = len(setA.union(setB))\n\n intersectlen = 0\n for aword in setA:\n if aword in setB:\n intersectlen += 1\n elif aword[0] in setB:\n intersectlen += 1\n # because we count initials as matches\n\n if (intersectlen / unionlen) > 0.7:\n print(authA + \" || \" + authB)\n user = input('match? ')\n if user == 'y':\n translateto = input('best name: ')\n translations[authA] = translateto\n translations[authB] = translateto\n matched = True\n else:\n if authA not in notamatch:\n notamatch[authA] = set()\n if authB not in notamatch:\n notamatch[authB] = set()\n notamatch[authA].add(authB)\n notamatch[authB].add(authA)\n\nwith open('translationtable.tsv', mode = 'w', encoding = 'utf-8') as f:\n f.write('name\\tbettername\\n')\n for k, v in translations.items():\n if k != v:\n f.write(k + '\\t' + v + '\\n')\n\nwith open('notmatches.tsv', mode = 'w', encoding = 'utf-8') as f:\n for k, v in notamatch.items():\n for v1 in v:\n f.write(k + '\\t' + v1 + '\\n')\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"#!/usr/bin/env python3\n\n# dedup2earliest.py\n\nimport csv, sys\nfrom difflib import SequenceMatcher\nimport pandas as pd\n\ndef titleregularize(title):\n title = str(title)\n if title == 'nan':\n title = ''\n\n if '|' in title:\n title = title.split('|')[0]\n\n title = title.lower().strip('/ .,:-')\n title = title.replace('the', 'x')\n if len(title) < 4:\n title = title + \" \"\n if len(title) > 18:\n title = title[0 : 18]\n\n firstchars = title[0:3]\n\n return title, firstchars\n\ndef authorregularize(author):\n author = str(author)\n if author == 'nan':\n author = ''\n\n if '|' in author:\n author = author.split('|')[0]\n\n author = author.strip('/ ,.:123456789-').lower()\n return author\n\ndef isitamatch(t, a, title, author):\n m = SequenceMatcher(None, title, t)\n ratio = m.real_quick_ratio()\n if ratio > 0.8:\n betterratio = m.ratio()\n n = SequenceMatcher(None, author, a)\n authorratio = n.ratio()\n if betterratio > 0.9 and authorratio > 0.7:\n return True\n else:\n return False\n else:\n return False\n\n## MAIN\n\nargs = sys.argv\n\nif len(args) < 3:\n print('This script requires an infile and outfile as arguments.')\n print('It assumes they both are located in ../rawdata/')\n sys.exit(0)\n\ninfile = '../rawdata/' + args[1]\noutfile = '../rawdata/' + args[2]\n\ndata = pd.read_csv(infile,\n dtype = {'docid': object, 'title': object, 'author': object, 'enumcron': object})\ndata.set_index('docid', inplace = True)\n\nblocked_titles = dict()\n\nfor i in data.index:\n docid = i\n author = data.loc[i, 'author']\n title = data.loc[i, 'title']\n enumcron = str(data.loc[i, 'enumcron'])\n if len(enumcron) > 0 and enumcron != 'nan':\n title = title + ' ' + enumcron\n\n if len(title) < 2:\n continue\n\n title, firstchars = titleregularize(title)\n author = authorregularize(author)\n date = int(data.loc[i, 'inferreddate'])\n if date < 100:\n date = 3000\n\n # The reason being, that we want to accept the\n # earliest date, without treating the default\n # date of zero as an \"earliest.\" We'd like to\n # take a specific date if one is available.\n\n if firstchars not in blocked_titles:\n blocked_titles[firstchars] = set()\n\n blocked_titles[firstchars].add((title, author, date, docid))\n\nprint('Titles divided into blocks.')\n\nretained = set()\nretained_exact_titles = dict()\n\nfor i in data.index:\n docid = i\n author = data.loc[i, 'author']\n title = data.loc[i, 'title']\n enumcron = str(data.loc[i, 'enumcron'])\n if len(enumcron) > 0 and enumcron != 'nan':\n title = title + ' ' + enumcron\n\n includedbc = str(data.loc[i, 'includedbc'])\n\n exacttitle = title\n if exacttitle in retained_exact_titles:\n retained_exact_titles[exacttitle].add(includedbc)\n continue\n # if this exact title is already in our retained set,\n # then we already have the earliest copy\n\n if len(title) < 2:\n retained.add(docid)\n continue\n\n title, firstchars = titleregularize(title)\n\n author = authorregularize(author)\n date = int(data.loc[i, 'inferreddate'])\n if date < 100:\n date = 3000\n\n if firstchars in blocked_titles:\n\n mindate = date\n best_docid = docid\n\n candidates = blocked_titles[firstchars]\n for oth_title, oth_author, oth_date, oth_docid in candidates:\n if oth_date >= mindate:\n continue\n # we want the earliest example only\n # and the matching process is slow, so this\n # saves time\n\n is_matching = isitamatch(title, author, oth_title, oth_author)\n\n if is_matching:\n mindate = oth_date\n best_docid = oth_docid\n\n retained.add(best_docid)\n retained_exact_titles[exacttitle] = set()\n 
retained_exact_titles[exacttitle].add(includedbc)\n\n else:\n retained.add(docid)\n\nprint('Of the original ' + str(len(list(data.index))) + ' rows,')\nprint('only ' + str(len(retained)) + ' were retained.')\n\nnewdata = data.loc[list(retained)]\nfor i in newdata.index:\n title = newdata.loc[i, 'title']\n enumcron = str(newdata.loc[i, 'enumcron'])\n if len(enumcron) > 0 and enumcron != 'nan':\n title = title + ' ' + enumcron\n if title in retained_exact_titles:\n includedbc = str(newdata.loc[i, 'includedbc'])\n retained_exact_titles[title].add(includedbc)\n newincl = '|'.join(retained_exact_titles[title])\n newdata.loc[i, 'includedbc'] = newincl\n\nnewdata.to_csv(outfile)\n\n\n\n\n\n\n\n"
] |
[
[
"pandas.read_csv"
],
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
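A minimal sketch, with made-up titles, of the blocking-plus-fuzzy-matching pattern both deduplication scripts above rely on: group candidates by a cheap key so the quadratic SequenceMatcher comparisons only run inside small blocks:

from difflib import SequenceMatcher

# Hypothetical titles, pre-normalised the way titleregularize does ('the' -> 'x').
titles = ['x great gatsby', 'x great gatsby.', 'middlemarch', 'x mill on x floss']
blocks = {}
for t in titles:
    blocks.setdefault(t[:3], []).append(t)   # block on the first three characters
for group in blocks.values():
    for i, a in enumerate(group):
        for b in group[i + 1:]:
            if SequenceMatcher(None, a, b).ratio() > 0.9:
                print('probable duplicates:', a, '/', b)

Only the two Gatsby variants share a block and clear the 0.9 ratio threshold; the other titles are never compared at all.
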
Jennifer-Rigdon/fvcore
|
[
"7e800a86f2df93da017e07380543b4060ab88c94",
"7e800a86f2df93da017e07380543b4060ab88c94"
] |
[
"fvcore/transforms/transform.py",
"fvcore/nn/jit_handles.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport inspect\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Any, Callable, List, Optional, TypeVar\n\nimport numpy as np\nimport torch\n\nfrom .transform_util import to_float_tensor, to_numpy\n\n\n__all__ = [\n \"BlendTransform\",\n \"CropTransform\",\n \"GridSampleTransform\",\n \"HFlipTransform\",\n \"VFlipTransform\",\n \"NoOpTransform\",\n \"ScaleTransform\",\n \"Transform\",\n \"TransformList\",\n]\n\n\nclass Transform(metaclass=ABCMeta):\n \"\"\"\n Base class for implementations of __deterministic__ transformations for\n image and other data structures. \"Deterministic\" requires that the output of\n all methods of this class are deterministic w.r.t their input arguments. In\n training, there should be a higher-level policy that generates (likely with\n random variations) these transform ops. Each transform op may handle several\n data types, e.g.: image, coordinates, segmentation, bounding boxes. Some of\n them have a default implementation, but can be overwritten if the default\n isn't appropriate. The implementation of each method may choose to modify\n its input data in-place for efficient transformation.\n \"\"\"\n\n def _set_attributes(self, params: Optional[List[Any]] = None) -> None:\n \"\"\"\n Set attributes from the input list of parameters.\n\n Args:\n params (list): list of parameters.\n \"\"\"\n\n if params:\n for k, v in params.items():\n if k != \"self\" and not k.startswith(\"_\"):\n setattr(self, k, v)\n\n @abstractmethod\n def apply_image(self, img: np.ndarray):\n \"\"\"\n Apply the transform on an image.\n\n Args:\n img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n Returns:\n ndarray: image after apply the transformation.\n \"\"\"\n\n @abstractmethod\n def apply_coords(self, coords: np.ndarray):\n \"\"\"\n Apply the transform on coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is (x, y).\n\n Returns:\n ndarray: coordinates after apply the transformation.\n\n Note:\n The coordinates are not pixel indices. Coordinates inside an image of\n shape (H, W) are in range [0, W] or [0, H].\n This function should correctly transform coordinates outside the image as well.\n \"\"\"\n\n def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform on a full-image segmentation.\n By default will just perform \"apply_image\".\n\n Args:\n segmentation (ndarray): of shape HxW. The array should have integer\n or bool dtype.\n\n Returns:\n ndarray: segmentation after apply the transformation.\n \"\"\"\n return self.apply_image(segmentation)\n\n def apply_box(self, box: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform on an axis-aligned box.\n By default will transform the corner points and use their\n minimum/maximum to create a new axis-aligned box.\n Note that this default may change the size of your box, e.g. in\n rotations.\n\n Args:\n box (ndarray): Nx4 floating point array of XYXY format in absolute\n coordinates.\n Returns:\n ndarray: box after apply the transformation.\n\n Note:\n The coordinates are not pixel indices. 
Coordinates inside an image of\n shape (H, W) are in range [0, W] or [0, H].\n\n This function does not clip boxes to force them inside the image.\n It is up to the application that uses the boxes to decide.\n \"\"\"\n # Indexes of converting (x0, y0, x1, y1) box into 4 coordinates of\n # ([x0, y0], [x1, y0], [x0, y1], [x1, y1]).\n idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()\n coords = np.asarray(box).reshape(-1, 4)[:, idxs].reshape(-1, 2)\n coords = self.apply_coords(coords).reshape((-1, 4, 2))\n minxy = coords.min(axis=1)\n maxxy = coords.max(axis=1)\n trans_boxes = np.concatenate((minxy, maxxy), axis=1)\n return trans_boxes\n\n def apply_polygons(self, polygons: list) -> list:\n \"\"\"\n Apply the transform on a list of polygons, each represented by a Nx2\n array.\n By default will just transform all the points.\n\n Args:\n polygon (list[ndarray]): each is a Nx2 floating point array of\n (x, y) format in absolute coordinates.\n Returns:\n list[ndarray]: polygon after apply the transformation.\n\n Note:\n The coordinates are not pixel indices. Coordinates on an image of\n shape (H, W) are in range [0, W] or [0, H].\n \"\"\"\n return [self.apply_coords(p) for p in polygons]\n\n @classmethod\n def register_type(cls, data_type: str, func: Callable):\n \"\"\"\n Register the given function as a handler that this transform will use\n for a specific data type.\n\n Args:\n data_type (str): the name of the data type (e.g., box)\n func (callable): takes a transform and a data, returns the\n transformed data.\n\n Examples:\n\n .. code-block:: python\n\n def func(flip_transform, voxel_data):\n return transformed_voxel_data\n HFlipTransform.register_type(\"voxel\", func)\n\n # ...\n transform = HFlipTransform(...)\n transform.apply_voxel(voxel_data) # func will be called\n \"\"\"\n assert callable(\n func\n ), \"You can only register a callable to a Transform. Got {} instead.\".format(\n func\n )\n argspec = inspect.getfullargspec(func)\n assert len(argspec.args) == 2, (\n \"You can only register a function that takes two positional \"\n \"arguments to a Transform! Got a function with spec {}\".format(str(argspec))\n )\n setattr(cls, \"apply_\" + data_type, func)\n\n def inverse(self) -> \"Transform\":\n \"\"\"\n Create a transform that inverts the geometric changes (i.e. 
change of\n coordinates) of this transform.\n\n Note that the inverse is meant for geometric changes only.\n The inverse of photometric transforms that do not change coordinates\n is defined to be a no-op, even if they may be invertible.\n\n Returns:\n Transform:\n \"\"\"\n raise NotImplementedError\n\n\n_T = TypeVar(\"_T\")\n\n\n# pyre-ignore-all-errors\nclass TransformList:\n \"\"\"\n Maintain a list of transform operations which will be applied in sequence.\n Attributes:\n transforms (list[Transform])\n \"\"\"\n\n def __init__(self, transforms: list):\n \"\"\"\n Args:\n transforms (list[Transform]): list of transforms to perform.\n \"\"\"\n super().__init__()\n for t in transforms:\n assert isinstance(t, Transform), t\n self.transforms = transforms\n\n def _apply(self, x: _T, meth: str) -> _T:\n \"\"\"\n Apply the transforms on the input.\n Args:\n x: input to apply the transform operations.\n meth (str): meth.\n Returns:\n x: after apply the transformation.\n \"\"\"\n for t in self.transforms:\n x = getattr(t, meth)(x)\n return x\n\n def __getattr__(self, name: str):\n \"\"\"\n Args:\n name (str): name of the attribute.\n \"\"\"\n if name.startswith(\"apply_\"):\n return lambda x: self._apply(x, name)\n raise AttributeError(\"TransformList object has no attribute {}\".format(name))\n\n def __add__(self, other: \"TransformList\") -> \"TransformList\":\n \"\"\"\n Args:\n other (TransformList): transformation to add.\n Returns:\n TransformList: list of transforms.\n \"\"\"\n others = other.transforms if isinstance(other, TransformList) else [other]\n return TransformList(self.transforms + others)\n\n def __iadd__(self, other: \"TransformList\") -> \"TransformList\":\n \"\"\"\n Args:\n other (TransformList): transformation to add.\n Returns:\n TransformList: list of transforms.\n \"\"\"\n others = other.transforms if isinstance(other, TransformList) else [other]\n self.transforms.extend(others)\n return self\n\n def __radd__(self, other: \"TransformList\") -> \"TransformList\":\n \"\"\"\n Args:\n other (TransformList): transformation to add.\n Returns:\n TransformList: list of transforms.\n \"\"\"\n others = other.transforms if isinstance(other, TransformList) else [other]\n return TransformList(others + self.transforms)\n\n def __len__(self) -> int:\n \"\"\"\n Returns:\n Number of transforms contained in the TransformList.\n \"\"\"\n return len(self.transforms)\n\n def inverse(self) -> \"TransformList\":\n \"\"\"\n Invert each transform in reversed order.\n \"\"\"\n return TransformList([x.inverse() for x in self.transforms[::-1]])\n\n\nclass HFlipTransform(Transform):\n \"\"\"\n Perform horizontal flip.\n \"\"\"\n\n def __init__(self, width: int):\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the image(s).\n\n Args:\n img (ndarray): of shape HxW, HxWxC, or NxHxWxC. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n Returns:\n ndarray: the flipped image(s).\n \"\"\"\n # NOTE: opencv would be faster:\n # https://github.com/pytorch/pytorch/issues/16424#issuecomment-580695672\n if img.ndim <= 3: # HxW, HxWxC\n return np.flip(img, axis=1)\n else:\n return np.flip(img, axis=-2)\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. 
Each row is\n (x, y).\n Returns:\n ndarray: the flipped coordinates.\n\n Note:\n The inputs are floating point coordinates, not pixel indices.\n Therefore they are flipped by `(W - x, H - y)`, not\n `(W - 1 - x, H - 1 - y)`.\n \"\"\"\n coords[:, 0] = self.width - coords[:, 0]\n return coords\n\n def inverse(self) -> Transform:\n \"\"\"\n The inverse is to flip again\n \"\"\"\n return self\n\n\nclass VFlipTransform(Transform):\n \"\"\"\n Perform vertical flip.\n \"\"\"\n\n def __init__(self, height: int):\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the image(s).\n\n Args:\n img (ndarray): of shape HxW, HxWxC, or NxHxWxC. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n Returns:\n ndarray: the flipped image(s).\n \"\"\"\n tensor = torch.from_numpy(np.ascontiguousarray(img))\n if len(tensor.shape) == 2:\n # For dimension of HxW.\n tensor = tensor.flip((-2))\n elif len(tensor.shape) > 2:\n # For dimension of HxWxC, NxHxWxC.\n tensor = tensor.flip((-3))\n return tensor.numpy()\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is\n (x, y).\n Returns:\n ndarray: the flipped coordinates.\n\n Note:\n The inputs are floating point coordinates, not pixel indices.\n Therefore they are flipped by `(W - x, H - y)`, not\n `(W - 1 - x, H - 1 - y)`.\n \"\"\"\n coords[:, 1] = self.height - coords[:, 1]\n return coords\n\n def inverse(self) -> Transform:\n \"\"\"\n The inverse is to flip again\n \"\"\"\n return self\n\n\nclass NoOpTransform(Transform):\n \"\"\"\n A transform that does nothing.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n return img\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n return coords\n\n def inverse(self) -> Transform:\n return self\n\n\nclass ScaleTransform(Transform):\n \"\"\"\n Resize the image to a target size.\n \"\"\"\n\n def __init__(self, h: int, w: int, new_h: int, new_w: int, interp: str = None):\n \"\"\"\n Args:\n h, w (int): original image size.\n new_h, new_w (int): new image size.\n interp (str): interpolation methods. Options includes `nearest`, `linear`\n (3D-only), `bilinear`, `bicubic` (4D-only), and `area`.\n Details can be found in:\n https://pytorch.org/docs/stable/nn.functional.html\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray:\n \"\"\"\n Resize the image(s).\n\n Args:\n img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n interp (str): interpolation methods. 
Options includes `nearest`, `linear`\n (3D-only), `bilinear`, `bicubic` (4D-only), and `area`.\n Details can be found in:\n https://pytorch.org/docs/stable/nn.functional.html\n\n Returns:\n ndarray: resized image(s).\n \"\"\"\n if len(img.shape) == 4:\n h, w = img.shape[1:3]\n elif len(img.shape) in (2, 3):\n h, w = img.shape[:2]\n else:\n raise (\"Unsupported input with shape of {}\".format(img.shape))\n assert (\n self.h == h and self.w == w\n ), \"Input size mismatch h w {}:{} -> {}:{}\".format(self.h, self.w, h, w)\n interp_method = interp if interp is not None else self.interp\n # Option of align_corners is only supported for linear, bilinear,\n # and bicubic.\n if interp_method in [\"linear\", \"bilinear\", \"bicubic\"]:\n align_corners = False\n else:\n align_corners = None\n\n # note: this is quite slow for int8 images because torch does not\n # support it https://github.com/pytorch/pytorch/issues/5580\n float_tensor = torch.nn.functional.interpolate(\n to_float_tensor(img),\n size=(self.new_h, self.new_w),\n mode=interp_method,\n align_corners=align_corners,\n )\n return to_numpy(float_tensor, img.shape, img.dtype)\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Compute the coordinates after resize.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is\n (x, y).\n Returns:\n ndarray: resized coordinates.\n \"\"\"\n coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)\n coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)\n return coords\n\n def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply resize on the full-image segmentation.\n\n Args:\n segmentation (ndarray): of shape HxW. The array should have integer\n or bool dtype.\n Returns:\n ndarray: resized segmentation.\n \"\"\"\n segmentation = self.apply_image(segmentation, interp=\"nearest\")\n return segmentation\n\n def inverse(self) -> Transform:\n \"\"\"\n The inverse is to resize it back.\n \"\"\"\n return ScaleTransform(self.new_h, self.new_w, self.h, self.w, self.interp)\n\n\nclass GridSampleTransform(Transform):\n def __init__(self, grid: np.ndarray, interp: str):\n \"\"\"\n Args:\n grid (ndarray): grid has x and y input pixel locations which are\n used to compute output. Grid has values in the range of [-1, 1],\n which is normalized by the input height and width. The dimension\n is `N x H x W x 2`.\n interp (str): interpolation methods. Options include `nearest` and\n `bilinear`.\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray:\n \"\"\"\n Apply grid sampling on the image(s).\n\n Args:\n img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n interp (str): interpolation methods. 
Options include `nearest` and\n `bilinear`.\n Returns:\n ndarray: grid sampled image(s).\n \"\"\"\n interp_method = interp if interp is not None else self.interp\n float_tensor = torch.nn.functional.grid_sample(\n to_float_tensor(img), # NxHxWxC -> NxCxHxW.\n torch.from_numpy(self.grid),\n mode=interp_method,\n padding_mode=\"border\",\n align_corners=False,\n )\n return to_numpy(float_tensor, img.shape, img.dtype)\n\n def apply_coords(self, coords: np.ndarray):\n \"\"\"\n Not supported.\n \"\"\"\n raise NotImplementedError()\n\n def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply grid sampling on the full-image segmentation.\n\n Args:\n segmentation (ndarray): of shape HxW. The array should have integer\n or bool dtype.\n Returns:\n ndarray: grid sampled segmentation.\n \"\"\"\n segmentation = self.apply_image(segmentation, interp=\"nearest\")\n return segmentation\n\n\nclass CropTransform(Transform):\n def __init__(self, x0: int, y0: int, w: int, h: int):\n # TODO: flip the order of w and h.\n \"\"\"\n Args:\n x0, y0, w, h (int): crop the image(s) by img[y0:y0+h, x0:x0+w].\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Crop the image(s).\n\n Args:\n img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n Returns:\n ndarray: cropped image(s).\n \"\"\"\n if len(img.shape) <= 3:\n return img[self.y0 : self.y0 + self.h, self.x0 : self.x0 + self.w]\n else:\n return img[..., self.y0 : self.y0 + self.h, self.x0 : self.x0 + self.w, :]\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply crop transform on coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is\n (x, y).\n Returns:\n ndarray: cropped coordinates.\n \"\"\"\n coords[:, 0] -= self.x0\n coords[:, 1] -= self.y0\n return coords\n\n def apply_polygons(self, polygons: list) -> list:\n \"\"\"\n Apply crop transform on a list of polygons, each represented by a Nx2 array.\n It will crop the polygon with the box, therefore the number of points in the\n polygon might change.\n\n Args:\n polygon (list[ndarray]): each is a Nx2 floating point array of\n (x, y) format in absolute coordinates.\n Returns:\n ndarray: cropped polygons.\n \"\"\"\n import shapely.geometry as geometry\n\n # Create a window that will be used to crop\n crop_box = geometry.box(\n self.x0, self.y0, self.x0 + self.w, self.y0 + self.h\n ).buffer(0.0)\n\n cropped_polygons = []\n\n for polygon in polygons:\n polygon = geometry.Polygon(polygon).buffer(0.0)\n # polygon must be valid to perform intersection.\n assert polygon.is_valid, polygon\n cropped = polygon.intersection(crop_box)\n if cropped.is_empty:\n continue\n if not isinstance(cropped, geometry.collection.BaseMultipartGeometry):\n cropped = [cropped]\n # one polygon may be cropped to multiple ones\n for poly in cropped:\n # It could produce lower dimensional objects like lines or\n # points, which we want to ignore\n if not isinstance(poly, geometry.Polygon) or not poly.is_valid:\n continue\n coords = np.asarray(poly.exterior.coords)\n # NOTE This process will produce an extra identical vertex at\n # the end. So we remove it. 
This is tested by\n # `tests/test_data_transform.py`\n cropped_polygons.append(coords[:-1])\n return [self.apply_coords(p) for p in cropped_polygons]\n\n\nclass BlendTransform(Transform):\n \"\"\"\n Transforms pixel colors with PIL enhance functions.\n \"\"\"\n\n def __init__(self, src_image: np.ndarray, src_weight: float, dst_weight: float):\n \"\"\"\n Blends the input image (dst_image) with the src_image using formula:\n ``src_weight * src_image + dst_weight * dst_image``\n\n Args:\n src_image (ndarray): Input image is blended with this image\n src_weight (float): Blend weighting of src_image\n dst_weight (float): Blend weighting of dst_image\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray:\n \"\"\"\n Apply blend transform on the image(s).\n\n Args:\n img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n interp (str): keep this option for consistency, perform blend would not\n require interpolation.\n Returns:\n ndarray: blended image(s).\n \"\"\"\n if img.dtype == np.uint8:\n img = img.astype(np.float32)\n img = self.src_weight * self.src_image + self.dst_weight * img\n return np.clip(img, 0, 255).astype(np.uint8)\n else:\n return self.src_weight * self.src_image + self.dst_weight * img\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply no transform on the coordinates.\n \"\"\"\n return coords\n\n def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply no transform on the full-image segmentation.\n \"\"\"\n return segmentation\n\n def inverse(self) -> Transform:\n \"\"\"\n The inverse is a no-op.\n \"\"\"\n return NoOpTransform()\n",
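A quick illustration of how the transform classes in the record above compose (a minimal sketch, assuming the file is importable as-is; the image sizes and coordinates are made up). `TransformList` applies its members in order, and `inverse()` undoes the geometric part:

    import numpy as np

    # Hypothetical sizes; HFlipTransform and ScaleTransform are defined above.
    tfm = TransformList([
        HFlipTransform(width=640),
        ScaleTransform(h=480, w=640, new_h=240, new_w=320, interp="bilinear"),
    ])
    coords = np.array([[10.0, 20.0]])
    out = tfm.apply_coords(coords.copy())              # flip in x, then rescale
    restored = tfm.inverse().apply_coords(out.copy())  # scale back, then flip back
    assert np.allclose(restored, [[10.0, 20.0]])       # geometric round-trip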
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport typing\nfrom collections import Counter, OrderedDict\n\nimport torch\nimport torch.nn as nn\nfrom numpy import prod\n\n\n# A list that contains ignored operations.\n_IGNORED_OPS: typing.List[str] = [\n \"aten::Int\",\n \"aten::__and__\",\n \"aten::arange\",\n \"aten::cat\",\n \"aten::clamp\",\n \"aten::clamp_\",\n \"aten::contiguous\",\n \"aten::copy_\",\n \"aten::detach\",\n \"aten::empty\",\n \"aten::eq\",\n \"aten::floor_divide\",\n \"aten::ScalarImplicit\",\n \"aten::expand\",\n \"aten::flatten\",\n \"aten::floor\",\n \"aten::ge\",\n \"aten::chunk\",\n \"aten::split\",\n \"aten::stack\",\n \"aten::full\",\n \"aten::gt\",\n \"aten::index\",\n \"aten::index_put_\",\n \"aten::max\",\n \"aten::nonzero\",\n \"aten::permute\",\n \"aten::remainder\",\n \"aten::reshape\",\n \"aten::select\",\n \"aten::size\",\n \"aten::slice\",\n \"aten::split_with_sizes\",\n \"aten::squeeze\",\n \"aten::t\",\n \"aten::to\",\n \"aten::transpose\",\n \"aten::unsqueeze\",\n \"aten::unsqueeze_\",\n \"aten::constant_pad_nd\",\n \"aten::view\",\n \"aten::zeros\",\n \"aten::zeros_like\",\n \"prim::Constant\",\n \"prim::Int\",\n \"prim::ListConstruct\",\n \"prim::ListUnpack\",\n \"prim::NumToTensor\",\n \"prim::TupleConstruct\",\n \"prim::ImplicitTensorToNum\",\n]\n\n\ndef get_jit_model_analysis(\n model: nn.Module,\n inputs: typing.Tuple[object, ...],\n ops_handles: typing.Dict[str, typing.Callable],\n) -> typing.Tuple[typing.Counter[str], typing.Counter[str]]:\n \"\"\"\n Given a model, the inputs and the handles for each operation, return the\n results for the model analysis.\n\n Args:\n model (nn.Module): The model for torch script to trace.\n inputs (tuple): Inputs that are passed to `model` to trace. 
Inputs need\n to be in a tuple.\n ops_handles (typing.Dict[str, typing.Callable]): A dictionary of handles\n for model analysis.\n\n Returns:\n typing.Tuple[typing.Counter[str], typing.Counter[str]]: A counter that\n contains the results of per operation analysis of the model and a\n Counter of ignored operations.\n \"\"\"\n # Torch script does not support parallel torch models.\n if isinstance(\n model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)\n ):\n model = model.module # pyre-ignore\n\n # Compatibility with torch.jit.\n if hasattr(torch.jit, \"get_trace_graph\"):\n trace, _ = torch.jit.get_trace_graph(model, inputs)\n trace_nodes = trace.graph().nodes()\n else:\n trace, _ = torch.jit._get_trace_graph(model, inputs)\n trace_nodes = trace.nodes()\n\n skipped_ops = Counter()\n total_count = Counter()\n\n for node in trace_nodes:\n kind = node.kind()\n if kind not in ops_handles.keys():\n # If the operation is not in _IGNORED_OPS, count skipped operations.\n if kind not in _IGNORED_OPS:\n skipped_ops[kind] += 1\n continue\n\n handle_count = ops_handles.get(kind, None)\n if handle_count is None:\n continue\n # pyre-ignore\n inputs, outputs = list(node.inputs()), list(node.outputs())\n op_count = handle_count(inputs, outputs)\n total_count += op_count\n return total_count, skipped_ops\n\n\ndef generic_activation_jit(\n op_name: str,\n) -> typing.Callable[[typing.List[object], typing.List[object]], typing.Counter[str]]:\n \"\"\"\n This method return a handle that counts the number of activation from the\n output shape for the specified operation.\n\n Args:\n op_name (str): The name of the operation.\n\n Returns:\n typing.Callable: An activation handle for the given operation.\n \"\"\"\n\n def _generic_activation_jit(outputs: typing.List[object]) -> int:\n \"\"\"\n This is a generic jit handle that counts the number of activations for any\n operation given the output shape.\n\n Args:\n outputs (list(torch._C.Value)): The output shape in the form of a list\n of jit object.\n\n Returns:\n int: Total number of activations for each operation.\n \"\"\"\n out_shape = get_shape(outputs[0])\n ac_count = prod(out_shape)\n return ac_count\n\n return lambda inputs, outputs: Counter({op_name: _generic_activation_jit(outputs)})\n\n\ndef get_shape(val: object) -> typing.List[int]:\n \"\"\"\n Get the shapes from a jit value object.\n\n Args:\n val (torch._C.Value): jit value object.\n\n Returns:\n list(int): return a list of ints.\n \"\"\"\n if val.isCompleteTensor(): # pyre-ignore\n return val.type().sizes() # pyre-ignore\n else:\n raise ValueError()\n\n\ndef addmm_flop_jit(\n inputs: typing.List[object], outputs: typing.List[object]\n) -> typing.Counter[str]:\n \"\"\"\n This method counts the flops for fully connected layers with torch script.\n\n Args:\n inputs (list(torch._C.Value)): The input shape in the form of a list of\n jit object.\n outputs (list(torch._C.Value)): The output shape in the form of a list\n of jit object.\n\n Returns:\n Counter: A Counter dictionary that records the number of flops for each\n operation.\n \"\"\"\n # Count flop for nn.Linear\n # inputs is a list of length 3.\n input_shapes = [get_shape(v) for v in inputs[1:3]]\n # input_shapes[0]: [batch size, input feature dimension]\n # input_shapes[1]: [batch size, output feature dimension]\n assert len(input_shapes[0]) == 2, input_shapes[0]\n assert len(input_shapes[1]) == 2, input_shapes[1]\n batch_size, input_dim = input_shapes[0]\n output_dim = input_shapes[1][1]\n flop = batch_size * input_dim * 
output_dim\n flop_counter = Counter({\"addmm\": flop})\n return flop_counter\n\n\ndef conv_flop_count(\n x_shape: typing.List[int], w_shape: typing.List[int], out_shape: typing.List[int]\n) -> typing.Counter[str]:\n \"\"\"\n This method counts the flops for convolution. Note only multiplication is\n counted. Computation for addition and bias is ignored.\n\n Args:\n x_shape (list(int)): The input shape before convolution.\n w_shape (list(int)): The filter shape.\n out_shape (list(int)): The output shape after convolution.\n Returns:\n Counter: A Counter dictionary that records the number of flops for each\n operation.\n \"\"\"\n batch_size, Cin_dim, Cout_dim = x_shape[0], w_shape[1], out_shape[1]\n out_size = prod(out_shape[2:])\n kernel_size = prod(w_shape[2:])\n flop = batch_size * out_size * Cout_dim * Cin_dim * kernel_size\n flop_counter = Counter({\"conv\": flop})\n return flop_counter\n\n\ndef conv_flop_jit(\n inputs: typing.List[object], outputs: typing.List[object]\n) -> typing.Counter[str]:\n \"\"\"\n This method counts the flops for convolution using torch script.\n\n Args:\n inputs (list(torch._C.Value)): The input shape in the form of a list of\n jit object before convolution.\n outputs (list(torch._C.Value)): The output shape in the form of a list\n of jit object after convolution.\n\n Returns:\n Counter: A Counter dictionary that records the number of flops for each\n operation.\n \"\"\"\n # Inputs of Convolution should be a list of length 12. They represent:\n # 0) input tensor, 1) convolution filter, 2) bias, 3) stride, 4) padding,\n # 5) dilation, 6) transposed, 7) out_pad, 8) groups, 9) benchmark_cudnn,\n # 10) deterministic_cudnn and 11) user_enabled_cudnn.\n assert len(inputs) == 12, len(inputs)\n x, w = inputs[:2]\n x_shape, w_shape, out_shape = (get_shape(x), get_shape(w), get_shape(outputs[0]))\n return conv_flop_count(x_shape, w_shape, out_shape)\n\n\ndef einsum_flop_jit(\n inputs: typing.List[object], outputs: typing.List[object]\n) -> typing.Counter[str]:\n \"\"\"\n This method counts the flops for the einsum operation. 
We currently support\n two einsum operations: \"nct,ncp->ntp\" and \"ntg,ncg->nct\".\n\n Args:\n inputs (list(torch._C.Value)): The input shape in the form of a list of\n jit object before einsum.\n outputs (list(torch._C.Value)): The output shape in the form of a list\n of jit object after einsum.\n\n Returns:\n Counter: A Counter dictionary that records the number of flops for each\n operation.\n \"\"\"\n # Inputs of einsum should be a list of length 2.\n # Inputs[0] stores the equation used for einsum.\n # Inputs[1] stores the list of input shapes.\n assert len(inputs) == 2, len(inputs)\n equation = inputs[0].toIValue() # pyre-ignore\n # Get rid of white space in the equation string.\n equation = equation.replace(\" \", \"\")\n # Re-map equation so that same equation with different alphabet\n # representations will look the same.\n letter_order = OrderedDict((k, 0) for k in equation if k.isalpha()).keys()\n mapping = {ord(x): 97 + i for i, x in enumerate(letter_order)}\n equation = equation.translate(mapping)\n input_shapes_jit = inputs[1].node().inputs() # pyre-ignore\n input_shapes = [get_shape(v) for v in input_shapes_jit]\n\n if equation == \"abc,abd->acd\":\n n, c, t = input_shapes[0]\n p = input_shapes[-1][-1]\n flop = n * c * t * p\n flop_counter = Counter({\"einsum\": flop})\n return flop_counter\n\n elif equation == \"abc,adc->adb\":\n n, t, g = input_shapes[0]\n c = input_shapes[-1][1]\n flop = n * t * g * c\n flop_counter = Counter({\"einsum\": flop})\n return flop_counter\n\n else:\n raise NotImplementedError(\"Unsupported einsum operation.\")\n\n\ndef matmul_flop_jit(\n inputs: typing.List[object], outputs: typing.List[object]\n) -> typing.Counter[str]:\n \"\"\"\n This method counts the flops for matmul.\n\n Args:\n inputs (list(torch._C.Value)): The input shape in the form of a list of\n jit object before matmul.\n outputs (list(torch._C.Value)): The output shape in the form of a list\n of jit object after matmul.\n\n Returns:\n Counter: A Counter dictionary that records the number of flops for each\n operation.\n \"\"\"\n # Inputs should be a list of length 2.\n # Inputs contains the shapes of two matrices.\n input_shapes = [get_shape(v) for v in inputs]\n assert len(input_shapes) == 2, input_shapes\n assert len(input_shapes[1]) == 2, input_shapes\n assert input_shapes[0][-1] == input_shapes[1][0], input_shapes\n batch_dim = input_shapes[0][0]\n m1_dim, m2_dim = input_shapes[1]\n flop = m1_dim * m2_dim * batch_dim\n flop_counter = Counter({\"matmul\": flop})\n return flop_counter\n\n\ndef batchnorm_flop_jit(\n inputs: typing.List[object], outputs: typing.List[object]\n) -> typing.Counter[str]:\n \"\"\"\n This method counts the flops for batch norm.\n\n Args:\n inputs (list(torch._C.Value)): The input shape in the form of a list of\n jit object before batch norm.\n outputs (list(torch._C.Value)): The output shape in the form of a list\n of jit object after batch norm.\n\n Returns:\n Counter: A Counter dictionary that records the number of flops for each\n operation.\n \"\"\"\n # Inputs[0] contains the shape of the input.\n input_shape = get_shape(inputs[0])\n assert 2 <= len(input_shape) <= 5, input_shape\n flop = prod(input_shape) * 4\n flop_counter = Counter({\"batchnorm\": flop})\n return flop_counter\n"
] |
[
[
"numpy.clip",
"numpy.ascontiguousarray",
"numpy.asarray",
"torch.from_numpy",
"numpy.concatenate",
"numpy.array",
"numpy.flip"
],
[
"torch.jit._get_trace_graph",
"torch.jit.get_trace_graph",
"numpy.prod"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PaulPan00/donkey_wrapper
|
[
"a03cf0f42f65625fbce792b06c98acd153c5d6c8",
"a03cf0f42f65625fbce792b06c98acd153c5d6c8",
"a03cf0f42f65625fbce792b06c98acd153c5d6c8",
"a03cf0f42f65625fbce792b06c98acd153c5d6c8",
"a03cf0f42f65625fbce792b06c98acd153c5d6c8",
"a03cf0f42f65625fbce792b06c98acd153c5d6c8"
] |
[
"Python Tutorial Reinforcement Learning/13_a3c_continuous/shared_optim.py",
"Python Tutorial Machine Learning/Regression & Classification Models/Regression/random_forest_regression.py",
"Python Tutorial Machine Learning/Regression & Classification Models/Regression/xg_boost_regression.py",
"Python Tutorial Reinforcement Learning/10_mario_a3c/src/process.py",
"Python Tutorial Reinforcement Learning/12_stable_baseline3/pool_game_posvr/envs/pool_env.py",
"Python Tutorial Reinforcement Learning/13_a3c_continuous/utils.py"
] |
[
"# Create by Packetsss\n# Personal use is allowed\n# Commercial use is prohibited\n\nfrom __future__ import division\nimport math\nimport torch\nimport torch.optim as optim\nfrom collections import defaultdict\n\n\nclass SharedRMSprop(optim.Optimizer):\n \"\"\"Implements RMSprop algorithm with shared states.\n \"\"\"\n\n def __init__(self,\n params,\n lr=7e-4,\n alpha=0.99,\n eps=0.1,\n weight_decay=0,\n momentum=0,\n centered=False):\n defaults = defaultdict(lr=lr, alpha=alpha, eps=eps,\n weight_decay=weight_decay, momentum=momentum, centered=centered)\n super(SharedRMSprop, self).__init__(params, defaults)\n\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n state['step'] = torch.zeros(1)\n state['grad_avg'] = p.data.new().resize_as_(p.data).zero_()\n state['square_avg'] = p.data.new().resize_as_(p.data).zero_()\n state['momentum_buffer'] = p.data.new(\n ).resize_as_(p.data).zero_()\n\n def share_memory(self):\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n state['square_avg'].share_memory_()\n state['step'].share_memory_()\n state['grad_avg'].share_memory_()\n state['momentum_buffer'].share_memory_()\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n 'RMSprop does not support sparse gradients')\n state = self.state[p]\n\n square_avg = state['square_avg']\n alpha = group['alpha']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)\n\n if group['centered']:\n grad_avg = state['grad_avg']\n grad_avg.mul_(alpha).add_(1 - alpha, grad)\n avg = square_avg.addcmul(\n -1, grad_avg, grad_avg).sqrt().add_(group['eps'])\n else:\n avg = square_avg.sqrt().add_(group['eps'])\n\n if group['momentum'] > 0:\n buf = state['momentum_buffer']\n buf.mul_(group['momentum']).addcdiv_(grad, avg)\n p.data.add_(-group['lr'], buf)\n else:\n p.data.addcdiv_(-group['lr'], grad, avg)\n\n return loss\n\n\nclass SharedAdam(optim.Optimizer):\n \"\"\"Implements Adam algorithm with shared states.\n \"\"\"\n\n def __init__(self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-3,\n weight_decay=0, amsgrad=True):\n defaults = defaultdict(lr=lr, betas=betas, eps=eps,\n weight_decay=weight_decay, amsgrad=amsgrad)\n super(SharedAdam, self).__init__(params, defaults)\n\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n state['step'] = torch.zeros(1)\n state['exp_avg'] = p.data.new().resize_as_(p.data).zero_()\n state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_()\n state['max_exp_avg_sq'] = p.data.new(\n ).resize_as_(p.data).zero_()\n\n def share_memory(self):\n for group in self.param_groups:\n for p in group['params']:\n state = self.state[p]\n state['step'].share_memory_()\n state['exp_avg'].share_memory_()\n state['exp_avg_sq'].share_memory_()\n state['max_exp_avg_sq'].share_memory_()\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = 
closure()\n\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.is_sparse:\n raise RuntimeError(\n 'Adam does not support sparse gradients, please consider SparseAdam instead')\n amsgrad = group['amsgrad']\n\n state = self.state[p]\n\n exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n if amsgrad:\n max_exp_avg_sq = state['max_exp_avg_sq']\n beta1, beta2 = group['betas']\n\n state['step'] += 1\n\n if group['weight_decay'] != 0:\n grad = grad.add(group['weight_decay'], p.data)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till\n # now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = max_exp_avg_sq.sqrt().add_(group['eps'])\n else:\n denom = exp_avg_sq.sqrt().add_(group['eps'])\n\n bias_correction1 = 1 - beta1**state['step'].item()\n bias_correction2 = 1 - beta2**state['step'].item()\n step_size = group['lr'] * \\\n math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n return loss\n",
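For context, the shared optimizers above are meant for A3C-style training where both the model parameters and the optimizer state live in shared memory, so that every worker process updates one set of weights. A minimal wiring sketch (the model here is a stand-in for the real actor-critic network):

    import torch.nn as nn

    global_model = nn.Linear(4, 2)   # stand-in for the actual network
    global_model.share_memory()      # parameters visible to all worker processes
    optimizer = SharedAdam(global_model.parameters(), lr=1e-3)
    optimizer.share_memory()         # step counters and moment buffers shared too
    # Each worker copies its local gradients onto global_model's parameters,
    # then calls optimizer.step() to update the shared weights.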
"# Random Forest Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('C:\\\\Users\\\\pyjpa\\\\Desktop\\\\Programming\\\\Python\\\\Python Tutorial Machine Learning\\\\Classification practice\\\\Regression\\\\Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Training the Random Forest Regression model on the whole dataset\nfrom sklearn.ensemble import RandomForestRegressor\nregressor = RandomForestRegressor(n_estimators = 10, random_state = 0)\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\nnp.set_printoptions(precision=2)\nprint(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\n\n# Evaluating the Model Performance\nfrom sklearn.metrics import r2_score\nprint(r2_score(y_test, y_pred))\n\n# 96.15",
"# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('C:\\\\Users\\\\pyjpa\\\\Desktop\\\\Programming\\\\Python\\\\Python Tutorial Machine Learning\\\\Classification practice\\\\Regression\\\\Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Training the XGBRegressor model on the Training set\nfrom xgboost import XGBRegressor\nregressor = XGBRegressor()\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\nnp.set_printoptions(precision=2)\nprint(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\n\n# Evaluating the Model Performance\nfrom sklearn.metrics import r2_score\nprint(r2_score(y_test, y_pred))\n\n# 96.71%",
"# Create by Packetsss\n# Personal use is allowed\n# Commercial use is prohibited\n\n\"\"\"\n@author: Viet Nguyen <[email protected]>\n\"\"\"\n\nimport torch\nfrom src.env import create_train_env\nfrom src.model import ActorCritic\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical\nfrom collections import deque\nfrom tensorboardX import SummaryWriter\nimport timeit\n\n\ndef local_train(index, opt, global_model, optimizer, save=False):\n torch.manual_seed(123 + index)\n if save:\n start_time = timeit.default_timer()\n writer = SummaryWriter(opt.log_path)\n env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)\n local_model = ActorCritic(num_states, num_actions)\n if opt.use_gpu:\n local_model.cuda()\n local_model.train()\n state = torch.from_numpy(env.reset())\n if opt.use_gpu:\n state = state.cuda()\n done = True\n curr_step = 0\n curr_episode = 0\n while True:\n if save:\n if curr_episode % opt.save_interval == 0 and curr_episode > 0:\n torch.save(global_model.state_dict(),\n \"{}/a3c_super_mario_bros_{}_{}\".format(opt.saved_path, opt.world, opt.stage))\n print(\"Process {}. Episode {}\".format(index, curr_episode))\n curr_episode += 1\n local_model.load_state_dict(global_model.state_dict())\n if done:\n h_0 = torch.zeros((1, 512), dtype=torch.float)\n c_0 = torch.zeros((1, 512), dtype=torch.float)\n else:\n h_0 = h_0.detach()\n c_0 = c_0.detach()\n if opt.use_gpu:\n h_0 = h_0.cuda()\n c_0 = c_0.cuda()\n\n log_policies = []\n values = []\n rewards = []\n entropies = []\n\n for _ in range(opt.num_local_steps):\n curr_step += 1\n logits, value, h_0, c_0 = local_model(state, h_0, c_0)\n policy = F.softmax(logits, dim=1)\n log_policy = F.log_softmax(logits, dim=1)\n entropy = -(policy * log_policy).sum(1, keepdim=True)\n\n m = Categorical(policy)\n action = m.sample().item()\n\n state, reward, done, _ = env.step(action)\n state = torch.from_numpy(state)\n if opt.use_gpu:\n state = state.cuda()\n if curr_step > opt.num_global_steps:\n done = True\n\n if done:\n curr_step = 0\n state = torch.from_numpy(env.reset())\n if opt.use_gpu:\n state = state.cuda()\n\n values.append(value)\n log_policies.append(log_policy[0, action])\n rewards.append(reward)\n entropies.append(entropy)\n\n if done:\n break\n\n R = torch.zeros((1, 1), dtype=torch.float)\n if opt.use_gpu:\n R = R.cuda()\n if not done:\n _, R, _, _ = local_model(state, h_0, c_0)\n\n gae = torch.zeros((1, 1), dtype=torch.float)\n if opt.use_gpu:\n gae = gae.cuda()\n actor_loss = 0\n critic_loss = 0\n entropy_loss = 0\n next_value = R\n\n for value, log_policy, reward, entropy in list(zip(values, log_policies, rewards, entropies))[::-1]:\n gae = gae * opt.gamma * opt.tau\n gae = gae + reward + opt.gamma * next_value.detach() - value.detach()\n next_value = value\n actor_loss = actor_loss + log_policy * gae\n R = R * opt.gamma + reward\n critic_loss = critic_loss + (R - value) ** 2 / 2\n entropy_loss = entropy_loss + entropy\n\n total_loss = -actor_loss + critic_loss - opt.beta * entropy_loss\n writer.add_scalar(\"Train_{}/Loss\".format(index), total_loss, curr_episode)\n optimizer.zero_grad()\n total_loss.backward()\n\n for local_param, global_param in zip(local_model.parameters(), global_model.parameters()):\n if global_param.grad is not None:\n break\n global_param._grad = local_param.grad\n\n optimizer.step()\n\n if curr_episode == int(opt.num_global_steps / opt.num_local_steps):\n print(\"Training process {} terminated\".format(index))\n if save:\n end_time = 
timeit.default_timer()\n print('The code runs for %.2f s ' % (end_time - start_time))\n return\n\n\ndef local_test(index, opt, global_model):\n torch.manual_seed(123 + index)\n env, num_states, num_actions = create_train_env(opt.world, opt.stage, opt.action_type)\n local_model = ActorCritic(num_states, num_actions)\n local_model.eval()\n state = torch.from_numpy(env.reset())\n done = True\n curr_step = 0\n actions = deque(maxlen=opt.max_actions)\n while True:\n curr_step += 1\n if done:\n local_model.load_state_dict(global_model.state_dict())\n with torch.no_grad():\n if done:\n h_0 = torch.zeros((1, 512), dtype=torch.float)\n c_0 = torch.zeros((1, 512), dtype=torch.float)\n else:\n h_0 = h_0.detach()\n c_0 = c_0.detach()\n\n logits, value, h_0, c_0 = local_model(state, h_0, c_0)\n policy = F.softmax(logits, dim=1)\n action = torch.argmax(policy).item()\n state, reward, done, _ = env.step(action)\n env.render()\n actions.append(action)\n if curr_step > opt.num_global_steps or actions.count(actions[0]) == actions.maxlen:\n done = True\n if done:\n curr_step = 0\n actions.clear()\n state = env.reset()\n state = torch.from_numpy(state)\n",
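The `local_train`/`local_test` functions above are written to be spawned as separate processes. A hypothetical launcher could look like the following; `opt`, `global_model`, and `optimizer` are assumed to be built elsewhere in the repo, and the `opt.num_processes` field is an assumption:

    import torch.multiprocessing as mp

    if __name__ == "__main__":
        processes = []
        for index in range(opt.num_processes):  # opt fields as used above
            p = mp.Process(target=local_train,
                           args=(index, opt, global_model, optimizer))
            p.start()
            processes.append(p)
        tester = mp.Process(target=local_test,
                            args=(opt.num_processes, opt, global_model))
        tester.start()
        processes.append(tester)
        for p in processes:
            p.join()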
"# Create by Packetsss\n# Personal use is allowed\n# Commercial use is prohibited\n\nfrom os import environ\nenviron['SDL_VIDEO_WINDOW_POS'] = \"1500, 200\"\nenviron['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'\n\nimport sys\nsys.path.append('pool_game/pool')\n\nimport pygame as pg\nfrom ball import BallType\nfrom collisions import resolve_all_collisions\nimport event\nfrom gamestate import GameState, Player\nfrom graphics import draw_main_menu\nfrom config import *\n\nimport gym\nimport numpy as np\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\n\nclass PoolEnv(gym.Env):\n metadata = {'render.modes': ['human']}\n\n def __init__(self):\n self.game = GameState()\n self.game.start_pool()\n super(PoolEnv, self).__init__()\n\n self.game.steps = 0\n self.game.reward = 0\n\n self.not_touching_countdown = []\n self.steps = 0\n self.max_episode_steps = 60\n self.w, self.h = resolution\n self.number_of_balls = 16\n\n # velocity_x, velocity_y\n v_limit = 550\n self.action_space = spaces.Box(low=np.array([-v_limit, -v_limit]), high=np.array([v_limit, v_limit]))\n\n # ball_x, ball_y, ball_type(solid, strips, 8-ball, cue-ball) x 16 balls\n self.observation_space = spaces.Box(\n low=np.repeat(np.array([0, 0, 0]), self.number_of_balls, axis=0).reshape(self.number_of_balls, 3), high=np.repeat(np.array([self.w, self.h, 3]), self.number_of_balls, axis=0).reshape(self.number_of_balls, 3)\n )\n\n def pre_process_observation(self):\n if self.game.ball_assignment is None:\n ball_dict = ball_unassigned_dict\n elif self.game.ball_assignment[self.game.current_player] == BallType.Solid:\n ball_dict = ball_solids_dict\n else:\n ball_dict = ball_strips_dict\n observation = np.array([np.array([*x.rect.center, ball_dict[x.number]]) for x in self.game.balls.sprites()])\n balls_to_fill = self.number_of_balls - observation.shape[0]\n if balls_to_fill > 0:\n # 1 is opposite color\n return np.vstack((observation, np.repeat(np.array([0, 0, 1]), balls_to_fill, axis=0).reshape(balls_to_fill, 3)))\n else:\n return observation\n \n def step(self, action):\n self.game.cue.ball_hit(new_velocity=action)\n\n # render\n _ = pg.event.get() # must get the env?\n resolve_all_collisions(self.game.balls, self.game.holes, self.game.table_sides)\n self.game.redraw_all()\n\n reward = 0\n done = False\n info = {}\n\n # initialize some consitions for reward evaluation\n self.game.hit_a_ball = False\n self.game.turned_over = False\n\n # wait for ball to stop\n while not self.game.all_not_moving():\n resolve_all_collisions(self.game.balls, self.game.holes, self.game.table_sides)\n self.game.redraw_all()\n\n # check game rules\n self.game.check_pool_rules()\n\n # check cue ball outside the table\n # if resolution[0] < self.game.white_ball.rect.center[0] or self.game.white_ball.rect.center[0] < 0\\\n # or resolution[1] < self.game.white_ball.rect.center[1] or self.game.white_ball.rect.center[1] < 0:\n # print(\"cue ball is outside\")\n # self.game.check_potted(ball_outside_table=True)\n\n # pot a ball\n if not self.game.turned_over:\n reward += 40\n # contact with correct ball type\n elif not self.game.can_move_white_ball:\n reward += 5\n # foul penalize\n else:\n reward -= 10\n \n # reward by hitting a ball (avoid horizontal/vertical hit by multiplying times)\n if not self.game.hit_a_ball:\n self.not_touching_countdown.append(0)\n reward -= 5 * len(self.not_touching_countdown)\n else:\n self.not_touching_countdown = []\n \n # if not touching any balls multiple times, reset env\n if len(self.not_touching_countdown) > 4:\n 
self.not_touching_countdown = []\n self.game.is_game_over = True\n self.game.winner = None\n\n # when game is over\n if self.game.is_game_over:\n done = True\n # check who wins\n if self.game.current_player == self.game.winner and self.game.potting_8ball[self.game.current_player]:\n reward += 500\n else:\n reward -= 300\n\n observation = self.pre_process_observation()\n\n # a ball is outside the env\n if observation.shape[0] > 16:\n print(observation)\n observation = observation[:16, :]\n\n \n self.steps += 1\n self.game.steps = self.steps\n self.game.reward = reward\n return observation, reward, done, info\n \n def reset(self):\n self.game.start_pool()\n return self.pre_process_observation()\n\n def render(self, mode='human'):\n self.game.redraw_all()\n\n def close(self):\n pg.quit()",
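`PoolEnv` above follows the standard gym interface, so a random-policy smoke test is straightforward (a sketch; pygame and the pool assets must be available):

    env = PoolEnv()
    obs = env.reset()
    done = False
    while not done:
        action = env.action_space.sample()          # random (vx, vy) shot
        obs, reward, done, info = env.step(action)
    env.close()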
"# Create by Packetsss\n# Personal use is allowed\n# Commercial use is prohibited\n\nfrom __future__ import division\nimport math\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport json\nimport logging\n\n\ndef setup_logger(logger_name, log_file, level=logging.INFO):\n l = logging.getLogger(logger_name)\n formatter = logging.Formatter('%(asctime)s : %(message)s')\n fileHandler = logging.FileHandler(log_file, mode='w')\n fileHandler.setFormatter(formatter)\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n\n l.setLevel(level)\n l.addHandler(fileHandler)\n l.addHandler(streamHandler)\n\n\ndef read_config(file_path):\n \"\"\"Read JSON config.\"\"\"\n json_object = json.load(open(file_path, 'r'))\n return json_object\n\n\ndef norm_col_init(weights, std=1.0):\n x = torch.randn(weights.size())\n x *= std / torch.sqrt((x**2).sum(1, keepdim=True))\n return x\n\n\ndef ensure_shared_grads(model, shared_model, gpu=False):\n for param, shared_param in zip(model.parameters(), shared_model.parameters()):\n if shared_param.grad is not None and not gpu:\n return\n elif not gpu:\n shared_param._grad = param.grad\n else:\n shared_param._grad = param.grad.cpu()\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n weight_shape = list(m.weight.data.size())\n fan_in = np.prod(weight_shape[1:4])\n fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]\n w_bound = np.sqrt(6. / (fan_in + fan_out))\n m.weight.data.uniform_(-w_bound, w_bound)\n m.bias.data.fill_(0)\n elif classname.find('Linear') != -1:\n weight_shape = list(m.weight.data.size())\n fan_in = weight_shape[1]\n fan_out = weight_shape[0]\n w_bound = np.sqrt(6. / (fan_in + fan_out))\n m.weight.data.uniform_(-w_bound, w_bound)\n m.bias.data.fill_(0)\n\n\ndef weights_init_mlp(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n m.weight.data.normal_(0, 1)\n m.weight.data *= 1 / \\\n torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))\n if m.bias is not None:\n m.bias.data.fill_(0)\n\n\ndef normal(x, mu, sigma, gpu_id, gpu=False):\n pi = np.array([math.pi])\n pi = torch.from_numpy(pi).float()\n if gpu:\n with torch.cuda.device(gpu_id):\n pi = Variable(pi).cuda()\n else:\n pi = Variable(pi)\n a = (-1 * (x - mu).pow(2) / (2 * sigma)).exp()\n b = 1 / (2 * sigma * pi.expand_as(sigma)).sqrt()\n return a * b\n"
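The init helpers in the record above are meant to be applied module-by-module, e.g. via `nn.Module.apply` (a minimal sketch; the layer shapes are arbitrary):

    import torch.nn as nn

    net = nn.Sequential(nn.Conv2d(3, 32, 3), nn.ReLU(), nn.Conv2d(32, 32, 3))
    net.apply(weights_init)   # fan-in/fan-out uniform init for Conv layers

    head = nn.Linear(32, 6)
    weights_init_mlp(head)    # row-normalized normal init for Linear layers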
] |
[
[
"torch.max",
"torch.zeros"
],
[
"sklearn.ensemble.RandomForestRegressor",
"pandas.read_csv",
"sklearn.metrics.r2_score",
"numpy.set_printoptions",
"sklearn.model_selection.train_test_split"
],
[
"numpy.set_printoptions",
"pandas.read_csv",
"sklearn.metrics.r2_score",
"sklearn.model_selection.train_test_split"
],
[
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.manual_seed",
"torch.from_numpy",
"torch.distributions.Categorical",
"torch.no_grad",
"torch.argmax"
],
[
"numpy.array"
],
[
"numpy.sqrt",
"torch.from_numpy",
"numpy.prod",
"torch.cuda.device",
"numpy.array",
"torch.autograd.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gavin971/pyro2
|
[
"55c6d98b9c5d9372badc703ad5deb4a9d2cb8b06"
] |
[
"multigrid/MG.py"
] |
[
"\"\"\"\nThe multigrid module provides a framework for solving elliptic\nproblems. A multigrid object is just a list of grids, from the finest\nmesh down (by factors of two) to a single interior zone (each grid has\nthe same number of guardcells).\n\nThe main multigrid class is setup to solve a constant-coefficient\nHelmholtz equation:\n\n(alpha - beta L) phi = f\n\nwhere L is the Laplacian and alpha and beta are constants. If alpha =\n0 and beta = -1, then this is the Poisson equation.\n\nWe support Dirichlet or Neumann BCs, or a periodic domain.\n\nThe general usage is as follows:\n\n> a = multigrid.CellCenterMG2d(nx, ny, verbose=1, alpha=alpha, beta=beta)\n\nthis creates the multigrid object a, with a finest grid of nx by ny\nzones and the default boundary condition types. alpha and beta are\nthe coefficients of the Helmholtz equation. Setting verbose = 1\ncausing debugging information to be output, so you can see the\nresidual errors in each of the V-cycles.\n\n> a.init_zeros()\n\nthis initializes the solution vector with zeros (this is not necessary\nif you just created the multigrid object, but it can be used to reset\nthe solution between runs on the same object).\n\n> a.init_RHS(zeros((nx, ny), numpy.float64))\n\nthis initializes the RHS on the finest grid to 0 (Laplace's equation).\nAny RHS can be set by passing through an array of (nx, ny) values here.\n\nThen to solve, you just do:\n\n> a.solve(rtol = 1.e-10)\n\nwhere rtol is the desired tolerance (residual norm / source norm)\n\nto access the final solution, use the getSolution method\n\nv = a.get_solution()\n\nFor convenience, the grid information on the solution level is available as\nattributes to the class,\n\na.ilo, a.ihi, a.jlo, a.jhi are the indices bounding the interior\nof the solution array (i.e. excluding the ghost cells).\n\na.x and a.y are the coordinate arrays\na.dx and a.dy are the grid spacings\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport math\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nimport mesh.boundary as bnd\nimport mesh.patch as patch\nfrom util import msg\n\n\nclass CellCenterMG2d(object):\n \"\"\"\n The main multigrid class for cell-centered data.\n\n We require that nx = ny be a power of 2 and dx = dy, for\n simplicity\n \"\"\"\n\n def __init__(self, nx, ny, ng=1,\n xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0,\n xl_BC_type=\"dirichlet\", xr_BC_type=\"dirichlet\",\n yl_BC_type=\"dirichlet\", yr_BC_type=\"dirichlet\",\n xl_BC=None, xr_BC=None,\n yl_BC=None, yr_BC=None,\n alpha=0.0, beta=-1.0,\n nsmooth=10, nsmooth_bottom=50,\n verbose=0,\n aux_field=None, aux_bc=None,\n true_function=None, vis=0, vis_title=\"\"):\n \"\"\"\n Create the CellCenterMG2d object. 
Note that this requires a\n grid to be a power of 2 in size and square.\n\n Parameters\n ----------\n nx : int\n number of cells in x-direction\n ny : int\n number of cells in y-direction.\n xmin : float, optional\n minimum physical coordinate in x-direction\n xmax : float, optional\n maximum physical coordinate in x-direction\n ymin : float, optional\n minimum physical coordinate in y-direction\n ymax : float, optional\n maximum physical coordinate in y-direction\n xl_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional\n boundary condition to enforce on lower x face\n xr_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional\n boundary condition to enforce on upper x face\n yl_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional\n boundary condition to enforce on lower y face\n yr_BC_type : {'neumann', 'dirichlet', 'periodic'}, optional\n boundary condition to enforce on upper y face\n xl_BC : function, optional\n function (of y) to call to get -x boundary values\n (homogeneous assumed otherwise)\n xr_BC : function, optional\n function (of y) to call to get +x boundary values\n (homogeneous assumed otherwise)\n yl_BC : function, optional\n function (of x) to call to get -y boundary values\n (homogeneous assumed otherwise)\n yr_BC : function, optional\n function (of x) to call to get +y boundary values\n (homogeneous assumed otherwise)\n alpha : float, optional\n coefficient in Helmholtz equation (alpha - beta L) phi = f\n beta : float, optional\n coefficient in Helmholtz equation (alpha - beta L) phi = f\n nsmooth : int, optional\n number of smoothing iterations to be done at each intermediate\n level in the V-cycle (up and down)\n nsmooth_bottom : int, optional\n number of smoothing iterations to be done during the bottom\n solve\n verbose : int, optional\n increase verbosity during the solve (for verbose=1)\n aux_field : list of str, optional\n extra fields to define and carry at each level.\n Useful for subclassing.\n aux_bc : list of BC objects, optional\n the boundary conditions corresponding to the aux fields\n true_function : function, optional\n a function (of x,y) that provides the exact solution to\n the elliptic problem we are solving. 
This is used only\n for visualization purposes\n vis : int, optional\n output a detailed visualization of every smoothing step\n all throughout the V-cycle (if vis=1)\n vis_title : string, optional\n a descriptive title to write on the visualization plots\n\n Returns\n -------\n out: CellCenterMG2d object\n\n \"\"\"\n\n if nx != ny:\n raise ValueError(\"ERROR: multigrid currently requires nx = ny\")\n\n self.nx = nx\n self.ny = ny\n\n self.ng = ng\n\n self.xmin = xmin\n self.xmax = xmax\n\n self.ymin = ymin\n self.ymax = ymax\n\n if (xmax-xmin) != (ymax-ymin):\n raise ValueError(\"ERROR: multigrid currently requires a square domain\")\n\n\n self.alpha = alpha\n self.beta = beta\n\n self.nsmooth = nsmooth\n self.nsmooth_bottom = nsmooth_bottom\n\n self.max_cycles = 100\n\n self.verbose = verbose\n\n # for visualization purposes, we can set a function name that\n # provides the true solution to our elliptic problem.\n if true_function is not None:\n self.true_function = true_function\n\n # a small number used in computing the error, so we don't divide by 0\n self.small = 1.e-16\n\n # keep track of whether we've initialized the RHS\n self.initialized_rhs = 0\n\n # assume that self.nx = 2^(nlevels-1) and that nx = ny\n # this defines nlevels such that we end exactly on a 2x2 grid\n self.nlevels = int(math.log(self.nx)/math.log(2.0))\n\n # a multigrid object will be a list of grids\n self.grids = []\n\n # create the grids. Here, self.grids[0] will be the coarsest\n # grid and self.grids[nlevel-1] will be the finest grid\n # we store the solution, v, the rhs, f.\n\n # create the boundary condition object\n bc = bnd.BC(xlb=xl_BC_type, xrb=xr_BC_type,\n ylb=yl_BC_type, yrb=yr_BC_type)\n\n nx_t = ny_t = 2\n\n for i in range(self.nlevels):\n\n # create the grid\n my_grid = patch.Grid2d(nx_t, ny_t, ng=self.ng,\n xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)\n\n # add a CellCenterData2d object for this level to our list\n self.grids.append(patch.CellCenterData2d(my_grid, dtype=np.float64))\n\n # create the phi BC object -- this only applies for the finest\n # level. 
On the coarser levels, phi represents the residual,\n # which has homogeneous BCs\n bc_p = bnd.BC(xlb=xl_BC_type, xrb=xr_BC_type,\n ylb=yl_BC_type, yrb=yr_BC_type,\n xl_func=xl_BC, xr_func=xr_BC,\n yl_func=yl_BC, yr_func=yr_BC, grid=my_grid)\n\n if i == self.nlevels-1:\n self.grids[i].register_var(\"v\", bc_p)\n else:\n self.grids[i].register_var(\"v\", bc)\n\n self.grids[i].register_var(\"f\", bc)\n self.grids[i].register_var(\"r\", bc)\n\n if aux_field is not None:\n for f, b in zip(aux_field, aux_bc):\n self.grids[i].register_var(f, b)\n\n self.grids[i].create()\n\n if self.verbose: print(self.grids[i])\n\n nx_t = nx_t*2\n ny_t = ny_t*2\n\n\n # provide coordinate and indexing information for the solution mesh\n soln_grid = self.grids[self.nlevels-1].grid\n\n self.ilo = soln_grid.ilo\n self.ihi = soln_grid.ihi\n self.jlo = soln_grid.jlo\n self.jhi = soln_grid.jhi\n\n self.x = soln_grid.x\n self.dx = soln_grid.dx\n self.x2d = soln_grid.x2d\n\n self.y = soln_grid.y\n self.dy = soln_grid.dy # note, dy = dx is assumed\n self.y2d = soln_grid.y2d\n\n self.soln_grid = soln_grid\n\n # store the source norm\n self.source_norm = 0.0\n\n # after solving, keep track of the number of cycles taken, the\n # relative error from the previous cycle, and the residual error\n # (normalized to the source norm)\n self.num_cycles = 0\n self.residual_error = 1.e33\n self.relative_error = 1.e33\n\n # keep track of where we are in the V\n self.current_cycle = -1\n self.current_level = -1\n self.up_or_down = \"\"\n\n # for visualization -- what frame are we outputting?\n self.vis = vis\n self.vis_title = vis_title\n self.frame = 0\n\n\n # these draw functions are for visualization purposes and are\n # not ordinarily used, except for plotting the progression of the\n # solution within the V\n def _draw_V(self):\n \"\"\" draw the V-cycle on our optional visualization \"\"\"\n xdown = np.linspace(0.0, 0.5, self.nlevels)\n xup = np.linspace(0.5, 1.0, self.nlevels)\n\n ydown = np.linspace(1.0, 0.0, self.nlevels)\n yup = np.linspace(0.0, 1.0, self.nlevels)\n\n plt.plot(xdown, ydown, lw=2, color=\"k\")\n plt.plot(xup, yup, lw=2, color=\"k\")\n\n plt.scatter(xdown, ydown, marker=\"o\", color=\"k\", s=40)\n plt.scatter(xup, yup, marker=\"o\", color=\"k\", s=40)\n\n if self.up_or_down == \"down\":\n plt.scatter(xdown[self.nlevels-self.current_level-1],\n ydown[self.nlevels-self.current_level-1],\n marker=\"o\", color=\"r\", zorder=100, s=38)\n\n else:\n plt.scatter(xup[self.current_level], yup[self.current_level],\n marker=\"o\", color=\"r\", zorder=100, s=38)\n\n plt.text(0.7, 0.1, \"V-cycle %d\" % (self.current_cycle))\n plt.axis(\"off\")\n\n\n def _draw_solution(self):\n \"\"\" plot the current solution on our optional visualization \"\"\"\n myg = self.grids[self.current_level].grid\n\n v = self.grids[self.current_level].get_var(\"v\")\n\n cm = \"viridis\"\n\n plt.imshow(np.transpose(v[myg.ilo:myg.ihi+1,myg.jlo:myg.jhi+1]),\n interpolation=\"nearest\", origin=\"lower\",\n extent=[self.xmin, self.xmax, self.ymin, self.ymax], cmap=cm)\n\n #plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n\n\n if self.current_level == self.nlevels-1:\n plt.title(r\"solving $L\\phi = f$\")\n else:\n plt.title(r\"solving $Le = r$\")\n\n formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)\n cb = plt.colorbar(format=formatter, shrink=0.5)\n\n cb.ax.yaxis.offsetText.set_fontsize(\"small\")\n cl = plt.getp(cb.ax, 'ymajorticklabels')\n plt.setp(cl, fontsize=\"small\")\n\n\n def _draw_main_solution(self):\n \"\"\"\n plot the solution at the 
finest level on our optional\n visualization\n \"\"\"\n myg = self.grids[self.nlevels-1].grid\n\n v = self.grids[self.nlevels-1].get_var(\"v\")\n\n cm = \"viridis\"\n\n plt.imshow(np.transpose(v[myg.ilo:myg.ihi+1,myg.jlo:myg.jhi+1]),\n interpolation=\"nearest\", origin=\"lower\",\n extent=[self.xmin, self.xmax, self.ymin, self.ymax], cmap=cm)\n\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.title(r\"current fine grid solution\")\n\n formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)\n cb = plt.colorbar(format=formatter, shrink=0.5)\n\n cb.ax.yaxis.offsetText.set_fontsize(\"small\")\n cl = plt.getp(cb.ax, 'ymajorticklabels')\n plt.setp(cl, fontsize=\"small\")\n\n\n def _draw_main_error(self):\n \"\"\"\n plot the error with respect to the true solution on our optional\n visualization\n \"\"\"\n myg = self.grids[self.nlevels-1].grid\n\n v = self.grids[self.nlevels-1].get_var(\"v\")\n\n e = v - self.true_function(myg.x2d, myg.y2d)\n\n cmap = \"viridis\"\n\n plt.imshow(np.transpose(e[myg.ilo:myg.ihi+1,myg.jlo:myg.jhi+1]),\n interpolation=\"nearest\", origin=\"lower\",\n extent=[self.xmin, self.xmax, self.ymin, self.ymax], cm=cmap)\n\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.title(r\"current fine grid error\")\n\n formatter = matplotlib.ticker.ScalarFormatter(useMathText=True)\n cb = plt.colorbar(format=formatter, shrink=0.5)\n\n cb.ax.yaxis.offsetText.set_fontsize(\"small\")\n cl = plt.getp(cb.ax, 'ymajorticklabels')\n plt.setp(cl, fontsize=\"small\")\n\n\n def grid_info(self, level, indent=0):\n \"\"\"\n Report simple grid information\n \"\"\"\n print(\"{}level: {}, grid: {} x {}\".format(\n indent*\" \", level, self.grids[level].grid.nx, self.grids[level].grid.ny))\n\n\n def get_solution(self, grid=None):\n \"\"\"\n Return the solution after doing the MG solve\n\n If a grid object is passed in, then the solution is put on that\n grid -- not the passed in grid must have the same dx and dy\n\n Returns\n -------\n out : ndarray\n\n \"\"\"\n\n v = self.grids[self.nlevels-1].get_var(\"v\")\n\n if grid is None:\n return v.copy()\n else:\n myg = self.soln_grid\n assert grid.dx == myg.dx and grid.dy == myg.dy\n\n sol = grid.scratch_array()\n sol.v(buf=1)[:,:] = v.v(buf=1)\n return sol\n\n\n def get_solution_gradient(self, grid=None):\n \"\"\"\n Return the gradient of the solution after doing the MG solve. The\n x- and y-components are returned in separate arrays.\n\n If a grid object is passed in, then the gradient is computed on that\n grid. 
Note: the passed-in grid must have the same dx, dy\n\n Returns\n -------\n out : ndarray, ndarray\n\n \"\"\"\n\n myg = self.soln_grid\n\n if grid is None:\n og = self.soln_grid\n else:\n og = grid\n assert og.dx == myg.dx and og.dy == myg.dy\n\n v = self.grids[self.nlevels-1].get_var(\"v\")\n\n gx = og.scratch_array()\n gy = og.scratch_array()\n\n gx.v()[:,:] = 0.5*(v.ip(1) - v.ip(-1))/myg.dx\n gy.v()[:,:] = 0.5*(v.jp(1) - v.jp(-1))/myg.dy\n\n return gx, gy\n\n\n def get_solution_object(self):\n \"\"\"\n Return the full solution data object at the finest resolution\n after doing the MG solve\n\n Returns\n -------\n out : CellCenterData2d object\n\n \"\"\"\n return self.grids[self.nlevels-1]\n\n\n def init_solution(self, data):\n \"\"\"\n Initialize the solution to the elliptic problem by passing in\n a value for all defined zones\n\n Parameters\n ----------\n data : ndarray\n An array (of the same size as the finest MG level) with the\n values to initialize the solution to the elliptic problem.\n\n \"\"\"\n v = self.grids[self.nlevels-1].get_var(\"v\")\n v[:,:] = data.copy()\n\n\n def init_zeros(self):\n \"\"\"\n Set the initial solution to zero\n \"\"\"\n v = self.grids[self.nlevels-1].get_var(\"v\")\n v[:,:] = 0.0\n\n\n def init_RHS(self, data):\n \"\"\"\n Initialize the right hand side, f, of the Helmholtz equation\n (alpha - beta L) phi = f\n\n Parameters\n ----------\n data : ndarray\n An array (of the same size as the finest MG level) with the\n values to initialize the solution to the elliptic problem.\n\n \"\"\"\n\n f = self.grids[self.nlevels-1].get_var(\"f\")\n f[:,:] = data.copy()\n\n # store the source norm\n self.source_norm = f.norm()\n\n if self.verbose:\n print(\"Source norm = \", self.source_norm)\n\n self.initialized_rhs = 1\n\n\n def _compute_residual(self, level):\n \"\"\" compute the residual and store it in the r variable\"\"\"\n\n v = self.grids[level].get_var(\"v\")\n f = self.grids[level].get_var(\"f\")\n r = self.grids[level].get_var(\"r\")\n\n myg = self.grids[level].grid\n\n # compute the residual\n # r = f - alpha phi + beta L phi\n r.v()[:,:] = f.v()[:,:] - self.alpha*v.v()[:,:] + \\\n self.beta*( (v.ip(-1) + v.ip(1) - 2*v.v())/myg.dx**2 +\n (v.jp(-1) + v.jp(1) - 2*v.v())/myg.dy**2)\n\n\n def smooth(self, level, nsmooth):\n \"\"\"\n Use red-black Gauss-Seidel iterations to smooth the solution\n at a given level. 
This is used at each stage of the V-cycle\n (up and down) in the MG solution, but it can also be called\n directly to solve the elliptic problem (although it will take\n many more iterations).\n\n Parameters\n ----------\n level : int\n The level in the MG hierarchy to smooth the solution\n nsmooth : int\n The number of r-b Gauss-Seidel smoothing iterations to perform\n\n \"\"\"\n v = self.grids[level].get_var(\"v\")\n f = self.grids[level].get_var(\"f\")\n\n myg = self.grids[level].grid\n\n self.grids[level].fill_BC(\"v\")\n\n xcoeff = self.beta/myg.dx**2\n ycoeff = self.beta/myg.dy**2\n\n # do red-black G-S\n for i in range(nsmooth):\n\n # do the red black updating in four decoupled groups\n #\n #\n # | | |\n # --+-------+-------+--\n # | | |\n # | 4 | 3 |\n # | | |\n # --+-------+-------+--\n # | | |\n # jlo | 1 | 2 |\n # | | |\n # --+-------+-------+--\n # | ilo | |\n #\n # groups 1 and 3 are done together, then we need to\n # fill ghost cells, and then groups 2 and 4\n\n for n, (ix, iy) in enumerate([(0, 0), (1, 1), (1, 0), (0, 1)]):\n\n v.ip_jp(ix, iy, s=2)[:,:] = (f.ip_jp(ix, iy, s=2) +\n xcoeff*(v.ip_jp(1+ix, iy, s=2) + v.ip_jp(-1+ix, iy, s=2)) +\n ycoeff*(v.ip_jp(ix, 1+iy, s=2) + v.ip_jp(ix, -1+iy, s=2)) )/ \\\n (self.alpha + 2.0*xcoeff + 2.0*ycoeff)\n\n if n == 1 or n == 3:\n self.grids[level].fill_BC(\"v\")\n\n\n if self.vis == 1:\n plt.clf()\n\n plt.subplot(221)\n self._draw_solution()\n\n plt.subplot(222)\n self._draw_V()\n\n plt.subplot(223)\n self._draw_main_solution()\n\n plt.subplot(224)\n self._draw_main_error()\n\n\n plt.suptitle(self.vis_title, fontsize=18)\n\n plt.pause(0.001)\n plt.draw()\n plt.savefig(\"mg_%4.4d.png\" % (self.frame))\n self.frame += 1\n\n\n def solve(self, rtol=1.e-11):\n \"\"\"\n The main driver for the multigrid solution of the Helmholtz\n equation. This controls the V-cycles, smoothing at each\n step of the way and uses simple smoothing at the coarsest\n level to perform the bottom solve.\n\n Parameters\n ----------\n rtol : float\n The relative tolerance (residual norm / source norm) to\n solve to. Note that if the source norm is 0 (e.g. 
the\n righthand side of our equation is 0), then we just use\n the norm of the residual.\n\n \"\"\"\n\n # start by making sure that we've initialized the RHS\n if not self.initialized_rhs:\n msg.fail(\"ERROR: RHS not initialized\")\n\n if self.verbose:\n print(\"source norm = \", self.source_norm)\n\n old_phi = self.grids[self.nlevels-1].get_var(\"v\").copy()\n\n residual_error = 1.e33\n cycle = 1\n\n # V-cycles until we achieve the L2 norm of the residual < rtol\n while residual_error > rtol and cycle <= self.max_cycles:\n\n self.current_cycle = cycle\n\n # zero out the solution on all but the finest grid\n for level in range(self.nlevels-1):\n self.grids[level].zero(\"v\")\n\n if self.verbose:\n print(\"<<< beginning V-cycle (cycle {}) >>>\\n\".format(cycle))\n\n # do V-cycles through the entire hierarchy\n level = self.nlevels-1\n self.v_cycle(level)\n\n # compute the error with respect to the previous solution\n # this is for diagnostic purposes only -- it is not used to\n # determine convergence\n soln = self.grids[self.nlevels-1]\n\n diff = (soln.get_var(\"v\") - old_phi)/(soln.get_var(\"v\") + self.small)\n relative_error = soln.grid.norm(diff)\n\n old_phi = soln.get_var(\"v\").copy()\n\n # compute the residual error, relative to the source norm\n self._compute_residual(self.nlevels-1)\n fp = self.grids[level]\n r = fp.get_var(\"r\")\n\n if self.source_norm != 0.0:\n residual_error = r.norm()/self.source_norm\n else:\n residual_error = r.norm()\n\n if self.verbose:\n print(\"cycle {}: relative err = {}, residual err = {}\\n\".format(\n cycle, relative_error, residual_error))\n\n cycle += 1\n\n self.num_cycles = cycle-1\n self.relative_error = relative_error\n self.residual_error = residual_error\n fp.fill_BC(\"v\")\n\n\n def v_cycle(self, level):\n \"\"\"\n Perform a V-cycle for a single 2-level solve. This is applied\n recursively do V-cycle through the entire hierarchy.\n\n \"\"\"\n\n if level > 0:\n\n self.current_level = level\n self.up_or_down = \"down\"\n\n # pointers to the fine and coarse data\n fp = self.grids[level]\n cp = self.grids[level-1]\n\n if self.verbose:\n self._compute_residual(level)\n self.grid_info(level, indent=2)\n print(\" before G-S, residual L2: {}\".format(fp.get_var(\"r\").norm()))\n\n # smooth on the current level\n self.smooth(level, self.nsmooth)\n\n # compute the residual\n self._compute_residual(level)\n\n if self.verbose:\n print(\" after G-S, residual L2: {}\\n\".format(fp.get_var(\"r\").norm()))\n\n # restrict the residual down to the RHS of the coarser level\n f_coarse = cp.get_var(\"f\")\n f_coarse.v()[:,:] = fp.restrict(\"r\").v()\n\n # solve the coarse problem\n self.v_cycle(level-1)\n\n # ascending part\n self.current_level = level\n self.up_or_down = \"up\"\n\n fp = self.grids[level]\n cp = self.grids[level-1]\n\n # prolong the error up from the coarse grid\n e = cp.prolong(\"v\")\n\n # correct the solution on the current grid\n v = fp.get_var(\"v\")\n v.v()[:,:] += e.v()\n\n fp.fill_BC(\"v\")\n\n if self.verbose:\n self._compute_residual(level)\n self.grid_info(level, indent=2)\n print(\" before G-S, residual L2: {}\".format(fp.get_var(\"r\").norm()))\n\n # smooth\n self.smooth(level, self.nsmooth)\n\n if self.verbose:\n self._compute_residual(level)\n print(\" after G-S, residual L2: {}\\n\".format(fp.get_var(\"r\").norm()))\n\n else:\n # bottom solve: solve the discrete coarse problem. 
We\n # could use any number of different matrix solvers here\n # (like CG), but since we are 2x2 by design at this point,\n # we will just smooth\n if self.verbose: print(\" bottom solve:\")\n\n self.current_level = level\n bp = self.grids[level]\n\n if self.verbose:\n self.grid_info(level, indent=2)\n print(\"\")\n\n self.smooth(level, self.nsmooth_bottom)\n\n bp.fill_BC(\"v\")\n"
] |
[
[
"numpy.linspace",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.pyplot.text",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.getp",
"numpy.transpose",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pause"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
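Editor's note: the `smooth` docstring in the multigrid row above describes red-black Gauss-Seidel smoothing of `(alpha - beta L) phi = f`. The sketch below is an editorial illustration (not part of the dataset row), specialised to `alpha = 0`, `beta = 1`, `dx = dy`; the grid size, the zero-Dirichlet border, and the right-hand side are assumptions.

```python
# A minimal red-black Gauss-Seidel smoother for -laplacian(v) = f on a
# square grid with homogeneous Dirichlet boundaries (kept as a zero border).
import numpy as np

def smooth_rb(v, f, dx, nsmooth):
    """Red-black Gauss-Seidel sweeps over the interior of v."""
    n = v.shape[0]
    for _ in range(nsmooth):
        for color in (0, 1):               # red points first, then black
            for i in range(1, n - 1):
                for j in range(1, n - 1):
                    if (i + j) % 2 == color:
                        v[i, j] = 0.25 * (v[i-1, j] + v[i+1, j] +
                                          v[i, j-1] + v[i, j+1] +
                                          dx * dx * f[i, j])
    return v

n = 33
dx = 1.0 / (n - 1)
f = np.ones((n, n))                        # constant source term
v = smooth_rb(np.zeros((n, n)), f, dx, 100)
```

Sweeping the two colours separately means each update only reads neighbours of the opposite colour, which is why the dataset code can update each colour as a strided slice and refill ghost cells between the decoupled groups.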
i-m-vivek/iGibson
|
[
"c9009a3da4bfe05e8a3c058d83e0a5d3be0cd648",
"c9009a3da4bfe05e8a3c058d83e0a5d3be0cd648",
"c9009a3da4bfe05e8a3c058d83e0a5d3be0cd648",
"c9009a3da4bfe05e8a3c058d83e0a5d3be0cd648"
] |
[
"gibson2/examples/demo/igsdf_obj_viz.py",
"gibson2/utils/map_utils.py",
"gibson2/robots/turtlebot_robot.py",
"gibson2/robots/fetch_robot.py"
] |
[
"import cv2\nimport sys\nimport os\nimport numpy as np\nfrom gibson2.simulator import Simulator\nfrom gibson2.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer\nfrom gibson2.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings\nfrom gibson2.render.profiler import Profiler\n# from gibson2.utils.assets_utils import get_model_path\nfrom gibson2.objects.articulated_object import ArticulatedObject\nimport gibson2\nfrom PIL import Image\nimport pybullet as p\nimport subprocess\n\ndef load_obj_np(filename_obj, normalization=False, texture_size=4, load_texture=False,\n texture_wrapping='REPEAT', use_bilinear=True):\n \"\"\"Load Wavefront .obj file into numpy array\n This function only supports vertices (v x x x) and faces (f x x x).\n \"\"\"\n # load vertices\n vertices = []\n with open(filename_obj) as f:\n lines = f.readlines()\n\n for line in lines:\n if len(line.split()) == 0:\n continue\n if line.split()[0] == 'v':\n vertices.append([float(v) for v in line.split()[1:4]])\n vertices = np.vstack(vertices).astype(np.float32)\n\n # load faces\n faces = []\n for line in lines:\n if len(line.split()) == 0:\n continue\n if line.split()[0] == 'f':\n vs = line.split()[1:]\n nv = len(vs)\n v0 = int(vs[0].split('/')[0])\n for i in range(nv - 2):\n v1 = int(vs[i + 1].split('/')[0])\n v2 = int(vs[i + 2].split('/')[0])\n faces.append((v0, v1, v2))\n faces = np.vstack(faces).astype(np.int32) - 1\n\n # load textures\n textures = None\n\n assert load_texture is False # Since I commented out the block below\n # if load_texture:\n # for line in lines:\n # if line.startswith('mtllib'):\n # filename_mtl = os.path.join(os.path.dirname(filename_obj), line.split()[1])\n # textures = load_textures(filename_obj, filename_mtl, texture_size,\n # texture_wrapping=texture_wrapping,\n # use_bilinear=use_bilinear)\n # if textures is None:\n # raise Exception('Failed to load textures.')\n # textures = textures.cpu().numpy()\n\n assert normalization is False # Since I commented out the block below\n # # normalize into a unit cube centered zero\n # if normalization:\n # vertices -= vertices.min(0)[0][None, :]\n # vertices /= torch.abs(vertices).max()\n # vertices *= 2\n # vertices -= vertices.max(0)[0][None, :] / 2\n\n if load_texture:\n return vertices, faces, textures\n else:\n return vertices, faces\n\n\ndef main():\n global _mouse_ix, _mouse_iy, down, view_direction\n\n model_path = sys.argv[1]\n print(model_path)\n\n model_id = os.path.basename(model_path)\n category = os.path.basename(os.path.dirname(model_path))\n\n hdr_texture = os.path.join(\n gibson2.ig_dataset_path, 'scenes', 'background', \n 'photo_studio_01_2k.hdr')\n settings = MeshRendererSettings(env_texture_filename=hdr_texture,\n enable_shadow=True, msaa=True,\n light_dimming_factor=1.5)\n\n s = Simulator(mode='headless', \n image_width=1800, image_height=1200, \n vertical_fov=70, rendering_settings=settings\n )\n\n s.renderer.set_light_position_direction([0,0,10], [0,0,0])\n\n s.renderer.load_object('plane/plane_z_up_0.obj', scale=[3,3,3])\n s.renderer.add_instance(0)\n s.renderer.set_pose([0,0,-1.5,1, 0, 0.0, 0.0], -1)\n\n\n v = []\n mesh_path = os.path.join(model_path, 'shape/visual')\n for fn in os.listdir(mesh_path):\n if fn.endswith('obj'):\n vertices, faces = load_obj_np(os.path.join(mesh_path, fn))\n v.append(vertices)\n\n v = np.vstack(v)\n print(v.shape)\n xlen = np.max(v[:,0]) - np.min(v[:,0])\n ylen = np.max(v[:,1]) - np.min(v[:,1])\n zlen = np.max(v[:,2]) - np.min(v[:,2])\n scale = 1.5/(max([xlen, ylen, zlen]))\n 
center = np.mean(v, axis=0)\n centered_v = v - center\n\n center = (np.max(v, axis=0) + np.min(v, axis=0)) / 2.\n\n urdf_path = os.path.join(model_path, '{}.urdf'.format(model_id))\n print(urdf_path)\n obj = ArticulatedObject(filename=urdf_path, scale=scale)\n s.import_object(obj)\n obj.set_position(center)\n s.sync()\n print(s.renderer.visual_objects, s.renderer.instances)\n\n _mouse_ix, _mouse_iy = -1, -1\n down = False\n\n theta,r = 0,1.5\n\n px = r*np.sin(theta)\n py = r*np.cos(theta)\n pz = 1\n camera_pose = np.array([px, py, pz])\n s.renderer.set_camera(camera_pose, [0,0,0], [0, 0, 1])\n\n num_views = 6 \n save_dir = os.path.join(model_path, 'visualizations')\n for i in range(num_views):\n theta += np.pi*2/(num_views+1)\n obj.set_orientation([0., 0., 1.0, np.cos(theta/2)])\n s.sync()\n with Profiler('Render'):\n frame = s.renderer.render(modes=('rgb'))\n img = Image.fromarray((\n 255*np.concatenate(frame, axis=1)[:,:,:3]).astype(np.uint8))\n img.save(os.path.join(save_dir, '{:02d}.png'.format(i)))\n\n cmd = 'ffmpeg -framerate 2 -i {s}/%2d.png -y -r 16 -c:v libx264 -pix_fmt yuvj420p {s}/{m}.mp4'.format(s=save_dir,m=model_id)\n subprocess.call(cmd, shell=True)\n cmd = 'rm {}/??.png'.format(save_dir)\n subprocess.call(cmd, shell=True)\n\nif __name__ == '__main__':\n main()\n",
"import os\nimport numpy as np\nfrom tqdm import tqdm\nimport cv2\nfrom PIL import Image\nimport sys\nfrom scipy.spatial import ConvexHull\n\n\ndef get_xy_floors(vertices, faces, dist_threshold=-0.98):\n z_faces = []\n z = np.array([0, 0, 1])\n faces_selected = []\n for face in tqdm(faces):\n normal = np.cross(\n vertices[face[2]] - vertices[face[1]], vertices[face[1]] - vertices[face[0]])\n dist = np.dot(normal, z) / np.linalg.norm(normal)\n if (dist_threshold is None) or ((dist_threshold is not None) and (dist < dist_threshold)):\n z_faces.append(vertices[face[0]][2])\n faces_selected.append(face)\n\n return np.array(z_faces), vertices, faces_selected\n\n\ndef gen_trav_map(vertices, faces, output_folder, add_clutter=False,\n trav_map_filename_format='floor_trav_{}.png',\n obstacle_map_filename_format='floor_{}.png'):\n \"\"\"\n Generate traversability maps.\n \"\"\"\n floors = [0.0]\n\n z_faces, vertices, faces_selected = get_xy_floors(vertices, faces)\n z_faces_all, vertices_all, faces_selected_all = get_xy_floors(\n vertices, faces, dist_threshold=None)\n\n xmin, ymin, _ = vertices.min(axis=0)\n xmax, ymax, _ = vertices.max(axis=0)\n\n max_length = np.max([np.abs(xmin), np.abs(ymin),\n np.abs(xmax), np.abs(ymax)])\n max_length = np.ceil(max_length).astype(np.int)\n\n wall_maps = gen_map(vertices, faces, output_folder,\n img_filename_format=obstacle_map_filename_format)\n wall_pts = np.array(np.where(wall_maps[0] == 0)).T\n wall_convex_hull = ConvexHull(wall_pts)\n wall_map_hull = np.zeros(wall_maps[0].shape).astype(np.uint8)\n cv2.fillPoly(wall_map_hull, [wall_convex_hull.points[wall_convex_hull.vertices][:, ::-1].reshape((-1, 1, 2)).astype(\n np.int32)], 255)\n\n for i_floor in range(len(floors)):\n floor = floors[i_floor]\n mask = (np.abs(z_faces - floor) < 0.2)\n faces_new = np.array(faces_selected)[mask, :]\n\n t = (vertices[faces_new][:, :, :2] + max_length) * 100\n t = t.astype(np.int32)\n\n floor_map = np.zeros((2 * max_length * 100, 2 * max_length * 100))\n\n cv2.fillPoly(floor_map, t, 1)\n\n if add_clutter is True: # Build clutter map\n mask1 = ((z_faces_all - floor) < 2.0) * \\\n ((z_faces_all - floor) > 0.05)\n faces_new1 = np.array(faces_selected_all)[mask1, :]\n\n t1 = (vertices_all[faces_new1][:, :, :2] + max_length) * 100\n t1 = t1.astype(np.int32)\n\n clutter_map = np.zeros(\n (2 * max_length * 100, 2 * max_length * 100))\n cv2.fillPoly(clutter_map, t1, 1)\n floor_map = np.float32((clutter_map == 0) * (floor_map == 1))\n\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))\n erosion = cv2.dilate(floor_map, kernel, iterations=2)\n erosion = cv2.erode(erosion, kernel, iterations=2)\n wall_map = wall_maps[i_floor]\n wall_map = cv2.erode(wall_map, kernel, iterations=1)\n erosion[wall_map == 0] = 0\n erosion[wall_map_hull == 0] = 0 # crop using convex hull\n\n cur_img = Image.fromarray((erosion * 255).astype(np.uint8))\n #cur_img = Image.fromarray(np.flipud(cur_img))\n cur_img.save(os.path.join(\n output_folder, trav_map_filename_format.format(i_floor)))\n\n\nINTERSECT_EDGE = 0\nINTERSECT_VERTEX = 1\n\n\nclass Plane(object):\n def __init__(self, orig, normal):\n self.orig = orig\n self.n = normal / np.linalg.norm(normal)\n\n def __str__(self):\n return 'plane(o=%s, n=%s)' % (self.orig, self.n)\n\n\ndef point_to_plane_dist(p, plane):\n return np.dot((p - plane.orig), plane.n)\n\n\ndef compute_triangle_plane_intersections(vertices, faces, tid, plane, dists, dist_tol=1e-8):\n \"\"\"\n Compute the intersection between a triangle and a plane\n Returns a list of 
intersections in the form\n (INTERSECT_EDGE, <intersection point>, <edge>) for edge intersections\n (INTERSECT_VERTEX, <intersection point>, <vertex index>) for vertices\n This returns between 0 and 2 intersections:\n - 0 : the plane does not intersect the triangle\n - 1 : one of the triangle's vertices lies on the plane (so it just\n \"touches\" the plane without really intersecting)\n - 2 : the plane slices the triangle in two parts (either vertex-edge,\n vertex-vertex or edge-edge)\n \"\"\"\n\n # TODO: Use an edge intersection cache (we currently compute each edge\n # intersection twice : once for each tri)\n\n # This is to avoid registering the same vertex intersection twice\n # from two different edges\n vert_intersect = {vid: False for vid in faces[tid]}\n\n # Iterate through the edges, cutting the ones that intersect\n intersections = []\n for e in ((faces[tid][0], faces[tid][1]),\n (faces[tid][0], faces[tid][2]),\n (faces[tid][1], faces[tid][2])):\n v1 = vertices[e[0]]\n d1 = dists[e[0]]\n v2 = vertices[e[1]]\n d2 = dists[e[1]]\n\n if np.fabs(d1) < dist_tol:\n # Avoid creating the vertex intersection twice\n if not vert_intersect[e[0]]:\n # point on plane\n intersections.append((INTERSECT_VERTEX, v1, e[0]))\n vert_intersect[e[0]] = True\n if np.fabs(d2) < dist_tol:\n if not vert_intersect[e[1]]:\n # point on plane\n intersections.append((INTERSECT_VERTEX, v2, e[1]))\n vert_intersect[e[1]] = True\n\n # If vertices are on opposite sides of the plane, we have an edge\n # intersection\n if d1 * d2 < 0:\n # Due to numerical accuracy, we could have both a vertex intersect\n # and an edge intersect on the same vertex, which is impossible\n if not vert_intersect[e[0]] and not vert_intersect[e[1]]:\n # intersection factor (between 0 and 1)\n # here is a nice drawing :\n # https://ravehgonen.files.wordpress.com/2013/02/slide8.png\n # keep in mind d1, d2 are *signed* distances (=> d1 - d2)\n s = d1 / (d1 - d2)\n vdir = v2 - v1\n ipos = v1 + vdir * s\n intersections.append((INTERSECT_EDGE, ipos, e))\n\n return intersections\n\n\ndef gen_map(vertices, faces, output_folder, img_filename_format='floor_{}.png'):\n xmin, ymin, _ = vertices.min(axis=0)\n xmax, ymax, _ = vertices.max(axis=0)\n\n max_length = np.max([np.abs(xmin), np.abs(ymin),\n np.abs(xmax), np.abs(ymax)])\n max_length = np.ceil(max_length).astype(np.int)\n\n floors = [0.0]\n print(floors)\n\n floor_maps = []\n\n for i_floor, floor in enumerate(floors):\n dists = []\n z = float(floor) + 0.5\n cross_section = []\n plane = Plane(np.array([0, 0, z]), np.array([0, 0, 1]))\n\n for v in vertices:\n dists.append(point_to_plane_dist(v, plane))\n\n for i in tqdm(range(len(faces))):\n res = compute_triangle_plane_intersections(vertices, faces,\n i, plane, dists)\n if len(res) == 2:\n cross_section.append((res[0][1], res[1][1]))\n\n floor_map = np.ones((2 * max_length * 100, 2 * max_length * 100))\n\n for item in cross_section:\n x1, x2 = (item[0][0]+max_length) * \\\n 100, (item[1][0]+max_length) * 100\n y1, y2 = (item[0][1]+max_length) * \\\n 100, (item[1][1]+max_length) * 100\n\n cv2.line(floor_map, (int(x1), int(y1)),\n (int(x2), int(y2)), color=(0, 0, 0), thickness=2)\n\n floor_maps.append(floor_map)\n cur_img = Image.fromarray((floor_map * 255).astype(np.uint8))\n #cur_img = Image.fromarray(np.flipud(cur_img))\n img_filename = img_filename_format.format(i_floor)\n cur_img.save(os.path.join(output_folder, img_filename))\n\n return floor_maps\n",
"import gym\nimport numpy as np\n\nfrom gibson2.robots.robot_locomotor import LocomotorRobot\n\n\nclass Turtlebot(LocomotorRobot):\n \"\"\"\n Turtlebot robot\n Reference: http://wiki.ros.org/Robots/TurtleBot\n Uses joint velocity control\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.velocity = config.get(\"velocity\", 1.0)\n LocomotorRobot.__init__(self,\n \"turtlebot/turtlebot.urdf\",\n action_dim=2,\n scale=config.get(\"robot_scale\", 1.0),\n is_discrete=config.get(\"is_discrete\", False),\n control=\"velocity\")\n\n def set_up_continuous_action_space(self):\n \"\"\"\n Set up continuous action space\n \"\"\"\n self.action_space = gym.spaces.Box(shape=(self.action_dim,),\n low=-1.0,\n high=1.0,\n dtype=np.float32)\n self.action_high = np.full(\n shape=self.action_dim, fill_value=self.velocity)\n self.action_low = -self.action_high\n\n def set_up_discrete_action_space(self):\n \"\"\"\n Set up discrete action space\n \"\"\"\n self.action_list = [[self.velocity, self.velocity], [-self.velocity, -self.velocity],\n [self.velocity * 0.5, -self.velocity * 0.5],\n [-self.velocity * 0.5, self.velocity * 0.5], [0, 0]]\n self.action_space = gym.spaces.Discrete(len(self.action_list))\n self.setup_keys_to_action()\n\n def setup_keys_to_action(self):\n self.keys_to_action = {\n (ord('w'),): 0, # forward\n (ord('s'),): 1, # backward\n (ord('d'),): 2, # turn right\n (ord('a'),): 3, # turn left\n (): 4 # stay still\n }\n",
"import gym\nimport numpy as np\nimport pybullet as p\n\nfrom gibson2.external.pybullet_tools.utils import joints_from_names, set_joint_positions\nfrom gibson2.robots.robot_locomotor import LocomotorRobot\n\n\nclass Fetch(LocomotorRobot):\n \"\"\"\n Fetch Robot\n Reference: https://fetchrobotics.com/robotics-platforms/fetch-mobile-manipulator/\n Uses joint velocity control\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.wheel_velocity = config.get('wheel_velocity', 1.0)\n self.torso_lift_velocity = config.get('torso_lift_velocity', 1.0)\n self.arm_velocity = config.get('arm_velocity', 1.0)\n self.wheel_dim = 2\n self.torso_lift_dim = 1\n self.arm_dim = 7\n LocomotorRobot.__init__(self,\n \"fetch/fetch.urdf\",\n action_dim=self.wheel_dim + self.torso_lift_dim + self.arm_dim,\n scale=config.get(\"robot_scale\", 1.0),\n is_discrete=config.get(\"is_discrete\", False),\n control=\"velocity\",\n self_collision=True)\n\n def set_up_continuous_action_space(self):\n \"\"\"\n Set up continuous action space\n \"\"\"\n self.action_high = np.array([self.wheel_velocity] * self.wheel_dim +\n [self.torso_lift_velocity] * self.torso_lift_dim +\n [self.arm_velocity] * self.arm_dim)\n self.action_low = -self.action_high\n self.action_space = gym.spaces.Box(shape=(self.action_dim,),\n low=-1.0,\n high=1.0,\n dtype=np.float32)\n\n def set_up_discrete_action_space(self):\n \"\"\"\n Set up discrete action space\n \"\"\"\n assert False, \"Fetch does not support discrete actions\"\n\n def robot_specific_reset(self):\n \"\"\"\n Fetch robot specific reset.\n Reset the torso lift joint and tuck the arm towards the body\n \"\"\"\n super(Fetch, self).robot_specific_reset()\n\n # roll the arm to its body\n robot_id = self.robot_ids[0]\n arm_joints = joints_from_names(robot_id,\n [\n 'torso_lift_joint',\n 'shoulder_pan_joint',\n 'shoulder_lift_joint',\n 'upperarm_roll_joint',\n 'elbow_flex_joint',\n 'forearm_roll_joint',\n 'wrist_flex_joint',\n 'wrist_roll_joint'\n ])\n\n rest_position = (0.02, np.pi / 2.0 - 0.4, np.pi / 2.0 -\n 0.1, -0.4, np.pi / 2.0 + 0.1, 0.0, np.pi / 2.0, 0.0)\n # might be a better pose to initiate manipulation\n # rest_position = (0.30322468280792236, -1.414019864768982,\n # 1.5178184935241699, 0.8189625336474915,\n # 2.200358942909668, 2.9631312579803466,\n # -1.2862852996643066, 0.0008453550418615341)\n\n set_joint_positions(robot_id, arm_joints, rest_position)\n\n def get_end_effector_position(self):\n \"\"\"\n Get end-effector position\n \"\"\"\n return self.parts['gripper_link'].get_position()\n\n def end_effector_part_index(self):\n \"\"\"\n Get end-effector link id\n \"\"\"\n return self.parts['gripper_link'].body_part_index\n\n def load(self):\n \"\"\"\n Load the robot into pybullet. Filter out unnecessary self collision\n due to modeling imperfection in the URDF\n \"\"\"\n ids = super(Fetch, self).load()\n robot_id = self.robot_ids[0]\n\n disable_collision_names = [\n ['torso_lift_joint', 'shoulder_lift_joint'],\n ['torso_lift_joint', 'torso_fixed_joint'],\n ['caster_wheel_joint', 'estop_joint'],\n ['caster_wheel_joint', 'laser_joint'],\n ['caster_wheel_joint', 'torso_fixed_joint'],\n ['caster_wheel_joint', 'l_wheel_joint'],\n ['caster_wheel_joint', 'r_wheel_joint'],\n ]\n for names in disable_collision_names:\n link_a, link_b = joints_from_names(robot_id, names)\n p.setCollisionFilterPair(robot_id, robot_id, link_a, link_b, 0)\n\n return ids\n"
] |
[
[
"numpy.min",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.vstack"
],
[
"numpy.dot",
"numpy.abs",
"scipy.spatial.ConvexHull",
"numpy.linalg.norm",
"numpy.ones",
"numpy.ceil",
"numpy.float32",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.fabs"
],
[
"numpy.full"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
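Editor's note: `compute_triangle_plane_intersections` in the `map_utils.py` row above hinges on one formula: an edge whose endpoint distances to the plane have opposite signs crosses it at `s = d1 / (d1 - d2)`. A tiny stand-alone check (editorial illustration; the plane and edge values are invented):

```python
# Verify the edge/plane intersection factor s = d1 / (d1 - d2), where d1 and
# d2 are *signed* point-to-plane distances of the edge endpoints.
import numpy as np

orig = np.array([0.0, 0.0, 0.5])   # a point on the plane z = 0.5
n = np.array([0.0, 0.0, 1.0])      # unit plane normal

v1 = np.array([1.0, 2.0, 0.0])
v2 = np.array([1.0, 2.0, 2.0])

d1 = np.dot(v1 - orig, n)          # -0.5 (below the plane)
d2 = np.dot(v2 - orig, n)          # +1.5 (above the plane)
assert d1 * d2 < 0                 # endpoints straddle the plane

s = d1 / (d1 - d2)                 # 0.25, always in (0, 1) here
ipos = v1 + s * (v2 - v1)
print(ipos)                        # [1.  2.  0.5] -- lies on the plane
```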
gmamaladze/robo-pi
|
[
"f9affc63760774073a3b1de4e4ea064bde2eb074"
] |
[
"tfvoicepi/tools/play.py"
] |
[
"import pyaudio\nimport numpy as np\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 16000\nCHUNK_SIZE = 1000\nMAX_INT16 = np.iinfo(np.int16).max\n\np = pyaudio.PyAudio()\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n output=True)\n\nfor i in range(0, 18):\n print(i)\n f = open(str(i) + \".raw\", \"rb\")\n with f:\n data = f.read()\n data_float = np.frombuffer(data, dtype=np.float)\n data_scaled = data_float * MAX_INT16\n data_int = data_scaled.astype(int)\n buff = memoryview(data_int).tobytes()\n stream.write(buff)\n\nstream.stop_stream()\nstream.close()\n\np.terminate()\n"
] |
[
[
"numpy.frombuffer",
"numpy.iinfo"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
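Editor's note: `play.py` above converts raw float samples to 16-bit PCM by scaling with `np.iinfo(np.int16).max`. Below is a self-contained sketch of that conversion on a synthetic tone (file I/O and PyAudio omitted; the 440 Hz test signal is an assumption):

```python
# Scale float samples in [-1, 1] into the int16 range expected by a
# 16-bit PCM stream (pyaudio.paInt16).
import numpy as np

MAX_INT16 = np.iinfo(np.int16).max

t = np.linspace(0.0, 1.0, 16000, endpoint=False)       # 1 s at 16 kHz
data_float = 0.5 * np.sin(2.0 * np.pi * 440.0 * t)     # 440 Hz test tone
data_int = (data_float * MAX_INT16).astype(np.int16)   # 2 bytes per sample
buff = data_int.tobytes()                              # what stream.write needs
```

Casting to `np.int16` (rather than the platform `int`) keeps each sample two bytes wide, matching the `paInt16` stream format.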
RuthAngus/kinematics-and-rotation
|
[
"7cad283612bc70ca9d12c79978561b938f527198"
] |
[
"code/test_dispersion.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom dispersion import *\nimport scipy.stats as sps\nimport pandas as pd\n\n\ndef test_dispersion():\n np.random.seed(42)\n N = 10000\n x = np.random.uniform(0, 100, N)\n y = np.random.randn(N)*x\n inds = np.argsort(x)\n x, y = x[inds], y[inds]\n\n # Running dispersion\n newx, d = running_dispersion(x, y, 100, mad=False)\n\n AT = np.vstack((newx, np.ones_like(newx)))\n ATA = np.dot(AT, AT.T)\n w = np.linalg.solve(ATA, np.dot(AT, d))\n\n # Binned dispersion\n bins, dbins, err, mean = binned_dispersion(x, y, 10, method=\"rms\")\n\n # Dispersion where you define the bins.\n db, k = dispersion(x, y, bins, method=\"std\")\n\n plt.figure(figsize=(10, 6))\n plt.plot(x, y, \".\")\n plt.plot(x, x, zorder=3, label=\"True dispersion\")\n plt.plot(newx, d, \".\", label=\"running\")\n plt.plot(x, w[0]*x + w[1], label=\"fit to running\")\n plt.plot(np.diff(bins)*.5+bins[:-1], db, \"k*\", label=\"pre-set bins\",\n zorder=10)\n plt.title(\"TESTING DISPERSION\")\n plt.errorbar(bins, dbins, yerr=err, fmt=\"wo\", markeredgecolor=\"k\",\n label=\"RMS\")\n plt.legend()\n plt.savefig(\"dispersion_test\")\n\n assert np.isclose(w[0], 1, atol=.1)\n\n\ndef test_MC():\n np.random.seed(42)\n N = 10000\n x = np.random.uniform(0, 100, N)\n y = np.random.randn(N)*x\n xerr, yerr = 5, 10\n xerrs = np.random.randn(N)*xerr\n yerrs = np.random.randn(N)*yerr\n bins = np.linspace(0, 100, 10)\n d, d_err, k, k_err = MC_dispersion(x+xerrs, y+yerrs, xerrs, yerrs, bins,\n 100, method=\"std\")\n d2, d2_err, k2, k2_err = MC_dispersion(x+xerrs, y+yerrs, xerrs, yerrs,\n bins, 100, method=\"mad\")\n plt.plot(x, y, \".\")\n print(np.shape(d))\n plt.plot(bins[:-1], d)\n plt.plot(bins[:-1], d2)\n plt.savefig(\"test\")\n\n\ndef test_sigma_clip():\n np.random.seed(42)\n\n x1 = np.random.randn(1000)\n x2 = np.random.randn(100)*5\n x = np.concatenate((x1, x2))\n np.random.shuffle(x)\n\n print(sps.kurtosis(x))\n\n plt.plot(np.arange(1100), x, \".\")\n xnew, m = sigma_clip(x, 3)\n\n print(sps.kurtosis(xnew))\n\n # plt.plot(np.arange(1100)[m], xnew, \".\", alpha=.5)\n # plt.savefig(\"test\")\n\n\ndef test_select_stars():\n df = pd.DataFrame(dict({\"A\": np.arange(10), \"B\": np.arange(10, 20)}))\n ms = select_stars(df, [1, 5, 8], \"A\")\n assert np.all(df.A.values[ms[0]] > 1)\n assert np.all(df.A.values[ms[0]] < 5)\n assert np.all(df.A.values[ms[1]] < 8)\n assert np.all(df.A.values[ms[1]] > 5)\n\n\ndef test_fit_line():\n w_true = [15, .2]\n\n n = 100\n x = np.linspace(0, 100, n)\n\n err = .5\n yerr = np.ones_like(x)*err\n np.random.seed(42)\n y = w_true[0] + w_true[1]*x + np.random.randn(n)*err\n\n w, wvar = fit_line(x, y, yerr)\n\n assert np.isclose(w[0], 15, atol=1*np.sqrt(wvar[0, 0]))\n assert np.isclose(w[1], .2, atol=1*np.sqrt(wvar[1, 1]))\n\n\ndef test_err_to_log10_err():\n value = 20\n err = .1\n assert np.isclose(10**(np.log10(value) + err_to_log10_err(value, err)),\n value + err, atol=.01*value)\n\n\ndef test_tan_dist():\n x1, y1 = 1, 1\n x2, y2 = 2, 2\n assert tan_dist(x1, y1, x2, y2) == np.sqrt(2)\n\n\ndef test_n_nearest_points():\n x1, y1 = 10, 12\n np.random.seed(42)\n x2, y2 = [np.random.randn(1000) + 10 for i in range(2)]\n z2 = np.random.randn(1000)*y2\n nx, ny, nz = n_nearest_points(x1, y1, x2, y2, z2, 50)\n\n\ndef test_make_bin():\n np.random.seed(42)\n x, y, z = [np.random.randn(1000) + 10 for i in range(3)]\n bx, by, bz = make_bin(10, 10, x, y, z, 1, 1)\n plt.plot(x, y, \".\")\n plt.plot(bx, by, \".\")\n\n\ndef test_calc_dispersion():\n x2, y2 = [np.random.randn(1000) + 10 
for i in range(2)]\n z2 = np.random.randn(1000)*y2\n\n dispersions_nearest = calc_dispersion_nearest(x2, y2, z2, 100)\n dispersions_bins = calc_dispersion_bins(x2, y2, z2, .5, .5)\n\n return dispersions_nearest, dispersions_bins\n\n\nif __name__ == \"__main__\":\n # test_dispersion()\n # test_MC()\n # test_sigma_clip()\n # test_select_stars()\n # test_fit_line()\n test_err_to_log10_err()\n test_tan_dist()\n test_n_nearest_points()\n test_make_bin()\n test_calc_dispersion()\n\n\n"
] |
[
[
"numpy.dot",
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.all",
"numpy.random.randn",
"numpy.ones_like",
"numpy.arange",
"numpy.diff",
"matplotlib.pyplot.errorbar",
"numpy.isclose",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"numpy.log10",
"scipy.stats.kurtosis",
"numpy.argsort",
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.shape",
"numpy.random.uniform"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
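Editor's note: the tests above exercise a local `dispersion` module that estimates the scatter of `y` as a function of `x`. The module itself is not in the row, so the sketch below is only a stand-in for the underlying idea (binned standard deviation), reusing the synthetic data from `test_dispersion`:

```python
# Estimate dispersion: the standard deviation of y within bins of x.
# For y = randn * x the true dispersion grows linearly with x.
import numpy as np

rng = np.random.default_rng(42)
x = rng.uniform(0, 100, 10000)
y = rng.standard_normal(10000) * x

bins = np.linspace(0, 100, 11)
idx = np.digitize(x, bins)                      # bin index for each point
disp = np.array([y[idx == k].std() for k in range(1, len(bins))])
centers = 0.5 * (bins[:-1] + bins[1:])
# disp tracks centers: a straight-line fit has slope close to 1, which is
# the same property test_dispersion asserts with np.isclose(w[0], 1, atol=.1).
```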
dreamer2368/mirgecom
|
[
"dc79645af040510a7e2b11d3f93db4c34ad39228"
] |
[
"examples/wave-mpi.py"
] |
[
"\"\"\"Demonstrate wave MPI example.\"\"\"\n\n__copyright__ = \"Copyright (C) 2020 University of Illinois Board of Trustees\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\nimport logging\n\nimport numpy as np\nimport numpy.linalg as la # noqa\nimport pyopencl as cl\n\nfrom pytools.obj_array import flat_obj_array\n\nfrom meshmode.array_context import (PyOpenCLArrayContext,\n PytatoPyOpenCLArrayContext)\nfrom arraycontext import thaw, freeze\n\nfrom mirgecom.profiling import PyOpenCLProfilingArrayContext # noqa\n\nfrom meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa\n\nfrom grudge.eager import EagerDGDiscretization\nfrom grudge.shortcuts import make_visualizer\nfrom mirgecom.mpi import mpi_entry_point\nfrom mirgecom.integrators import rk4_step\nfrom mirgecom.wave import wave_operator\n\nimport pyopencl.tools as cl_tools\n\nfrom logpyle import IntervalTimer, set_dt\n\nfrom mirgecom.logging_quantities import (initialize_logmgr,\n logmgr_add_cl_device_info,\n logmgr_add_device_memory_usage)\n\n\ndef bump(actx, discr, t=0):\n \"\"\"Create a bump.\"\"\"\n source_center = np.array([0.2, 0.35, 0.1])[:discr.dim]\n source_width = 0.05\n source_omega = 3\n\n nodes = thaw(discr.nodes(), actx)\n center_dist = flat_obj_array([\n nodes[i] - source_center[i]\n for i in range(discr.dim)\n ])\n\n return (\n np.cos(source_omega*t)\n * actx.np.exp(\n -np.dot(center_dist, center_dist)\n / source_width**2))\n\n\n@mpi_entry_point\ndef main(snapshot_pattern=\"wave-mpi-{step:04d}-{rank:04d}.pkl\", restart_step=None,\n use_profiling=False, use_logmgr=False, actx_class=PyOpenCLArrayContext):\n \"\"\"Drive the example.\"\"\"\n cl_ctx = cl.create_some_context()\n queue = cl.CommandQueue(cl_ctx)\n\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n num_parts = comm.Get_size()\n\n logmgr = initialize_logmgr(use_logmgr,\n filename=\"wave-mpi.sqlite\", mode=\"wu\", mpi_comm=comm)\n if use_profiling:\n queue = cl.CommandQueue(cl_ctx,\n properties=cl.command_queue_properties.PROFILING_ENABLE)\n actx = actx_class(queue,\n allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),\n logmgr=logmgr)\n else:\n queue = cl.CommandQueue(cl_ctx)\n actx = actx_class(queue,\n allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))\n\n if restart_step is None:\n\n from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis\n mesh_dist = MPIMeshDistributor(comm)\n\n dim = 2\n nel_1d = 16\n\n if mesh_dist.is_mananger_rank():\n from meshmode.mesh.generation import 
generate_regular_rect_mesh\n mesh = generate_regular_rect_mesh(\n a=(-0.5,)*dim, b=(0.5,)*dim,\n nelements_per_axis=(nel_1d,)*dim)\n\n print(\"%d elements\" % mesh.nelements)\n part_per_element = get_partition_by_pymetis(mesh, num_parts)\n local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)\n\n del mesh\n\n else:\n local_mesh = mesh_dist.receive_mesh_part()\n\n fields = None\n\n else:\n from mirgecom.restart import read_restart_data\n restart_data = read_restart_data(\n actx, snapshot_pattern.format(step=restart_step, rank=rank)\n )\n local_mesh = restart_data[\"local_mesh\"]\n nel_1d = restart_data[\"nel_1d\"]\n assert comm.Get_size() == restart_data[\"num_parts\"]\n\n order = 3\n\n discr = EagerDGDiscretization(actx, local_mesh, order=order,\n mpi_communicator=comm)\n\n current_cfl = 0.485\n wave_speed = 1.0\n from grudge.dt_utils import characteristic_lengthscales\n nodal_dt = characteristic_lengthscales(actx, discr) / wave_speed\n\n from grudge.op import nodal_min\n dt = actx.to_numpy(current_cfl * nodal_min(discr, \"vol\", nodal_dt))[()]\n\n t_final = 1\n\n if restart_step is None:\n t = 0\n istep = 0\n\n fields = flat_obj_array(\n bump(actx, discr),\n [discr.zeros(actx) for i in range(discr.dim)]\n )\n\n else:\n t = restart_data[\"t\"]\n istep = restart_step\n assert istep == restart_step\n restart_fields = restart_data[\"fields\"]\n old_order = restart_data[\"order\"]\n if old_order != order:\n old_discr = EagerDGDiscretization(actx, local_mesh, order=old_order,\n mpi_communicator=comm)\n from meshmode.discretization.connection import make_same_mesh_connection\n connection = make_same_mesh_connection(actx, discr.discr_from_dd(\"vol\"),\n old_discr.discr_from_dd(\"vol\"))\n fields = connection(restart_fields)\n else:\n fields = restart_fields\n\n if logmgr:\n logmgr_add_cl_device_info(logmgr, queue)\n logmgr_add_device_memory_usage(logmgr, queue)\n\n logmgr.add_watches([\"step.max\", \"t_step.max\", \"t_log.max\"])\n\n try:\n logmgr.add_watches([\"memory_usage_python.max\", \"memory_usage_gpu.max\"])\n except KeyError:\n pass\n\n if use_profiling:\n logmgr.add_watches([\"multiply_time.max\"])\n\n vis_timer = IntervalTimer(\"t_vis\", \"Time spent visualizing\")\n logmgr.add_quantity(vis_timer)\n\n vis = make_visualizer(discr)\n\n def rhs(t, w):\n return wave_operator(discr, c=wave_speed, w=w)\n\n compiled_rhs = actx.compile(rhs)\n\n while t < t_final:\n if logmgr:\n logmgr.tick_before()\n\n # restart must happen at beginning of step\n if istep % 100 == 0 and (\n # Do not overwrite the restart file that we just read.\n istep != restart_step):\n from mirgecom.restart import write_restart_file\n write_restart_file(\n actx, restart_data={\n \"local_mesh\": local_mesh,\n \"order\": order,\n \"fields\": fields,\n \"t\": t,\n \"step\": istep,\n \"nel_1d\": nel_1d,\n \"num_parts\": num_parts},\n filename=snapshot_pattern.format(step=istep, rank=rank),\n comm=comm\n )\n\n if istep % 10 == 0:\n print(istep, t, actx.to_numpy(discr.norm(fields[0])))\n vis.write_parallel_vtk_file(\n comm,\n \"fld-wave-mpi-%03d-%04d.vtu\" % (rank, istep),\n [\n (\"u\", fields[0]),\n (\"v\", fields[1:]),\n ], overwrite=True\n )\n\n fields = thaw(freeze(fields, actx), actx)\n fields = rk4_step(fields, t, dt, compiled_rhs)\n\n t += dt\n istep += 1\n\n if logmgr:\n set_dt(logmgr, dt)\n logmgr.tick_after()\n\n final_soln = actx.to_numpy(discr.norm(fields[0]))\n assert np.abs(final_soln - 0.04409852463947439) < 1e-14\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(format=\"%(message)s\", 
level=logging.INFO)\n # Turn off profiling to not overwhelm CI\n use_profiling = False\n use_logging = True\n\n import argparse\n parser = argparse.ArgumentParser(description=\"Wave (MPI version)\")\n parser.add_argument(\"--lazy\", action=\"store_true\",\n help=\"switch to a lazy computation mode\")\n args = parser.parse_args()\n\n main(use_profiling=use_profiling, use_logmgr=use_logging,\n actx_class=PytatoPyOpenCLArrayContext if args.lazy\n else PyOpenCLArrayContext)\n\n\n# vim: foldmethod=marker\n"
] |
[
[
"numpy.abs",
"numpy.dot",
"numpy.array",
"numpy.cos"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
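Editor's note: the wave example above advances the fields with `rk4_step(fields, t, dt, compiled_rhs)`. The real mirgecom integrator operates on DOF array containers; the sketch below is a plain-NumPy stand-in showing the classical fourth-order Runge-Kutta step that call implies:

```python
# Classical RK4: four rhs evaluations combined with weights 1/6, 2/6, 2/6, 1/6.
import numpy as np

def rk4_step(y, t, dt, rhs):
    k1 = rhs(t, y)
    k2 = rhs(t + 0.5 * dt, y + 0.5 * dt * k1)
    k3 = rhs(t + 0.5 * dt, y + 0.5 * dt * k2)
    k4 = rhs(t + dt, y + dt * k3)
    return y + dt / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)

# Quick check on y' = -y, whose exact solution is exp(-t):
y, t, dt = np.array([1.0]), 0.0, 0.1
for _ in range(10):
    y = rk4_step(y, t, dt, lambda t, y: -y)
    t += dt
print(y)   # ~0.3678794, i.e. exp(-1) to roughly 1e-7
```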
aligholamee/Patterns
|
[
"74ead5de22988b5e3c86464d192899453b13d26e"
] |
[
"docs/assignment-4/src/5/knn_plot.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn import neighbors\n\nn_neighbors = 1\n\n\nX = np.array([\n [2, 4],\n [4, 2],\n [4, 6],\n [6, 4],\n [4, 4],\n [6, 2]\n])\n\ny = np.array([\n [0],\n [0],\n [0],\n [0],\n [1],\n [1]\n])\n\nh = .02 # step size in the mesh\n\n# Create color maps\ncmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])\ncmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\nfor weights in ['uniform', 'distance']:\n # we create an instance of Neighbours Classifier and fit the data.\n clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)\n clf.fit(X, y)\n\n # Plot the decision boundary. For that, we will assign a color to each\n # point in the mesh [x_min, x_max]x[y_min, y_max].\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.figure()\n plt.pcolormesh(xx, yy, Z, cmap=cmap_light)\n\n # Plot also the training points\n plt.scatter(X[:, 0], X[:, 1], cmap=cmap_bold)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n plt.title(\"2-Class classification (k = %i, weights = '%s')\"\n % (n_neighbors, weights))\n\nplt.show()"
] |
[
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.arange",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.pcolormesh",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sebzap/CarlaRL
|
[
"5283d15dee9e8dc5e728314d56875b4fbca3acb2"
] |
[
"carla/agents/common.py"
] |
[
"\n\nimport random\nimport time\n\nimport numpy as np\n\nfrom carla.agents.core import ActorCritic\nfrom carla.agents.utils.dummy_vec_env import DummyVecEnv\nfrom carla.agents.utils.pytorch_utils import dict_obs_to_tensor, merge_dict_obs_list, merge_list_of_dicts\nfrom carla.agents.utils.subproc_vec_env import SubprocVecEnv\nfrom carla.agents.utils.weight_init import *\n\n\ndef logging(log_writer, logger, log_interval, epoch, steps_per_epoch, start_time):\n\n # log info to tensorboard\n log_writer.add_scalar(f'stats/EpRet', np.mean(logger.epoch_dict['EpRet']), epoch)\n log_writer.add_scalar(f'stats/EpLen', np.mean(logger.epoch_dict['EpLen']), epoch)\n log_writer.add_scalar(f'stats/LossPi', np.mean(logger.epoch_dict['LossPi']), epoch)\n log_writer.add_scalar(f'stats/LossV', np.mean(logger.epoch_dict['LossV']), epoch)\n log_writer.add_scalar(f'stats/Entropy', np.mean(logger.epoch_dict['Entropy']), epoch)\n\n # Log info about epoch\n logger.log_tabular('Epoch', epoch)\n logger.log_tabular('EpRet', with_min_and_max=True)\n logger.log_tabular('EpLen', average_only=True)\n logger.log_tabular('TotalEnvInteracts', (epoch + 1) * steps_per_epoch)\n\n # log context statistics\n for key in logger.epoch_dict.keys():\n if \"EpRet_\" in key and len(logger.epoch_dict[key]) > 0:\n context = key.split(\"_\")[-1]\n\n if epoch % log_interval == 0:\n log_writer.add_scalar(f'context_{context}/EpRet', np.mean(logger.epoch_dict[f\"EpRet_{context}\"]), epoch)\n log_writer.add_scalar(f'context_{context}/EpLen', np.mean(logger.epoch_dict[f\"EpLen_{context}\"]), epoch)\n log_writer.add_scalar(f'context_{context}/NObs', np.mean(logger.epoch_dict[f\"NObs_{context}\"]), epoch)\n log_writer.add_scalar(f'context_{context}/NGoodies', np.mean(logger.epoch_dict[f\"NGoodies_{context}\"]),\n epoch)\n log_writer.add_scalar(f'context_{context}/GoalReached',\n np.mean(logger.epoch_dict[f\"GoalReached_{context}\"]), epoch)\n\n logger.log_tabular('Time', time.time() - start_time)\n\n log_stats = logger.log_current_row\n print(f'Epoch: {epoch} | Avg. Ep. Return: {log_stats[\"AverageEpRet\"]:.4f} '\n f'| Avg. Ep. 
Length: {log_stats[\"EpLen\"]:.4f} | Time Passed: {log_stats[\"Time\"]:.4f}')\n\n logger.dump_tabular(print_to_terminal=False)\n\n\ndef collect_epoch_data(ac, env, initial_obs, buf, local_steps_per_epoch, obs_space, device,\n logger, n_proc, ep_ret, ep_len, max_ep_len, vae_buffer=None):\n\n # make sure agent is in eval mode, and for case of pretrained VAE, weights of encoder are frozen and in eval model.\n ac.eval()\n ac.freeze_context_net()\n ac.set_eval_context_net()\n\n o = initial_obs\n for t in range(local_steps_per_epoch):\n o = merge_dict_obs_list(o, obs_space)\n a, v, logp = ac.step(dict_obs_to_tensor(o, device=device))\n\n next_o, r, d, info = env.step(a)\n ep_ret += r\n ep_len += 1\n info = merge_list_of_dicts(info)\n\n # save and log\n buf.store(o, a, r, v, logp)\n logger.store(VVals=v)\n\n if vae_buffer is not None:\n vae_buffer.store(o)\n\n # Update obs (critical!)\n o = next_o\n for proc_idx in range(n_proc):\n timeout = ep_len[proc_idx] == max_ep_len\n terminal = d[proc_idx] or timeout\n epoch_ended = t == local_steps_per_epoch - 1\n\n if terminal or epoch_ended:\n # if trajectory didn't reach terminal state, bootstrap value target\n if timeout or epoch_ended:\n\n if n_proc > 1:\n # in case of more then one processes it should be wrapped as a list\n step_o = [o[proc_idx]]\n else:\n step_o = o\n\n _, v, _ = ac.step(dict_obs_to_tensor(merge_dict_obs_list(step_o, obs_space), device=device))\n v = v[0] # index 0 to get v as a single number\n else:\n v = 0\n buf.finish_path(proc_idx, v)\n if terminal:\n # only save EpRet / EpLen if trajectory finished\n logger.store(EpRet=ep_ret[proc_idx], EpLen=ep_len[proc_idx])\n\n # log context specific statistics\n context_id = info['context'][proc_idx]\n obstacles = info['obstacles'][proc_idx]\n goodies = info['goodies'][proc_idx]\n goal = info['goal_reached'][proc_idx]\n logger.store(**{f'EpRet_{context_id}': ep_ret[proc_idx],\n f'EpLen_{context_id}': ep_len[proc_idx],\n f'NObs_{context_id}': obstacles,\n f'NGoodies_{context_id}': goodies,\n f'GoalReached_{context_id}': goal,\n })\n\n # no env reset necessary, handled implicitly by subroc_vec_env\n ep_ret[proc_idx] = 0\n ep_len[proc_idx] = 0\n\n # return the initial observation for the next epoch\n return o\n\n\ndef setup_agent(obs_space, action_space, ac_kwargs, device):\n\n # Create actor-critic module\n ac = ActorCritic(obs_space, action_space, **ac_kwargs)\n\n # handling freezing and eval mode for VAE in context_net\n ac.freeze_context_net()\n ac.set_eval_context_net()\n\n ac = ac.to(device)\n\n return ac\n\n\ndef setup_environments(env_fn, env_kwargs, eval_env_kwargs, n_proc):\n # test env for logging\n test_env = env_fn(rank=0, **env_kwargs)()\n\n # Instantiate environment\n env_fns = [env_fn(rank=i, **env_kwargs) for i in range(n_proc)]\n\n eval_env = env_fn(rank=0, **eval_env_kwargs)()\n\n if n_proc == 1:\n env = DummyVecEnv(env_fns)\n else:\n env = SubprocVecEnv(env_fns)\n\n return env, eval_env, test_env\n\n\ndef set_seeds(seed):\n # Random seed\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True"
] |
[
[
"numpy.mean",
"numpy.random.seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stevemandala/Ax
|
[
"8e289a154e3a2ed237bf27ddb90e09963c0d6a97"
] |
[
"ax/models/numpy/randomforest.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\nfrom ax.models.numpy_base import NumpyModel\nfrom ax.utils.common.docutils import copy_doc\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.tree import DecisionTreeRegressor\n\n\nclass RandomForest(NumpyModel):\n \"\"\"A Random Forest model.\n\n Uses a parametric bootstrap to handle uncertainty in Y.\n\n Can be used to fit data, make predictions, and do cross validation; however\n gen is not implemented and so this model cannot generate new points.\n\n Args:\n max_features: Maximum number of features at each split. With one-hot\n encoding, this should be set to None. Defaults to \"sqrt\", which is\n Breiman's version of Random Forest.\n num_trees: Number of trees.\n \"\"\"\n\n def __init__(\n self, max_features: Optional[str] = \"sqrt\", num_trees: int = 500\n ) -> None:\n self.max_features = max_features\n self.num_trees = num_trees\n self.models: List[RandomForestRegressor] = []\n\n @copy_doc(NumpyModel.fit)\n def fit(\n self,\n Xs: List[np.ndarray],\n Ys: List[np.ndarray],\n Yvars: List[np.ndarray],\n bounds: List[Tuple[float, float]],\n task_features: List[int],\n feature_names: List[str],\n metric_names: List[str],\n fidelity_features: List[int],\n ) -> None:\n for i, X in enumerate(Xs):\n self.models.append(\n _get_rf(\n X=X,\n Y=Ys[i],\n Yvar=Yvars[i],\n num_trees=self.num_trees,\n max_features=self.max_features,\n )\n )\n\n @copy_doc(NumpyModel.predict)\n def predict(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n return _rf_predict(self.models, X)\n\n @copy_doc(NumpyModel.cross_validate)\n def cross_validate(\n self,\n Xs_train: List[np.ndarray],\n Ys_train: List[np.ndarray],\n Yvars_train: List[np.ndarray],\n X_test: np.ndarray,\n ) -> Tuple[np.ndarray, np.ndarray]:\n cv_models: List[RandomForestRegressor] = []\n for i, X in enumerate(Xs_train):\n cv_models.append(\n _get_rf(\n X=X,\n Y=Ys_train[i],\n Yvar=Yvars_train[i],\n num_trees=self.num_trees,\n max_features=self.max_features,\n )\n )\n return _rf_predict(cv_models, X_test)\n\n\ndef _get_rf(\n X: np.ndarray,\n Y: np.ndarray,\n Yvar: np.ndarray,\n num_trees: int,\n max_features: Optional[str],\n) -> RandomForestRegressor:\n \"\"\"Fit a Random Forest model.\n\n Args:\n X: X\n Y: Y\n Yvar: Variance for Y\n num_trees: Number of trees\n max_features: Max features specifier\n\n Returns: Fitted Random Forest.\n \"\"\"\n r = RandomForestRegressor(\n n_estimators=num_trees, max_features=max_features, bootstrap=True\n )\n # pyre-fixme[16]: `RandomForestRegressor` has no attribute `estimators_`.\n r.estimators_ = [DecisionTreeRegressor() for i in range(r.n_estimators)]\n for estimator in r.estimators_:\n # Parametric bootstrap\n y = np.random.normal(loc=Y[:, 0], scale=np.sqrt(Yvar[:, 0]))\n estimator.fit(X, y)\n return r\n\n\ndef _rf_predict(\n models: List[RandomForestRegressor], X: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Make predictions with Random Forest models.\n\n Args:\n models: List of models for each outcome\n X: X to predict\n\n Returns: mean and covariance estimates\n \"\"\"\n f = np.zeros((X.shape[0], len(models)))\n cov = np.zeros((X.shape[0], len(models), len(models)))\n for i, m in enumerate(models):\n preds = np.vstack([tree.predict(X) for tree in m.estimators_]) # pyre-ignore\n f[:, i] = preds.mean(0)\n cov[:, 
i, i] = preds.var(0)\n return f, cov\n"
] |
[
[
"sklearn.ensemble.RandomForestRegressor",
"sklearn.tree.DecisionTreeRegressor",
"numpy.sqrt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
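Editor's note: the `RandomForest` docstring above says it "uses a parametric bootstrap to handle uncertainty in Y" -- each tree is fit on a fresh draw `y ~ N(Y, sqrt(Yvar))`, exactly as in `_get_rf`. A compact illustration with made-up data (the toy problem and forest size are assumptions):

```python
# Parametric bootstrap for a forest: per-tree targets are resampled from the
# observation-noise model, so tree-to-tree spread reflects the noise in Y.
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(0)
X = rng.uniform(-1.0, 1.0, size=(50, 2))
Y = X[:, 0] ** 2 + 0.1 * rng.standard_normal(50)  # noisy observations
Yvar = np.full(50, 0.01)                          # known noise variance

trees = [
    DecisionTreeRegressor().fit(X, rng.normal(loc=Y, scale=np.sqrt(Yvar)))
    for _ in range(100)
]

X_test = rng.uniform(-1.0, 1.0, size=(5, 2))
preds = np.vstack([t.predict(X_test) for t in trees])
mean, var = preds.mean(axis=0), preds.var(axis=0)  # f and diag(cov) as above
```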
zillow/datasets
|
[
"e017db64daab1eaccd564658d848263270f083c4"
] |
[
"datasets/tutorials/3_foreach_dataset_flow.py"
] |
[
"import os\n\nimport pandas as pd # type: ignore\nfrom metaflow import FlowSpec, step\n\nfrom datasets import Mode, dataset\nfrom datasets.plugins import BatchDataset, BatchOptions\n\n\nflow_dir = os.path.dirname(os.path.realpath(__file__))\nmy_dataset_foreach_path = os.path.join(flow_dir, \"data/my_dataset_foreach\")\n\n\nclass ForeachDatasetFlow(FlowSpec):\n @step\n def start(self):\n self.regions = [\"A\", \"B\"]\n self.next(self.foreach_split, foreach=\"regions\")\n\n @dataset(\n name=\"MyDataset\",\n options=BatchOptions(\n partition_by=\"region,run_id\",\n path=my_dataset_foreach_path,\n ),\n mode=Mode.READ_WRITE,\n )\n @step\n def foreach_split(self):\n df = pd.DataFrame({\"zpid\": [1, 2, 3] if self.input == \"A\" else [4, 5, 6]})\n\n # Set\n df[\"region\"] = self.input\n print(f\"saving: {self.input=}\")\n\n # Example of writing to a dataset with a path within a foreach split\n self.my_dataset: BatchDataset\n self.my_dataset.write_pandas(df)\n\n self.next(self.join_step)\n\n @step\n def join_step(self, inputs):\n self.my_dataset = inputs[0].my_dataset\n self.next(self.end)\n\n @step\n def end(self):\n print(f\"I have datasets \\n{self.my_dataset=}\\n\")\n print(\n self.my_dataset.to_pandas(partitions=dict(region=\"A\")).to_string(index=False),\n )\n\n\nif __name__ == \"__main__\":\n ForeachDatasetFlow()\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
XuMengyaAmy/ReportDALS
|
[
"057562b2703d49858f485148d5385d1c8544d55d"
] |
[
"val_ours.py"
] |
[
"import random\nfrom data import ImageDetectionsField, TextField, RawField\nfrom data import COCO, DataLoader\nimport evaluation\nfrom evaluation import PTBTokenizer, Cider\nfrom models.transformer import Transformer, MemoryAugmentedEncoder, MeshedDecoder, ScaledDotProductAttentionMemory\nimport torch\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.nn import NLLLoss\nfrom tqdm import tqdm\n\nimport argparse, os, pickle\nimport numpy as np\nimport itertools\nimport multiprocessing\nfrom shutil import copyfile\n\nimport os, json\n\nimport pylab\nfrom IPython import display\nfrom matplotlib import pyplot as plt\n\n\n# lines below to make the training reproducible (full set)\nseed = 1234\nrandom.seed(seed)\ntorch.manual_seed(seed)\nnp.random.seed(seed)\n\ntorch.cuda.manual_seed_all(seed)\nos.environ['PYTHONHASHSEED'] = str(seed)\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\ndef evaluate_metrics(model, dataloader, text_field):\n import itertools\n model.eval()\n gen = {}\n gts = {}\n with tqdm(desc='evaluation', unit='it', total=len(dataloader)) as pbar:\n for it, (images, caps_gt) in enumerate(iter(dataloader)):\n images = images.to(device)\n with torch.no_grad():\n out, _ = model.beam_search(images, 20, text_field.vocab.stoi['<eos>'], 5, out_size=1) \n caps_gen = text_field.decode(out, join_words=False)\n for i, (gts_i, gen_i) in enumerate(zip(caps_gt, caps_gen)):\n gen_i = ' '.join([k for k, g in itertools.groupby(gen_i)])\n gen['%d_%d' % (it, i)] = [gen_i, ] \n gts['%d_%d' % (it, i)] = gts_i\n pbar.update()\n\n\n if not os.path.exists('predict_caption'):\n os.makedirs('predict_caption')\n json.dump(gen, open('predict_caption/predict_caption_val.json', 'w'))\n\n gts = evaluation.PTBTokenizer.tokenize(gts)\n gen = evaluation.PTBTokenizer.tokenize(gen)\n scores, _ = evaluation.compute_scores(gts, gen)\n return scores\n\nif __name__ == '__main__':\n device = torch.device('cuda')\n parser = argparse.ArgumentParser(description='Meshed-Memory Transformer')\n parser.add_argument('--exp_name', type=str, default='m2_transformer')\n parser.add_argument('--batch_size', type=int, default=10)\n parser.add_argument('--workers', type=int, default=0)\n parser.add_argument('--m', type=int, default=40) \n parser.add_argument('--head', type=int, default=8)\n parser.add_argument('--warmup', type=int, default=10000)\n parser.add_argument('--features_path', type=str)\n parser.add_argument('--features_path_DA', type=str)\n parser.add_argument('--annotation_folder', type=str)\n parser.add_argument('--annotation_folder_DA', type=str)\n args = parser.parse_args()\n print(args)\n\n print('Validation')\n\n # Pipeline for image regions\n image_field = ImageDetectionsField(detections_path=args.features_path, max_detections=6, load_in_tmp=False) \n\n \n # Pipeline for text\n text_field = TextField(init_token='<bos>', eos_token='<eos>', lower=True, tokenize='spacy',\n remove_punctuation=True, nopoints=False)\n\n \n\n \n # Create the dataset\n dataset = COCO(image_field, text_field, args.features_path, args.annotation_folder, args.annotation_folder)\n dataset_DA = COCO(image_field, text_field, args.features_path_DA , args.annotation_folder_DA, args.annotation_folder_DA) \n train_dataset, val_dataset = dataset.splits \n train_dataset_DA, val_dataset_DA = dataset_DA.splits \n \n print(\"-\"*100)\n print(len(train_dataset))\n print(len(val_dataset))\n \n\n if not os.path.isfile('vocab_%s.pkl' % args.exp_name):\n print(\"Building 
vocabulary\")\n text_field.build_vocab(train_dataset, val_dataset, min_freq=2) \n pickle.dump(text_field.vocab, open('vocab_%s.pkl' % args.exp_name, 'wb'))\n else:\n text_field.vocab = pickle.load(open('vocab_%s.pkl' % args.exp_name, 'rb'))\n\n print(len(text_field.vocab))\n print(text_field.vocab.stoi)\n\n # Model and dataloaders\n encoder = MemoryAugmentedEncoder(3, 0, attention_module=ScaledDotProductAttentionMemory, \n attention_module_kwargs={'m': args.m}) \n decoder = MeshedDecoder(len(text_field.vocab), 54, 3, text_field.vocab.stoi['<pad>'])\n model = Transformer(text_field.vocab.stoi['<bos>'], encoder, decoder).to(device)\n\n\n dict_dataset_val = val_dataset.image_dictionary({'image': image_field, 'text': RawField()})\n print(len(dict_dataset_val)) \n dict_dataset_val_DA = val_dataset_DA.image_dictionary({'image': image_field, 'text': RawField()})\n\n\n data = torch.load('saved_best_checkpoints/7_saved_models_final_3outputs/%s_best.pth' % args.exp_name)\n model.load_state_dict(data['state_dict'])\n print(\"Epoch %d\" % data['epoch']) \n print(data['best_cider'])\n\n\n dict_dataloader_val = DataLoader(dict_dataset_val, batch_size=args.batch_size // 5)\n dict_dataloader_val_DA = DataLoader(dict_dataset_val_DA, batch_size=args.batch_size // 5)\n \n # Validation scores\n scores = evaluate_metrics(model, dict_dataloader_val, text_field) \n print(\"MICCAI Validation scores :\", scores)\n\n scores_DA = evaluate_metrics(model, dict_dataloader_val_DA, text_field) \n print(\"DA (SGH NUH) Validation scores \", scores_DA)\n"
] |
[
[
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
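A note on the caption evaluation code in the row above: before scoring, the decoded token sequence is collapsed with `itertools.groupby`, which removes *consecutive* duplicate tokens that beam search tends to produce. A minimal, runnable sketch of that step (the token list here is invented for illustration):

```python
import itertools

# Hypothetical decoded caption with stuttered tokens, as beam search can emit.
tokens = ["the", "the", "robot", "grasps", "grasps", "the", "tissue"]

# groupby groups *consecutive* equal items, so keeping one key per group
# drops immediate repeats while preserving non-adjacent duplicates ("the").
caption = " ".join(k for k, _ in itertools.groupby(tokens))
print(caption)  # -> "the robot grasps the tissue"
```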
fhoguin/manim
|
[
"9017dc97b04094f99d77bb930df5bf3a3aead9c1"
] |
[
"manim/_config/utils.py"
] |
[
"\"\"\"Utilities to create and set the config.\n\nThe main class exported by this module is :class:`ManimConfig`. This class\ncontains all configuration options, including frame geometry (e.g. frame\nheight/width, frame rate), output (e.g. directories, logging), styling\n(e.g. background color, transparency), and general behavior (e.g. writing a\nmovie vs writing a single frame).\n\nSee :doc:`tutorials/configuration` for an introduction to Manim's configuration system.\n\n\"\"\"\n\nimport os\nimport sys\nimport copy\nimport logging\nimport configparser\nfrom pathlib import Path\nfrom collections.abc import Mapping, MutableMapping\n\nimport numpy as np\nimport colour\n\nfrom .. import constants\nfrom ..utils.tex import TexTemplate, TexTemplateFromFile\nfrom .logger_utils import set_file_logger\n\n\ndef config_file_paths():\n \"\"\"The paths where ``.cfg`` files will be searched for.\n\n When manim is first imported, it processes any ``.cfg`` files it finds. This\n function returns the locations in which these files are searched for. In\n ascending order of precedence, these are: the library-wide config file, the\n user-wide config file, and the folder-wide config file.\n\n The library-wide config file determines manim's default behavior. The\n user-wide config file is stored in the user's home folder, and determines\n the behavior of manim whenever the user invokes it from anywhere in the\n system. The folder-wide config file only affects scenes that are in the\n same folder. The latter two files are optional.\n\n These files, if they exist, are meant to loaded into a single\n :class:`configparser.ConfigParser` object, and then processed by\n :class:`ManimConfig`.\n\n Returns\n -------\n List[:class:`Path`]\n List of paths which may contain ``.cfg`` files, in ascending order of\n precedence.\n\n See Also\n --------\n :func:`make_config_parser`, :meth:`ManimConfig.digest_file`,\n :meth:`ManimConfig.digest_parser`\n\n Notes\n -----\n The location of the user-wide config file is OS-specific.\n\n \"\"\"\n library_wide = Path.resolve(Path(__file__).parent / \"default.cfg\")\n if sys.platform.startswith(\"win32\"):\n user_wide = Path.home() / \"AppData\" / \"Roaming\" / \"Manim\" / \"manim.cfg\"\n else:\n user_wide = Path.home() / \".config\" / \"manim\" / \"manim.cfg\"\n folder_wide = Path(\"manim.cfg\")\n return [library_wide, user_wide, folder_wide]\n\n\ndef make_config_parser(custom_file=None):\n \"\"\"Make a :class:`ConfigParser` object and load any ``.cfg`` files.\n\n The user-wide file, if it exists, overrides the library-wide file. The\n folder-wide file, if it exists, overrides the other two.\n\n The folder-wide file can be ignored by passing ``custom_file``. However,\n the user-wide and library-wide config files cannot be ignored.\n\n Parameters\n ----------\n custom_file : :class:`str`\n Path to a custom config file. If used, the folder-wide file in the\n relevant directory will be ignored, if it exists. If None, the\n folder-wide file will be used, if it exists.\n\n Returns\n -------\n :class:`ConfigParser`\n A parser containing the config options found in the .cfg files that\n were found. 
It is guaranteed to contain at least the config options\n found in the library-wide file.\n\n See Also\n --------\n :func:`config_file_paths`\n\n \"\"\"\n library_wide, user_wide, folder_wide = config_file_paths()\n # From the documentation: \"An application which requires initial values to\n # be loaded from a file should load the required file or files using\n # read_file() before calling read() for any optional files.\"\n # https://docs.python.org/3/library/configparser.html#configparser.ConfigParser.read\n parser = configparser.ConfigParser()\n with open(library_wide) as file:\n parser.read_file(file) # necessary file\n\n other_files = [user_wide, custom_file if custom_file else folder_wide]\n parser.read(other_files) # optional files\n\n return parser\n\n\ndef _determine_quality(args):\n old_qualities = {\n \"k\": \"fourk_quality\",\n \"e\": \"high_quality\",\n \"m\": \"medium_quality\",\n \"l\": \"low_quality\",\n }\n\n for quality in constants.QUALITIES:\n if quality == constants.DEFAULT_QUALITY:\n # Skip so we prioritize anything that overwrites the default quality.\n pass\n elif getattr(args, quality, None) or (\n hasattr(args, \"quality\")\n and args.quality == constants.QUALITIES[quality][\"flag\"]\n ):\n return quality\n\n for quality in old_qualities:\n if getattr(args, quality, None):\n logging.getLogger(\"manim\").warning(\n f\"Option -{quality} is deprecated please use the --quality/-q flag.\"\n )\n return old_qualities[quality]\n\n return constants.DEFAULT_QUALITY\n\n\nclass ManimConfig(MutableMapping):\n \"\"\"Dict-like class storing all config options.\n\n The global ``config`` object is an instance of this class, and acts as a\n single source of truth for all of the library's customizable behavior.\n\n The global ``config`` object is capable of digesting different types of\n sources and converting them into a uniform interface. These sources are\n (in ascending order of precedence): configuration files, command line\n arguments, and programmatic changes. Regardless of how the user chooses to\n set a config option, she can access its current value using\n :class:`ManimConfig`'s attributes and properties.\n\n Notes\n -----\n Each config option is implemented as a property of this class.\n\n Each config option can be set via a config file, using the full name of the\n property. If a config option has an associated CLI flag, then the flag is\n equal to the full name of the property. Those that admit an alternative\n flag or no flag at all are documented in the individual property's\n docstring.\n\n Examples\n --------\n Each config option allows for dict syntax and attribute syntax. For\n example, the following two lines are equivalent,\n\n .. code-block:: python\n\n >>> from manim import config, WHITE\n >>> config.background_color = WHITE\n >>> config['background_color'] = WHITE\n\n The former is preferred; the latter is provided mostly for backwards\n compatibility.\n\n The config options are designed to keep internal consistency. For example,\n setting ``frame_y_radius`` will affect ``frame_height``:\n\n .. code-block:: python\n\n >>> config.frame_height\n 8.0\n >>> config.frame_y_radius = 5.0\n >>> config.frame_height\n 10.0\n\n There are many ways of interacting with config options. Take for example\n the config option ``background_color``. There are three ways to change it:\n via a config file, via CLI flags, or programmatically.\n\n To set the background color via a config file, save the following\n ``manim.cfg`` file with the following contents.\n\n .. 
code-block::\n\n [CLI]\n background_color = WHITE\n\n In order to have this ``.cfg`` file apply to a manim scene, it needs to be\n placed in the same directory as the script,\n\n .. code-block:: bash\n\n project/\n ├─scene.py\n └─manim.cfg\n\n Now, when the user executes\n\n .. code-block:: bash\n\n manim scene.py\n\n the background of the scene will be set to ``WHITE``. This applies regardless\n of where the manim command is invoked from.\n\n Command line arguments override ``.cfg`` files. In the previous example,\n executing\n\n .. code-block:: bash\n\n manim scene.py -c BLUE\n\n will set the background color to BLUE, regardless of the conents of\n ``manim.cfg``.\n\n Finally, any programmatic changes made within the scene script itself will\n override the command line arguments. For example, if ``scene.py`` contains\n the following\n\n .. code-block:: python\n\n from manim import *\n config.background_color = RED\n class MyScene(Scene):\n # ...\n\n the background color will be set to RED, regardless of the contents of\n ``manim.cfg`` or the CLI arguments used when invoking manim.\n\n \"\"\"\n\n _OPTS = {\n \"background_color\",\n \"background_opacity\",\n \"custom_folders\",\n \"disable_caching\",\n \"ffmpeg_loglevel\",\n \"flush_cache\",\n \"frame_height\",\n \"frame_rate\",\n \"frame_width\",\n \"frame_x_radius\",\n \"frame_y_radius\",\n \"from_animation_number\",\n \"images_dir\",\n \"input_file\",\n \"js_renderer_path\",\n \"leave_progress_bars\",\n \"log_dir\",\n \"log_to_file\",\n \"max_files_cached\",\n \"media_dir\",\n \"movie_file_extension\",\n \"partial_movie_dir\",\n \"pixel_height\",\n \"pixel_width\",\n \"png_mode\",\n \"preview\",\n \"progress_bar\",\n \"save_as_gif\",\n \"save_last_frame\",\n \"save_pngs\",\n \"scene_names\",\n \"show_in_file_browser\",\n \"skip_animations\",\n \"sound\",\n \"tex_dir\",\n \"tex_template_file\",\n \"text_dir\",\n \"upto_animation_number\",\n \"use_js_renderer\",\n \"verbosity\",\n \"video_dir\",\n \"write_all\",\n \"write_to_movie\",\n }\n\n def __init__(self):\n self._d = {k: None for k in self._OPTS}\n\n # behave like a dict\n def __iter__(self):\n return iter(self._d)\n\n def __len__(self):\n return len(self._d)\n\n def __contains__(self, key):\n try:\n self.__getitem__(key)\n return True\n except AttributeError:\n return False\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __setitem__(self, key, val):\n getattr(ManimConfig, key).fset(self, val) # fset is the property's setter\n\n def update(self, obj):\n \"\"\"Digest the options found in another :class:`ManimConfig` or in a dict.\n\n Similar to :meth:`dict.update`, replaces the values of this object with\n those of ``obj``.\n\n Parameters\n ----------\n obj : Union[:class:`ManimConfig`, :class:`dict`]\n The object to copy values from.\n\n Returns\n -------\n None\n\n Raises\n -----\n :class:`AttributeError`\n If ``obj`` is a dict but contains keys that do not belong to any\n config options.\n\n See Also\n --------\n :meth:`~ManimConfig.digest_file`, :meth:`~ManimConfig.digest_args`,\n :meth:`~ManimConfig.digest_parser`\n\n \"\"\"\n\n if isinstance(obj, ManimConfig):\n self._d.update(obj._d)\n\n elif isinstance(obj, dict):\n # First update the underlying _d, then update other properties\n _dict = {k: v for k, v in obj.items() if k in self._d}\n for k, v in _dict.items():\n self[k] = v\n\n _dict = {k: v for k, v in obj.items() if k not in self._d}\n for k, v in _dict.items():\n self[k] = v\n\n # don't allow to delete anything\n def __delitem__(self, key):\n raise 
AttributeError(\"'ManimConfig' object does not support item deletion\")\n\n def __delattr__(self, key):\n raise AttributeError(\"'ManimConfig' object does not support item deletion\")\n\n # copy functions\n def copy(self):\n \"\"\"Deepcopy the contents of this ManimConfig.\n\n Returns\n -------\n :class:`ManimConfig`\n A copy of this object containing no shared references.\n\n See Also\n --------\n :func:`tempconfig`\n\n Notes\n -----\n This is the main mechanism behind :func:`tempconfig`.\n\n \"\"\"\n return copy.deepcopy(self)\n\n def __copy__(self):\n \"\"\"See ManimConfig.copy().\"\"\"\n return copy.deepcopy(self)\n\n def __deepcopy__(self, memo):\n \"\"\"See ManimConfig.copy().\"\"\"\n c = ManimConfig()\n # Deepcopying the underlying dict is enough because all properties\n # either read directly from it or compute their value on the fly from\n # vaulues read directly from it.\n c._d = copy.deepcopy(self._d, memo)\n return c\n\n # helper type-checking methods\n def _set_from_list(self, key, val, values):\n \"\"\"Set ``key`` to ``val`` if ``val`` is contained in ``values``.\"\"\"\n if val in values:\n self._d[key] = val\n else:\n raise ValueError(f\"attempted to set {key} to {val}; must be in {values}\")\n\n def _set_boolean(self, key, val):\n \"\"\"Set ``key`` to ``val`` if ``val`` is Boolean.\"\"\"\n if val in [True, False]:\n self._d[key] = val\n else:\n raise ValueError(f\"{key} must be boolean\")\n\n def _set_str(self, key, val):\n \"\"\"Set ``key`` to ``val`` if ``val`` is a string.\"\"\"\n if isinstance(val, str):\n self._d[key] = val\n elif not val:\n self._d[key] = \"\"\n else:\n raise ValueError(f\"{key} must be str or falsy value\")\n\n def _set_between(self, key, val, lo, hi):\n \"\"\"Set ``key`` to ``val`` if lo <= val <= hi.\"\"\"\n if lo <= val <= hi:\n self._d[key] = val\n else:\n raise ValueError(f\"{key} must be {lo} <= {key} <= {hi}\")\n\n def _set_pos_number(self, key, val, allow_inf):\n \"\"\"Set ``key`` to ``val`` if ``val`` is a positive integer.\"\"\"\n if isinstance(val, int) and val > -1:\n self._d[key] = val\n elif allow_inf and (val == -1 or val == float(\"inf\")):\n self._d[key] = float(\"inf\")\n else:\n raise ValueError(\n f\"{key} must be a non-negative integer (use -1 for infinity)\"\n )\n\n # builders\n def digest_parser(self, parser):\n \"\"\"Process the config options present in a :class:`ConfigParser` object.\n\n This method processes arbitrary parsers, not only those read from a\n single file, whereas :meth:`~ManimConfig.digest_file` can only process one\n file at a time.\n\n Parameters\n ----------\n parser : :class:`ConfigParser`\n An object reflecting the contents of one or many ``.cfg`` files. In\n particular, it may reflect the contents of mulitple files that have\n been parsed in a cascading fashion.\n\n Returns\n -------\n self : :class:`ManimConfig`\n This object, after processing the contents of ``parser``.\n\n See Also\n --------\n :func:`make_config_parser`, :meth:`~.ManimConfig.digest_file`,\n :meth:`~.ManimConfig.digest_args`,\n\n Notes\n -----\n If there are multiple ``.cfg`` files to process, it is always more\n efficient to parse them into a single :class:`ConfigParser` object\n first, and then call this function once (instead of calling\n :meth:`~.ManimConfig.digest_file` multiple times).\n\n Examples\n --------\n To digest the config options set in two files, first create a\n ConfigParser and parse both files and then digest the parser:\n\n .. 
code-block:: python\n\n parser = configparser.ConfigParser()\n parser.read([file1, file2])\n config = ManimConfig().digest_parser(parser)\n\n In fact, the global ``config`` object is initialized like so:\n\n .. code-block:: python\n\n parser = make_config_parser()\n config = ManimConfig().digest_parser(parser)\n\n \"\"\"\n self._parser = parser\n\n # boolean keys\n for key in [\n \"write_to_movie\",\n \"save_last_frame\",\n \"write_all\",\n \"save_pngs\",\n \"save_as_gif\",\n \"preview\",\n \"show_in_file_browser\",\n \"progress_bar\",\n \"sound\",\n \"leave_progress_bars\",\n \"log_to_file\",\n \"disable_caching\",\n \"flush_cache\",\n \"custom_folders\",\n \"skip_animations\",\n \"use_js_renderer\",\n ]:\n setattr(self, key, parser[\"CLI\"].getboolean(key, fallback=False))\n\n # int keys\n for key in [\n \"from_animation_number\",\n \"upto_animation_number\",\n \"frame_rate\",\n \"max_files_cached\",\n \"pixel_height\",\n \"pixel_width\",\n ]:\n setattr(self, key, parser[\"CLI\"].getint(key))\n\n # str keys\n for key in [\n \"verbosity\",\n \"media_dir\",\n \"log_dir\",\n \"video_dir\",\n \"images_dir\",\n \"text_dir\",\n \"tex_dir\",\n \"partial_movie_dir\",\n \"input_file\",\n \"output_file\",\n \"png_mode\",\n \"movie_file_extension\",\n \"background_color\",\n \"js_renderer_path\",\n ]:\n setattr(self, key, parser[\"CLI\"].get(key, fallback=\"\", raw=True))\n\n # float keys\n for key in [\"background_opacity\"]:\n setattr(self, key, parser[\"CLI\"].getfloat(key))\n\n # other logic\n self[\"frame_height\"] = 8.0\n self[\"frame_width\"] = (\n self[\"frame_height\"] * self[\"pixel_width\"] / self[\"pixel_height\"]\n )\n\n val = parser[\"CLI\"].get(\"tex_template_file\")\n if val:\n setattr(self, \"tex_template_file\", val)\n\n val = parser[\"ffmpeg\"].get(\"loglevel\")\n if val:\n setattr(self, \"ffmpeg_loglevel\", val)\n\n return self\n\n def digest_args(self, args):\n \"\"\"Process the config options present in CLI arguments.\n\n Parameters\n ----------\n args : :class:`argparse.Namespace`\n An object returned by :func:`.main_utils.parse_args()`.\n\n Returns\n -------\n self : :class:`ManimConfig`\n This object, after processing the contents of ``parser``.\n\n See Also\n --------\n :func:`.main_utils.parse_args()`, :meth:`~.ManimConfig.digest_parser`,\n :meth:`~.ManimConfig.digest_file`\n\n Notes\n -----\n If ``args.config_file`` is a non-empty string, ``ManimConfig`` tries to digest the\n contents of said file with :meth:`~ManimConfig.digest_file` before\n digesting any other CLI arguments.\n\n \"\"\"\n # if a config file has been passed, digest it first so that other CLI\n # flags supersede it\n if args.config_file:\n self.digest_file(args.config_file)\n\n self.input_file = args.file\n self.scene_names = args.scene_names if args.scene_names is not None else []\n self.output_file = args.output_file\n\n for key in [\n \"preview\",\n \"show_in_file_browser\",\n \"sound\",\n \"leave_progress_bars\",\n \"write_to_movie\",\n \"save_last_frame\",\n \"save_pngs\",\n \"save_as_gif\",\n \"write_all\",\n \"disable_caching\",\n \"flush_cache\",\n \"transparent\",\n \"scene_names\",\n \"verbosity\",\n \"background_color\",\n ]:\n if hasattr(args, key):\n attr = getattr(args, key)\n # if attr is None, then no argument was passed and we should\n # not change the current config\n if attr is not None:\n self[key] = attr\n\n # dry_run is special because it can only be set to True\n if hasattr(args, \"dry_run\"):\n if getattr(args, \"dry_run\"):\n self[\"dry_run\"] = True\n\n for key in [\n 
\"media_dir\", # always set this one first\n \"log_dir\",\n \"log_to_file\", # always set this one last\n ]:\n if hasattr(args, key):\n attr = getattr(args, key)\n # if attr is None, then no argument was passed and we should\n # not change the current config\n if attr is not None:\n self[key] = attr\n\n # The -s (--save_last_frame) flag invalidates -w (--write_to_movie).\n if self[\"save_last_frame\"]:\n self[\"write_to_movie\"] = False\n\n # Handle the -n flag.\n nflag = args.from_animation_number\n if nflag is not None:\n if \",\" in nflag:\n start, end = nflag.split(\",\")\n self.from_animation_number = int(start)\n self.upto_animation_number = int(end)\n else:\n self.from_animation_number = int(nflag)\n\n # Handle the quality flags\n self.quality = _determine_quality(args)\n\n # Handle the -r flag.\n rflag = args.resolution\n if rflag is not None:\n try:\n h, w = rflag.split(\",\")\n self.pixel_height = int(h)\n self.pixel_width = int(w)\n except ValueError:\n raise ValueError(\n f'invalid argument {rflag} for -r flag (must have a comma \",\")'\n )\n\n # Handle --custom_folders\n if args.custom_folders:\n for opt in [\n \"media_dir\",\n \"video_dir\",\n \"images_dir\",\n \"text_dir\",\n \"tex_dir\",\n \"log_dir\",\n \"partial_movie_dir\",\n ]:\n self[opt] = self._parser[\"custom_folders\"].get(opt, raw=True)\n # --media_dir overrides the deaful.cfg file\n if hasattr(args, \"media_dir\") and args.media_dir:\n self.media_dir = args.media_dir\n\n return self\n\n def digest_file(self, filename):\n \"\"\"Process the config options present in a ``.cfg`` file.\n\n This method processes a single ``.cfg`` file, whereas\n :meth:`~ManimConfig.digest_parser` can process arbitrary parsers, built\n perhaps from multiple ``.cfg`` files.\n\n Parameters\n ----------\n filename : :class:`str`\n Path to the ``.cfg`` file.\n\n Returns\n -------\n self : :class:`ManimConfig`\n This object, after processing the contents of ``filename``.\n\n See Also\n --------\n :meth:`~ManimConfig.digest_file`, :meth:`~ManimConfig.digest_args`,\n :func:`make_config_parser`\n\n Notes\n -----\n If there are multiple ``.cfg`` files to process, it is always more\n efficient to parse them into a single :class:`ConfigParser` object\n first and digesting them with one call to\n :meth:`~ManimConfig.digest_parser`, instead of calling this method\n multiple times.\n\n \"\"\"\n if filename:\n return self.digest_parser(make_config_parser(filename))\n\n # config options are properties\n preview = property(\n lambda self: self._d[\"preview\"],\n lambda self, val: self._set_boolean(\"preview\", val),\n doc=\"Whether to play the rendered movie (-p).\",\n )\n\n show_in_file_browser = property(\n lambda self: self._d[\"show_in_file_browser\"],\n lambda self, val: self._set_boolean(\"show_in_file_browser\", val),\n doc=\"Whether to show the output file in the file browser (-f).\",\n )\n\n progress_bar = property(\n lambda self: self._d[\"progress_bar\"],\n lambda self, val: self._set_boolean(\"progress_bar\", val),\n doc=\"Whether to show progress bars while rendering animations.\",\n )\n\n leave_progress_bars = property(\n lambda self: self._d[\"leave_progress_bars\"],\n lambda self, val: self._set_boolean(\"leave_progress_bars\", val),\n doc=\"Whether to leave the progress bar for each animation.\",\n )\n\n @property\n def log_to_file(self):\n \"\"\"Whether to save logs to a file.\"\"\"\n return self._d[\"log_to_file\"]\n\n @log_to_file.setter\n def log_to_file(self, val):\n self._set_boolean(\"log_to_file\", val)\n if val:\n if not 
os.path.exists(self[\"log_dir\"]):\n os.makedirs(self[\"log_dir\"])\n set_file_logger(self, self[\"verbosity\"])\n\n sound = property(\n lambda self: self._d[\"sound\"],\n lambda self, val: self._set_boolean(\"sound\", val),\n doc=\"Whether to play a sound to notify when a scene is rendered (no flag).\",\n )\n\n write_to_movie = property(\n lambda self: self._d[\"write_to_movie\"],\n lambda self, val: self._set_boolean(\"write_to_movie\", val),\n doc=\"Whether to render the scene to a movie file (-w).\",\n )\n\n save_last_frame = property(\n lambda self: self._d[\"save_last_frame\"],\n lambda self, val: self._set_boolean(\"save_last_frame\", val),\n doc=\"Whether to save the last frame of the scene as an image file (-s).\",\n )\n\n write_all = property(\n lambda self: self._d[\"write_all\"],\n lambda self, val: self._set_boolean(\"write_all\", val),\n doc=\"Whether to render all scenes in the input file (-a).\",\n )\n\n save_pngs = property(\n lambda self: self._d[\"save_pngs\"],\n lambda self, val: self._set_boolean(\"save_pngs\", val),\n doc=\"Whether to save all frames in the scene as images files (-g).\",\n )\n\n save_as_gif = property(\n lambda self: self._d[\"save_as_gif\"],\n lambda self, val: self._set_boolean(\"save_as_gif\", val),\n doc=\"Whether to save the rendered scene in .gif format (-i).\",\n )\n\n @property\n def verbosity(self):\n \"\"\"Logger verbosity; \"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", or \"CRITICAL\" (-v).\"\"\"\n return self._d[\"verbosity\"]\n\n @verbosity.setter\n def verbosity(self, val):\n \"\"\"Verbosity level of the logger.\"\"\"\n self._set_from_list(\n \"verbosity\",\n val,\n [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n )\n logging.getLogger(\"manim\").setLevel(val)\n\n ffmpeg_loglevel = property(\n lambda self: self._d[\"ffmpeg_loglevel\"],\n lambda self, val: self._set_from_list(\n \"ffmpeg_loglevel\", val, [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"]\n ),\n doc=\"Verbosity level of ffmpeg (no flag).\",\n )\n\n pixel_width = property(\n lambda self: self._d[\"pixel_width\"],\n lambda self, val: self._set_pos_number(\"pixel_width\", val, False),\n doc=\"Frame width in pixels (--resolution, -r).\",\n )\n\n pixel_height = property(\n lambda self: self._d[\"pixel_height\"],\n lambda self, val: self._set_pos_number(\"pixel_height\", val, False),\n doc=\"Frame height in pixels (--resolution, -r).\",\n )\n\n aspect_ratio = property(\n lambda self: self._d[\"pixel_width\"] / self._d[\"pixel_height\"],\n doc=\"Aspect ratio (width / height) in pixels (--resolution, -r).\",\n )\n\n frame_height = property(\n lambda self: self._d[\"frame_height\"],\n lambda self, val: self._d.__setitem__(\"frame_height\", val),\n doc=\"Frame height in logical units (no flag).\",\n )\n\n frame_width = property(\n lambda self: self._d[\"frame_width\"],\n lambda self, val: self._d.__setitem__(\"frame_width\", val),\n doc=\"Frame width in logical units (no flag).\",\n )\n\n frame_y_radius = property(\n lambda self: self._d[\"frame_height\"] / 2,\n lambda self, val: (\n self._d.__setitem__(\"frame_y_radius\", val)\n or self._d.__setitem__(\"frame_height\", 2 * val)\n ),\n doc=\"Half the frame height (no flag).\",\n )\n\n frame_x_radius = property(\n lambda self: self._d[\"frame_width\"] / 2,\n lambda self, val: (\n self._d.__setitem__(\"frame_x_radius\", val)\n or self._d.__setitem__(\"frame_width\", 2 * val)\n ),\n doc=\"Half the frame width (no flag).\",\n )\n\n top = property(\n lambda self: self.frame_y_radius * constants.UP,\n doc=\"Coordinate 
at the center top of the frame.\",\n )\n\n bottom = property(\n lambda self: self.frame_y_radius * constants.DOWN,\n doc=\"Coordinate at the center bottom of the frame.\",\n )\n\n left_side = property(\n lambda self: self.frame_x_radius * constants.LEFT,\n doc=\"Coordinate at the middle left of the frame.\",\n )\n\n right_side = property(\n lambda self: self.frame_x_radius * constants.RIGHT,\n doc=\"Coordinate at the middle right of the frame.\",\n )\n\n frame_rate = property(\n lambda self: self._d[\"frame_rate\"],\n lambda self, val: self._d.__setitem__(\"frame_rate\", val),\n doc=\"Frame rate in frames per second (-q).\",\n )\n\n background_color = property(\n lambda self: self._d[\"background_color\"],\n lambda self, val: self._d.__setitem__(\"background_color\", colour.Color(val)),\n doc=\"Background color of the scene (-c).\",\n )\n\n from_animation_number = property(\n lambda self: self._d[\"from_animation_number\"],\n lambda self, val: self._d.__setitem__(\"from_animation_number\", val),\n doc=\"Start rendering animations at this number (-n).\",\n )\n\n upto_animation_number = property(\n lambda self: self._d[\"upto_animation_number\"],\n lambda self, val: self._set_pos_number(\"upto_animation_number\", val, True),\n doc=\"Stop rendering animations at this nmber. Use -1 to avoid skipping (-n).\",\n )\n\n skip_animations = property(\n lambda self: self._d[\"skip_animations\"],\n lambda self, val: self._set_boolean(\"skip_animations\", val),\n doc=\"Whether to skip the next animation. Internal use only.\",\n )\n\n max_files_cached = property(\n lambda self: self._d[\"max_files_cached\"],\n lambda self, val: self._set_pos_number(\"max_files_cached\", val, True),\n doc=\"Maximum number of files cached. Use -1 for infinity (no flag).\",\n )\n\n flush_cache = property(\n lambda self: self._d[\"flush_cache\"],\n lambda self, val: self._set_boolean(\"flush_cache\", val),\n doc=\"Whether to delete all the cached partial movie files.\",\n )\n\n disable_caching = property(\n lambda self: self._d[\"disable_caching\"],\n lambda self, val: self._set_boolean(\"disable_caching\", val),\n doc=\"Whether to use scene caching.\",\n )\n\n png_mode = property(\n lambda self: self._d[\"png_mode\"],\n lambda self, val: self._set_from_list(\"png_mode\", val, [\"RGB\", \"RGBA\"]),\n doc=\"Either RGA (no transparency) or RGBA (with transparency) (no flag).\",\n )\n\n movie_file_extension = property(\n lambda self: self._d[\"movie_file_extension\"],\n lambda self, val: self._set_from_list(\n \"movie_file_extension\", val, [\".mp4\", \".mov\"]\n ),\n doc=\"Either .mp4 or .mov (no flag).\",\n )\n\n background_opacity = property(\n lambda self: self._d[\"background_opacity\"],\n lambda self, val: self._set_between(\"background_opacity\", val, 0, 1),\n doc=\"A number between 0.0 (fully transparent) and 1.0 (fully opaque).\",\n )\n\n frame_size = property(\n lambda self: (self._d[\"pixel_width\"], self._d[\"pixel_height\"]),\n lambda self, tup: (\n self._d.__setitem__(\"pixel_width\", tup[0])\n or self._d.__setitem__(\"pixel_height\", tup[1])\n ),\n doc=\"Tuple with (pixel width, pixel height) (no flag).\",\n )\n\n @property\n def quality(self):\n \"\"\"Video quality (-q).\"\"\"\n keys = [\"pixel_width\", \"pixel_height\", \"frame_rate\"]\n q = {k: self[k] for k in keys}\n for qual in constants.QUALITIES:\n if all([q[k] == constants.QUALITIES[qual][k] for k in keys]):\n return qual\n else:\n return None\n\n @quality.setter\n def quality(self, qual):\n if qual not in constants.QUALITIES:\n raise KeyError(f\"quality 
must be one of {list(constants.QUALITIES.keys())}\")\n q = constants.QUALITIES[qual]\n self.frame_size = q[\"pixel_width\"], q[\"pixel_height\"]\n self.frame_rate = q[\"frame_rate\"]\n\n @property\n def transparent(self):\n \"\"\"Whether the background opacity is 0.0 (-t).\"\"\"\n return self._d[\"background_opacity\"] == 0.0\n\n @transparent.setter\n def transparent(self, val):\n if val:\n self.png_mode = \"RGBA\"\n self.movie_file_extension = \".mov\"\n self.background_opacity = 0.0\n else:\n self.png_mode = \"RGB\"\n self.movie_file_extension = \".mp4\"\n self.background_opacity = 1.0\n\n @property\n def dry_run(self):\n \"\"\"Whether dry run is enabled.\"\"\"\n return (\n self.write_to_movie is False\n and self.write_all is False\n and self.save_last_frame is False\n and self.save_pngs is False\n and self.save_as_gif is False\n )\n\n @dry_run.setter\n def dry_run(self, val):\n if val:\n self.write_to_movie = False\n self.write_all = False\n self.save_last_frame = False\n self.save_pngs = False\n self.save_as_gif = False\n else:\n raise ValueError(\n \"It is unclear what it means to set dry_run to \"\n \"False. Instead, try setting each option \"\n \"individually. (write_to_movie, write_alll, \"\n \"save_last_frame, save_pngs, or save_as_gif)\"\n )\n\n @property\n def use_js_renderer(self):\n \"\"\"Whether to use JS renderer or not (default).\"\"\"\n self._d[\"use_js_renderer\"]\n\n @use_js_renderer.setter\n def use_js_renderer(self, val):\n self._d[\"use_js_renderer\"] = val\n if val:\n self[\"disable_caching\"] = True\n\n js_renderer_path = property(\n lambda self: self._d[\"js_renderer_path\"],\n lambda self, val: self._d.__setitem__(\"js_renderer_path\", val),\n doc=\"Path to JS renderer.\",\n )\n\n media_dir = property(\n lambda self: self._d[\"media_dir\"],\n lambda self, val: self._set_dir(\"media_dir\", val),\n doc=\"Main output directory. See :meth:`ManimConfig.get_dir`.\",\n )\n\n def get_dir(self, key, **kwargs):\n \"\"\"Resolve a config option that stores a directory.\n\n Config options that store directories may depend on one another. This\n method is used to provide the actual directory to the end user.\n\n Parameters\n ----------\n key : :class:`str`\n The config option to be resolved. Must be an option ending in\n ``'_dir'``, for example ``'media_dir'`` or ``'video_dir'``.\n\n kwargs : :class:`str`\n Any strings to be used when resolving the directory.\n\n Returns\n -------\n :class:`pathlib.Path`\n Path to the requested directory. If the path resolves to the empty\n string, return ``None`` instead.\n\n Raises\n ------\n :class:`KeyError`\n When ``key`` is not a config option that stores a directory and\n thus :meth:`~ManimConfig.get_dir` is not appropriate; or when\n ``key`` is appropriate but there is not enough information to\n resolve the directory.\n\n Notes\n -----\n Standard :meth:`str.format` syntax is used to resolve the paths so the\n paths may contain arbitrary placeholders using f-string notation.\n However, these will require ``kwargs`` to contain the required values.\n\n Examples\n --------\n\n The value of ``config.tex_dir`` is ``'{media_dir}/Tex'`` by default,\n i.e. it is a subfolder of wherever ``config.media_dir`` is located. In\n order to get the *actual* directory, use :meth:`~ManimConfig.get_dir`.\n\n .. 
code-block:: python\n\n >>> from manim import config\n >>> config.tex_dir\n '{media_dir}/Tex'\n >>> config.media_dir\n './media'\n >>> config.get_dir(\"tex_dir\").as_posix()\n 'media/Tex'\n\n Resolving directories is done in a lazy way, at the last possible\n moment, to reflect any changes in other config options:\n\n .. code-block:: python\n\n >>> config.media_dir = 'my_media_dir'\n >>> config.get_dir(\"tex_dir\").as_posix()\n 'my_media_dir/Tex'\n\n Some directories depend on information that is not available to\n :class:`ManimConfig`. For example, the default value of `video_dir`\n includes the name of the input file and the video quality\n (e.g. 480p15). This informamtion has to be supplied via ``kwargs``:\n\n .. code-block:: python\n\n >>> config.video_dir\n '{media_dir}/videos/{module_name}/{quality}'\n >>> config.get_dir(\"video_dir\")\n Traceback (most recent call last):\n KeyError: 'video_dir {media_dir}/videos/{module_name}/{quality} requires the following keyword arguments: module_name'\n >>> config.get_dir(\"video_dir\", module_name=\"myfile\").as_posix()\n 'my_media_dir/videos/myfile/1080p60'\n\n Note the quality does not need to be passed as keyword argument since\n :class:`ManimConfig` does store information about quality.\n\n Directories may be recursively defined. For example, the config option\n ``partial_movie_dir`` depends on ``video_dir``, which in turn depends\n on ``media_dir``:\n\n .. code-block:: python\n\n >>> config.partial_movie_dir\n '{video_dir}/partial_movie_files/{scene_name}'\n >>> config.get_dir(\"partial_movie_dir\")\n Traceback (most recent call last):\n KeyError: 'partial_movie_dir {video_dir}/partial_movie_files/{scene_name} requires the following keyword arguments: scene_name'\n >>> config.get_dir(\"partial_movie_dir\", module_name=\"myfile\", scene_name=\"myscene\").as_posix()\n 'my_media_dir/videos/myfile/1080p60/partial_movie_files/myscene'\n\n Standard f-string syntax is used. Arbitrary names can be used when\n defining directories, as long as the corresponding values are passed to\n :meth:`ManimConfig.get_dir` via ``kwargs``.\n\n .. 
code-block:: python\n\n >>> config.media_dir = \"{dir1}/{dir2}\"\n >>> config.get_dir(\"media_dir\")\n Traceback (most recent call last):\n KeyError: 'media_dir {dir1}/{dir2} requires the following keyword arguments: dir1'\n >>> config.get_dir(\"media_dir\", dir1='foo', dir2='bar').as_posix()\n 'foo/bar'\n >>> config.media_dir = \"./media\"\n >>> config.get_dir(\"media_dir\").as_posix()\n 'media'\n\n \"\"\"\n dirs = [\n \"media_dir\",\n \"video_dir\",\n \"images_dir\",\n \"text_dir\",\n \"tex_dir\",\n \"log_dir\",\n \"input_file\",\n \"output_file\",\n \"partial_movie_dir\",\n ]\n if key not in dirs:\n raise KeyError(\n \"must pass one of \"\n \"{media,video,images,text,tex,log}_dir \"\n \"or {input,output}_file\"\n )\n\n dirs.remove(key) # a path cannot contain itself\n\n all_args = {k: self._d[k] for k in dirs}\n all_args.update(kwargs)\n all_args[\"quality\"] = f\"{self.pixel_height}p{self.frame_rate}\"\n\n path = self._d[key]\n while \"{\" in path:\n try:\n path = path.format(**all_args)\n except KeyError as exc:\n raise KeyError(\n f\"{key} {self._d[key]} requires the following \"\n + \"keyword arguments: \"\n + \" \".join(exc.args)\n ) from exc\n\n return Path(path) if path else None\n\n def _set_dir(self, key, val):\n if isinstance(val, Path):\n self._d.__setitem__(key, str(val))\n else:\n self._d.__setitem__(key, val)\n\n log_dir = property(\n lambda self: self._d[\"log_dir\"],\n lambda self, val: self._set_dir(\"log_dir\", val),\n doc=\"Directory to place logs. See :meth:`ManimConfig.get_dir`.\",\n )\n\n video_dir = property(\n lambda self: self._d[\"video_dir\"],\n lambda self, val: self._set_dir(\"video_dir\", val),\n doc=\"Directory to place videos (no flag). See :meth:`ManimConfig.get_dir`.\",\n )\n\n images_dir = property(\n lambda self: self._d[\"images_dir\"],\n lambda self, val: self._set_dir(\"images_dir\", val),\n doc=\"Directory to place images (no flag). See :meth:`ManimConfig.get_dir`.\",\n )\n\n text_dir = property(\n lambda self: self._d[\"text_dir\"],\n lambda self, val: self._set_dir(\"text_dir\", val),\n doc=\"Directory to place text (no flag). See :meth:`ManimConfig.get_dir`.\",\n )\n\n tex_dir = property(\n lambda self: self._d[\"tex_dir\"],\n lambda self, val: self._set_dir(\"tex_dir\", val),\n doc=\"Directory to place tex (no flag). See :meth:`ManimConfig.get_dir`.\",\n )\n\n partial_movie_dir = property(\n lambda self: self._d[\"partial_movie_dir\"],\n lambda self, val: self._set_dir(\"partial_movie_dir\", val),\n doc=\"Directory to place partial movie files (no flag). See :meth:`ManimConfig.get_dir`.\",\n )\n\n custom_folders = property(\n lambda self: self._d[\"custom_folders\"],\n lambda self, val: self._set_boolean(\"custom_folders\", val),\n doc=\"Whether to use custom folder output.\",\n )\n\n input_file = property(\n lambda self: self._d[\"input_file\"],\n lambda self, val: self._set_dir(\"input_file\", val),\n doc=\"Input file name.\",\n )\n\n output_file = property(\n lambda self: self._d[\"output_file\"],\n lambda self, val: self._set_dir(\"output_file\", val),\n doc=\"Output file name (-o).\",\n )\n\n scene_names = property(\n lambda self: self._d[\"scene_names\"],\n lambda self, val: self._d.__setitem__(\"scene_names\", val),\n doc=\"Scenes to play from file.\",\n )\n\n @property\n def tex_template(self):\n \"\"\"Template used when rendering Tex. 
See :class:`.TexTemplate`.\"\"\"\n if not hasattr(self, \"_tex_template\") or not self._tex_template:\n fn = self._d[\"tex_template_file\"]\n if fn:\n self._tex_template = TexTemplateFromFile(filename=fn)\n else:\n self._tex_template = TexTemplate()\n return self._tex_template\n\n @tex_template.setter\n def tex_template(self, val):\n if isinstance(val, (TexTemplateFromFile, TexTemplate)):\n self._tex_template = val\n\n @property\n def tex_template_file(self):\n \"\"\"File to read Tex template from (no flag). See :class:`.TexTemplateFromFile`.\"\"\"\n return self._d[\"tex_template_file\"]\n\n @tex_template_file.setter\n def tex_template_file(self, val):\n if val:\n if not os.access(val, os.R_OK):\n logging.getLogger(\"manim\").warning(\n f\"Custom TeX template {val} not found or not readable.\"\n )\n else:\n self._d[\"tex_template_file\"] = Path(val)\n self._tex_template = TexTemplateFromFile(filename=val)\n else:\n self._d[\"tex_template_file\"] = val # actually set the falsy value\n self._tex_template = TexTemplate() # but don't use it\n\n\nclass ManimFrame(Mapping):\n _OPTS = {\n \"pixel_width\",\n \"pixel_height\",\n \"aspect_ratio\",\n \"frame_height\",\n \"frame_width\",\n \"frame_y_radius\",\n \"frame_x_radius\",\n \"top\",\n \"bottom\",\n \"left_side\",\n \"right_side\",\n }\n _CONSTANTS = {\n \"UP\": np.array((0.0, 1.0, 0.0)),\n \"DOWN\": np.array((0.0, -1.0, 0.0)),\n \"RIGHT\": np.array((1.0, 0.0, 0.0)),\n \"LEFT\": np.array((-1.0, 0.0, 0.0)),\n \"IN\": np.array((0.0, 0.0, -1.0)),\n \"OUT\": np.array((0.0, 0.0, 1.0)),\n \"ORIGIN\": np.array((0.0, 0.0, 0.0)),\n \"X_AXIS\": np.array((1.0, 0.0, 0.0)),\n \"Y_AXIS\": np.array((0.0, 1.0, 0.0)),\n \"Z_AXIS\": np.array((0.0, 0.0, 1.0)),\n \"UL\": np.array((-1.0, 1.0, 0.0)),\n \"UR\": np.array((1.0, 1.0, 0.0)),\n \"DL\": np.array((-1.0, -1.0, 0.0)),\n \"DR\": np.array((1.0, -1.0, 0.0)),\n }\n\n def __init__(self, c):\n if not isinstance(c, ManimConfig):\n raise TypeError(\"argument must be instance of 'ManimConfig'\")\n # need to use __dict__ directly because setting attributes is not\n # allowed (see __setattr__)\n self.__dict__[\"_c\"] = c\n\n # there are required by parent class Mapping to behave like a dict\n def __getitem__(self, key):\n if key in self._OPTS:\n return self._c[key]\n elif key in self._CONSTANTS:\n return self._CONSTANTS[key]\n else:\n raise KeyError(key)\n\n def __iter__(self):\n return iter(list(self._OPTS) + list(self._CONSTANTS))\n\n def __len__(self):\n return len(self._OPTS)\n\n # make this truly immutable\n def __setattr__(self, attr, val):\n raise TypeError(\"'ManimFrame' object does not support item assignment\")\n\n def __setitem__(self, key, val):\n raise TypeError(\"'ManimFrame' object does not support item assignment\")\n\n def __delitem__(self, key):\n raise TypeError(\"'ManimFrame' object does not support item deletion\")\n\n\nfor opt in list(ManimFrame._OPTS) + list(ManimFrame._CONSTANTS):\n setattr(ManimFrame, opt, property(lambda self, o=opt: self[o]))\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
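The `make_config_parser` helper in the row above leans on `configparser`'s cascading semantics: the required library-wide file is loaded with `read_file` (which raises if the file is missing), and the optional user- and folder-wide files are loaded with `read` (which silently skips missing files), so later files override earlier ones. A self-contained sketch of the same pattern; the file names here are throwaways invented for the demo:

```python
import configparser
from pathlib import Path

# Create two tiny .cfg files so the example runs on its own.
Path("library_demo.cfg").write_text("[CLI]\nbackground_color = BLACK\npixel_width = 1920\n")
Path("folder_demo.cfg").write_text("[CLI]\nbackground_color = WHITE\n")

parser = configparser.ConfigParser()
with open("library_demo.cfg") as f:
    parser.read_file(f)            # required defaults; raises if missing
parser.read(["folder_demo.cfg"])   # optional overrides; missing files are skipped

print(parser["CLI"]["background_color"])  # WHITE -- the later file wins
print(parser["CLI"]["pixel_width"])       # 1920  -- inherited from the defaults
```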
ap439111/Sentiment-Analysis-Model-AmazonSageMaker
|
[
"8e804e8795d6dfcaf4ac7936cf95204ae94c7ad5"
] |
[
"serve/predict.py"
] |
[
"import argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\nfrom utils import review_to_words, convert_and_pad\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the store model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef input_fn(serialized_input_data, content_type):\n print('Deserializing the input data.')\n if content_type == 'text/plain':\n data = serialized_input_data.decode('utf-8')\n return data\n raise Exception('Requested unsupported ContentType in content_type: ' + content_type)\n\ndef output_fn(prediction_output, accept):\n print('Serializing the generated output.')\n return str(prediction_output)\n\ndef predict_fn(input_data, model):\n print('Inferring sentiment of input data.')\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n if model.word_dict is None:\n raise Exception('Model has not been loaded properly, no word_dict.')\n \n # TODO: Process input_data so that it is ready to be sent to our model.\n # You should produce two variables:\n # data_X - A sequence of length 500 which represents the converted review\n # data_len - The length of the review\n input_data_2words = review_to_words(input_data)\n \n data_X, data_len = convert_and_pad(model.word_dict, input_data_2words)\n\n # Using data_X and data_len we construct an appropriate input tensor. Remember\n # that our model expects input data of the form 'len, review[500]'.\n data_pack = np.hstack((data_len, data_X))\n data_pack = data_pack.reshape(1, -1)\n \n data = torch.from_numpy(data_pack)\n data = data.to(device)\n\n # Make sure to put the model into evaluation mode\n model.eval()\n\n # TODO: Compute the result of applying the model to the input data. The variable `result` should\n # be a numpy array which contains a single integer which is either 1 or 0\n with torch.no_grad():\n output = model(data)\n result = np.round(output.numpy())\n\n return result\n"
] |
[
[
"numpy.hstack",
"torch.load",
"torch.from_numpy",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
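The `predict_fn` in the row above packs the review length and the zero-padded word-id sequence into a single row (`np.hstack` followed by `reshape(1, -1)`) because the LSTM expects its input laid out as `len, review[500]`. A minimal sketch of that packing, with a made-up id sequence and a shortened pad length:

```python
import numpy as np
import torch

pad_len = 10                        # the real model pads to 500
word_ids = [4, 17, 9]               # hypothetical encoded review
data_len = len(word_ids)

data_X = np.zeros(pad_len, dtype=np.int64)
data_X[:data_len] = word_ids        # left-aligned ids, zeros as padding

# First element is the true length; the rest is the padded sequence.
packed = np.hstack(([data_len], data_X)).reshape(1, -1)
tensor = torch.from_numpy(packed)
print(tensor.shape)                 # torch.Size([1, 11])
```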
yuhuihan/Reinforcement-Learning
|
[
"02bba66885bcd9cd9e13881c573ba5778cc3b93d",
"02bba66885bcd9cd9e13881c573ba5778cc3b93d"
] |
[
"vvlab/utils/OUProcess.py",
"vvlab/models/actor_net.py"
] |
[
"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\n@author: Jiawei Wu\n@create time: 2020-04-06 15:36\n@edit time: 2020-04-06 15:37\n@FilePath: /vvlab/utils/OUProcess.py\n@desc: \n\"\"\"\n\nimport numpy as np\n\nclass OUProcess(object):\n \"\"\"Ornstein-Uhlenbeck process\"\"\"\n\n def __init__(self, x_size, mu=0, theta=0.15, sigma=0.3):\n self.x = np.ones(x_size) * mu\n self.x_size = x_size\n self.mu = mu\n self.theta = theta\n self.sigma = sigma\n\n def __call__(self):\n return self.noise()\n \n def noise(self):\n dx = self.theta * (self.mu - self.x) + self.sigma * np.random.randn(self.x_size)\n self.x = self.x + dx\n return self.x",
"#!/usr/bin/env python\n# coding=utf-8\n\"\"\"\n@author: Jiawei Wu\n@create time: 2019-12-06 23:23\n@edit time: 2019-12-12 11:11\n@desc: DDPG中Actor使用的网络\n特点是有一个bound的特殊参数。因为Actor往往只输出一个动作,但是有上下限.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nCUDA = torch.cuda.is_available()\n\n\nclass SimpleActorNet(nn.Module):\n \"\"\"定义Actor的网络结构\"\"\"\n\n def __init__(self, n_states, n_actions, n_neurons=30, a_bound=1):\n \"\"\"\n 定义隐藏层和输出层参数\n @param n_obs: number of observations\n @param n_actions: number of actions\n @param n_neurons: 隐藏层神经元数目\n @param a_bound: action的倍率\n \"\"\"\n super(SimpleActorNet, self).__init__()\n self.bound = a_bound\n self.fc1 = nn.Linear(n_states, n_neurons)\n self.fc1.weight.data.normal_(0, 0.1)\n self.out = nn.Linear(n_neurons, n_actions)\n self.out.weight.data.normal_(0, 0.1)\n if CUDA:\n self.bound = torch.FloatTensor([self.bound]).cuda()\n else:\n self.bound = torch.FloatTensor([self.bound])\n\n def forward(self, x):\n \"\"\"\n 定义网络结构: 第一层网络->ReLU激活->输出层->tanh激活->softmax->输出\n \"\"\"\n x = x.cuda() if CUDA else x\n x = self.fc1(x)\n x = F.relu(x)\n x = self.out(x)\n action_value = F.tanh(x)\n action_value = action_value * self.bound\n return action_value\n\n"
] |
[
[
"numpy.random.randn",
"numpy.ones"
],
[
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.nn.functional.tanh"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
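The `OUProcess` class in the row above is the standard Ornstein-Uhlenbeck exploration noise used with DDPG: each step pulls the state back toward the mean `mu` (at rate `theta`) and adds a Gaussian kick (scaled by `sigma`), so successive samples are temporally correlated rather than i.i.d. A quick NumPy-only usage sketch restating the same update:

```python
import numpy as np

class OUProcess:
    """Minimal restatement of the process above for a standalone demo."""
    def __init__(self, x_size, mu=0.0, theta=0.15, sigma=0.3):
        self.x = np.ones(x_size) * mu
        self.mu, self.theta, self.sigma = mu, theta, sigma

    def noise(self):
        # Mean reversion toward mu plus a Gaussian perturbation.
        dx = self.theta * (self.mu - self.x) + self.sigma * np.random.randn(len(self.x))
        self.x = self.x + dx
        return self.x

ou = OUProcess(x_size=2)
samples = np.array([ou.noise() for _ in range(5)])
print(samples.shape)  # (5, 2); consecutive rows drift smoothly, not independently
```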
Programmer-RD-AI/Tesla-Stock-Prediction-RNN-PyTorch
|
[
"bbbff92999776106debaf081cb383334e9e8fa32"
] |
[
"wandb/run-20210820_110019-1sf9o95p/files/code/run.py"
] |
[
"import torch\nimport numpy as np\nimport pandas as pd\nfrom torch.nn import *\nfrom torch.optim import *\nfrom help_funcs import *\nfrom model import *\nfrom sklearn.preprocessing import (\n StandardScaler,\n RobustScaler,\n MinMaxScaler,\n MaxAbsScaler,\n OneHotEncoder,\n LabelEncoder,\n Normalizer,\n)\n\ndata = pd.read_csv(\"./data/data.csv\")\ndata = data[\"High\"]\ndata.dropna(inplace=True)\ndata = torch.from_numpy(np.array(data.tolist()))\ndata_input = data.view(1, -1)[:1, :-1].to(device).float()\ndata_target = data.view(1, -1)[:1, 1:].to(device).float()\nmodel = Model()\ncriterion = MSELoss()\noptimizer = LBFGS(model.parameters(), lr=0.8)\nname = \"baseline\"\nepochs = 50\n# model = train(\n# optimizer, criterion, model, data_input, data_target, name=name, epochs=epochs\n# )\npreprocessings = [\n StandardScaler,\n RobustScaler,\n MinMaxScaler,\n MaxAbsScaler,\n OneHotEncoder,\n LabelEncoder,\n Normalizer,\n]\nfor preprocessing in preprocessings:\n model = Model().to(device)\n criterion = MSELoss()\n optimizer = LBFGS(model.parameters(), lr=0.8)\n name = f'{preprocessing()}-preprocessing'\n data = pd.read_csv(\"./data/data.csv\")\n data = data[\"High\"]\n data.dropna(inplace=True)\n preprocessing = preprocessing()\n preprocessing.fit(np.array(data).reshape(-1,1))\n data = preprocessing.transform(np.array(data).reshape(-1,1))\n data = np.array(data.tolist())\n data = torch.from_numpy(data)\n data_input = data.view(1, -1)[:1, :-1].to(device).float()\n data_target = data.view(1, -1)[:1, 1:].to(device).float()\n model = train(\n optimizer, criterion, model, data_input, data_target, name=name, epochs=epochs\n )\n"
] |
[
[
"numpy.array",
"pandas.read_csv",
"torch.from_numpy"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
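The preprocessing loop in the row above reshapes the 1-D price series into a column vector before fitting each scaler, because the scikit-learn scaler API expects 2-D `(n_samples, n_features)` input (note that `OneHotEncoder` and `LabelEncoder` in that list do not really fit this numeric-scaling pattern). A minimal fit/transform round trip with an invented toy series:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler

prices = np.array([601.0, 610.5, 598.2, 623.9])       # hypothetical "High" column
scaler = MinMaxScaler()
scaled = scaler.fit_transform(prices.reshape(-1, 1))  # 2-D in, 2-D out

print(scaled.ravel())                                 # values mapped into [0, 1]
print(scaler.inverse_transform(scaled).ravel())       # original prices recovered
```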
ziliHarvey/smart-annotation-pointrcnn
|
[
"c615de59b0af58222c3d77e6b7e9fa1c81a30468"
] |
[
"app/label_loader.py"
] |
[
"import numpy as np\n\ndef get_label_anno(label_path):\n annotations = {}\n annotations.update({\n 'name': [],\n 'dimensions': [],\n 'location': [],\n 'rotation_y': []\n })\n with open(label_path, 'r') as f:\n lines = f.readlines()\n content = [line.strip().split(' ') for line in lines]\n annotations['name'] = np.array([x[0] for x in content])\n annotations['dimensions'] = np.array(\n [[float(info) for info in x[1:4]] for x in content]).reshape(\n -1, 3)\n annotations['location'] = np.array(\n [[float(info) for info in x[4:7]] for x in content]).reshape(-1, 3)\n annotations['rotation_y'] = np.array(\n [float(x[7]) for x in content]).reshape(-1)\n return annotations\n\nif __name__ == \"__main__\":\n # example file\n # VEHICLE 1.7742 1.9809 4.5410 22.0288 15.6219 0.1392 3.1450 1.6872\n # name, h, w, l, x, y, z, ry, score\n # in LiDAR's frame\n detections_dir = \"PointCNN/output/rpn/argo_config_sampling_trainfull/eval/epoch_no_number/sample/test_mode/detections/data\"\n label_path = detections_dir + \"/000000.txt\"\n #print(get_label_anno(label_path)) "
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
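`get_label_anno` in the row above splits each whitespace-delimited label line and slices fixed column ranges into NumPy arrays, following the `name h w l x y z ry score` layout noted in its comment. The same parsing, sketched with an in-memory line instead of a file on disk:

```python
import numpy as np

# One detection line in the documented format (values copied from the example comment).
lines = ["VEHICLE 1.7742 1.9809 4.5410 22.0288 15.6219 0.1392 3.1450 1.6872"]
content = [line.strip().split(" ") for line in lines]

name = np.array([x[0] for x in content])
dimensions = np.array([[float(v) for v in x[1:4]] for x in content]).reshape(-1, 3)  # h, w, l
location = np.array([[float(v) for v in x[4:7]] for x in content]).reshape(-1, 3)    # x, y, z
rotation_y = np.array([float(x[7]) for x in content]).reshape(-1)

print(name, dimensions.shape, location.shape, rotation_y)  # ['VEHICLE'] (1, 3) (1, 3) [3.145]
```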
intelligenttrafficforecasting/DCRNN
|
[
"128dc26e265491aad88bc4cb67d6ab2d2193532b"
] |
[
"lib/metrics.py"
] |
[
"import numpy as np\nimport tensorflow as tf\n\n\ndef masked_mse_tf(preds, labels, null_val=np.nan):\n \"\"\"\n Accuracy with masking.\n :param preds:\n :param labels:\n :param null_val:\n :return:\n \"\"\"\n if np.isnan(null_val):\n mask = ~tf.is_nan(labels)\n else:\n mask = tf.not_equal(labels, null_val)\n mask = tf.cast(mask, tf.float32)\n mask /= tf.reduce_mean(mask)\n mask = tf.where(tf.is_nan(mask), tf.zeros_like(mask), mask)\n loss = tf.square(tf.subtract(preds, labels))\n loss = loss * mask\n loss = tf.where(tf.is_nan(loss), tf.zeros_like(loss), loss)\n return tf.reduce_mean(loss)\n\n\ndef masked_mae_tf(preds, labels, null_val=np.nan):\n \"\"\"\n Accuracy with masking.\n :param preds:\n :param labels:\n :param null_val:\n :return:\n \"\"\"\n if np.isnan(null_val):\n mask = ~tf.is_nan(labels)\n else:\n mask = tf.not_equal(labels, null_val)\n mask = tf.cast(mask, tf.float32)\n mask /= tf.reduce_mean(mask)\n mask = tf.where(tf.is_nan(mask), tf.zeros_like(mask), mask)\n loss = tf.abs(tf.subtract(preds, labels))\n loss = loss * mask\n loss = tf.where(tf.is_nan(loss), tf.zeros_like(loss), loss)\n return tf.reduce_mean(loss)\n\n\ndef masked_rmse_tf(preds, labels, null_val=np.nan):\n \"\"\"\n Accuracy with masking.\n :param preds:\n :param labels:\n :param null_val:\n :return:\n \"\"\"\n return tf.sqrt(masked_mse_tf(preds=preds, labels=labels, null_val=null_val))\n\n\ndef masked_rmse_np(preds, labels, null_val=np.nan):\n return np.sqrt(masked_mse_np(preds=preds, labels=labels, null_val=null_val))\n\n\ndef masked_mse_np(preds, labels, null_val=np.nan):\n with np.errstate(divide='ignore', invalid='ignore'):\n if np.isnan(null_val):\n mask = ~np.isnan(labels)\n else:\n mask = np.not_equal(labels, null_val)\n mask = mask.astype('float32')\n mask /= np.mean(mask)\n rmse = np.square(np.subtract(preds, labels)).astype('float32')\n rmse = np.nan_to_num(rmse * mask)\n return np.mean(rmse)\n\n\ndef masked_mae_np(preds, labels, null_val=np.nan):\n with np.errstate(divide='ignore', invalid='ignore'):\n if np.isnan(null_val):\n mask = ~np.isnan(labels)\n else:\n mask = np.not_equal(labels, null_val)\n mask = mask.astype('float32')\n mask /= np.mean(mask)\n mae = np.abs(np.subtract(preds, labels)).astype('float32')\n mae = np.nan_to_num(mae * mask)\n return np.mean(mae)\n\n\ndef masked_mape_np(preds, labels, null_val=np.nan):\n with np.errstate(divide='ignore', invalid='ignore'):\n if np.isnan(null_val):\n mask = ~np.isnan(labels)\n else:\n mask = np.not_equal(labels, null_val)\n mask = mask.astype('float32')\n mask /= np.mean(mask)\n mape = np.abs(np.divide(np.subtract(preds, labels).astype('float32'), labels))\n mape = np.nan_to_num(mask * mape)\n return np.mean(mape)\n\n\n# Builds loss function.\ndef masked_mse_loss(scaler, null_val):\n def loss(preds, labels):\n if scaler:\n preds = scaler.inverse_transform(preds)\n labels = scaler.inverse_transform(labels)\n return masked_mse_tf(preds=preds, labels=labels, null_val=null_val)\n\n return loss\n\n\ndef masked_rmse_loss(scaler, null_val):\n def loss(preds, labels):\n if scaler:\n preds = scaler.inverse_transform(preds)\n labels = scaler.inverse_transform(labels)\n return masked_rmse_tf(preds=preds, labels=labels, null_val=null_val)\n\n return loss\n\n\ndef masked_mae_loss(scaler, null_val):\n def loss(preds, labels):\n if scaler:\n preds = scaler.inverse_transform(preds)\n labels = scaler.inverse_transform(labels)\n mae = masked_mae_tf(preds=preds, labels=labels, null_val=null_val)\n return mae\n\n return loss\n\n\ndef calculate_metrics(df_pred, 
df_test, null_val):\n \"\"\"\n Calculate the MAE, MAPE, RMSE\n :param df_pred:\n :param df_test:\n :param null_val:\n :return:\n \"\"\"\n mape = masked_mape_np(preds=df_pred.as_matrix(), labels=df_test.as_matrix(), null_val=null_val)\n mae = masked_mae_np(preds=df_pred.as_matrix(), labels=df_test.as_matrix(), null_val=null_val)\n rmse = masked_rmse_np(preds=df_pred.as_matrix(), labels=df_test.as_matrix(), null_val=null_val)\n return mae, mape, rmse"
] |
[
[
"tensorflow.is_nan",
"tensorflow.not_equal",
"tensorflow.reduce_mean",
"numpy.isnan",
"tensorflow.cast",
"numpy.subtract",
"numpy.nan_to_num",
"tensorflow.subtract",
"tensorflow.zeros_like",
"numpy.mean",
"numpy.errstate",
"numpy.not_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
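The masked metrics in the row above all share one trick: build a 0/1 mask over valid entries, divide the mask by its own mean so the masked average remains an unbiased mean over just the valid entries, then zero out any NaNs produced along the way. A tiny NumPy demonstration of the `masked_mae_np` arithmetic, with a null value of 0 and made-up readings:

```python
import numpy as np

labels = np.array([10.0, 0.0, 30.0])    # 0.0 marks a missing sensor reading
preds  = np.array([12.0, 5.0, 27.0])

mask = np.not_equal(labels, 0.0).astype("float32")
mask /= np.mean(mask)                    # mean(mask) = 2/3, so valid entries weigh 1.5
mae = np.nan_to_num(np.abs(preds - labels) * mask)
print(np.mean(mae))                      # ~2.5 == mean(|12-10|, |27-30|) over the valid entries
```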
CallMeMisterOwl/manim
|
[
"b22bdee1000f95c86df645545cf26c76b8d0ccab"
] |
[
"manim/utils/space_ops.py"
] |
[
"\"\"\"Utility functions for two- and three-dimensional vectors.\"\"\"\n\n__all__ = [\n \"get_norm\",\n \"quaternion_mult\",\n \"quaternion_from_angle_axis\",\n \"angle_axis_from_quaternion\",\n \"quaternion_conjugate\",\n \"rotate_vector\",\n \"thick_diagonal\",\n \"rotation_matrix\",\n \"rotation_about_z\",\n \"z_to_vector\",\n \"angle_between\",\n \"angle_of_vector\",\n \"angle_between_vectors\",\n \"project_along_vector\",\n \"normalize\",\n \"cross\",\n \"get_unit_normal\",\n \"compass_directions\",\n \"complex_to_R3\",\n \"R3_to_complex\",\n \"complex_func_to_R3_func\",\n \"center_of_mass\",\n \"midpoint\",\n \"find_intersection\",\n \"line_intersection\",\n \"get_winding_number\",\n \"cross2d\",\n \"earclip_triangulation\",\n]\n\n\nimport itertools as it\nimport math\nfrom functools import reduce\n\nimport numpy as np\nfrom mapbox_earcut import triangulate_float32 as earcut\n\nfrom .. import config\nfrom ..constants import DOWN, OUT, PI, RIGHT, TAU\nfrom ..utils.iterables import adjacent_pairs\nfrom ..utils.simple_functions import fdiv\n\n\ndef get_norm(vect):\n return sum([x ** 2 for x in vect]) ** 0.5\n\n\ndef norm_squared(v):\n return v[0] * v[0] + v[1] * v[1] + v[2] * v[2]\n\n\n# Quaternions\n# TODO, implement quaternion type\n\n\ndef quaternion_mult(*quats):\n if config.renderer == \"opengl\":\n if len(quats) == 0:\n return [1, 0, 0, 0]\n result = quats[0]\n for next_quat in quats[1:]:\n w1, x1, y1, z1 = result\n w2, x2, y2, z2 = next_quat\n result = [\n w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,\n w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,\n w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,\n w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,\n ]\n return result\n else:\n q1 = quats[0]\n q2 = quats[1]\n\n w1, x1, y1, z1 = q1\n w2, x2, y2, z2 = q2\n return np.array(\n [\n w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,\n w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,\n w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,\n w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,\n ]\n )\n\n\ndef quaternion_from_angle_axis(angle, axis, axis_normalized=False):\n if config.renderer == \"opengl\":\n if not axis_normalized:\n axis = normalize(axis)\n return [math.cos(angle / 2), *(math.sin(angle / 2) * axis)]\n else:\n return np.append(np.cos(angle / 2), np.sin(angle / 2) * normalize(axis))\n\n\ndef angle_axis_from_quaternion(quaternion):\n axis = normalize(quaternion[1:], fall_back=np.array([1, 0, 0]))\n angle = 2 * np.arccos(quaternion[0])\n if angle > TAU / 2:\n angle = TAU - angle\n return angle, axis\n\n\ndef quaternion_conjugate(quaternion):\n result = np.array(quaternion)\n result[1:] *= -1\n return result\n\n\ndef rotate_vector(vector, angle, axis=OUT):\n if len(vector) == 2:\n # Use complex numbers...because why not\n z = complex(*vector) * np.exp(complex(0, angle))\n return np.array([z.real, z.imag])\n elif len(vector) == 3:\n # Use quaternions...because why not\n quat = quaternion_from_angle_axis(angle, axis)\n quat_inv = quaternion_conjugate(quat)\n product = reduce(quaternion_mult, [quat, np.append(0, vector), quat_inv])\n return product[1:]\n else:\n raise ValueError(\"vector must be of dimension 2 or 3\")\n\n\ndef thick_diagonal(dim, thickness=2):\n row_indices = np.arange(dim).repeat(dim).reshape((dim, dim))\n col_indices = np.transpose(row_indices)\n return (np.abs(row_indices - col_indices) < thickness).astype(\"uint8\")\n\n\ndef rotation_matrix_transpose_from_quaternion(quat):\n quat_inv = quaternion_conjugate(quat)\n return [\n quaternion_mult(quat, [0, *basis], quat_inv)[1:]\n for basis in [\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ]\n 
]\n\n\ndef rotation_matrix_from_quaternion(quat):\n return np.transpose(rotation_matrix_transpose_from_quaternion(quat))\n\n\ndef rotation_matrix_transpose(angle, axis):\n if axis[0] == 0 and axis[1] == 0:\n # axis = [0, 0, z] case is common enough it's worth\n # having a shortcut\n sgn = 1 if axis[2] > 0 else -1\n cos_a = math.cos(angle)\n sin_a = math.sin(angle) * sgn\n return [\n [cos_a, sin_a, 0],\n [-sin_a, cos_a, 0],\n [0, 0, 1],\n ]\n quat = quaternion_from_angle_axis(angle, axis)\n return rotation_matrix_transpose_from_quaternion(quat)\n\n\ndef rotation_matrix(angle, axis):\n \"\"\"\n Rotation in R^3 about a specified axis of rotation.\n \"\"\"\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])\n\n\ndef rotation_about_z(angle):\n return [\n [np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1],\n ]\n\n\ndef z_to_vector(vector):\n \"\"\"\n Returns some matrix in SO(3) which takes the z-axis to the\n (normalized) vector provided as an argument\n \"\"\"\n norm = get_norm(vector)\n if norm == 0:\n return np.identity(3)\n v = np.array(vector) / norm\n phi = np.arccos(v[2])\n if any(v[:2]):\n # projection of vector to unit circle\n axis_proj = v[:2] / get_norm(v[:2])\n theta = np.arccos(axis_proj[0])\n if axis_proj[1] < 0:\n theta = -theta\n else:\n theta = 0\n phi_down = np.array(\n [[np.cos(phi), 0, np.sin(phi)], [0, 1, 0], [-np.sin(phi), 0, np.cos(phi)]]\n )\n return np.dot(rotation_about_z(theta), phi_down)\n\n\ndef angle_between(v1, v2):\n return np.arccos(np.dot(v1 / get_norm(v1), v2 / get_norm(v2)))\n\n\ndef angle_of_vector(vector):\n \"\"\"\n Returns polar coordinate theta when vector is project on xy plane\n \"\"\"\n if config.renderer == \"opengl\":\n return np.angle(complex(*vector[:2]))\n else:\n z = complex(*vector[:2])\n if z == 0:\n return 0\n return np.angle(complex(*vector[:2]))\n\n\ndef angle_between_vectors(v1, v2):\n \"\"\"\n Returns the angle between two 3D vectors.\n This angle will always be btw 0 and pi\n \"\"\"\n if config[\"renderer\"] == \"opengl\":\n diff = (angle_of_vector(v2) - angle_of_vector(v1)) % TAU\n return min(diff, TAU - diff)\n else:\n return np.arccos(fdiv(np.dot(v1, v2), get_norm(v1) * get_norm(v2)))\n\n\ndef project_along_vector(point, vector):\n matrix = np.identity(3) - np.outer(vector, vector)\n return np.dot(point, matrix.T)\n\n\ndef normalize(vect, fall_back=None):\n norm = get_norm(vect)\n if norm > 0:\n return np.array(vect) / norm\n else:\n if fall_back is not None:\n return fall_back\n else:\n return np.zeros(len(vect))\n\n\ndef normalize_along_axis(array, axis, fall_back=None):\n norms = np.sqrt((array * array).sum(axis))\n norms[norms == 0] = 1\n buffed_norms = np.repeat(norms, array.shape[axis]).reshape(array.shape)\n array /= buffed_norms\n return array\n\n\ndef cross(v1, v2):\n return np.array(\n [\n v1[1] * v2[2] - v1[2] * v2[1],\n v1[2] * v2[0] - v1[0] * v2[2],\n v1[0] * v2[1] - v1[1] * v2[0],\n ]\n )\n\n\ndef get_unit_normal(v1, v2, tol=1e-6):\n if config.renderer == \"opengl\":\n v1 = normalize(v1)\n v2 = normalize(v2)\n cp = cross(v1, v2)\n cp_norm = get_norm(cp)\n if cp_norm < tol:\n # Vectors align, so find a normal to them in the plane shared with the z-axis\n new_cp = cross(cross(v1, OUT), v1)\n new_cp_norm = get_norm(new_cp)\n if new_cp_norm < tol:\n return DOWN\n return new_cp / new_cp_norm\n return cp / cp_norm\n else:\n return normalize(cross(v1, v2))\n\n\n###\n\n\ndef 
compass_directions(n=4, start_vect=RIGHT):\n angle = TAU / n\n return np.array([rotate_vector(start_vect, k * angle) for k in range(n)])\n\n\ndef complex_to_R3(complex_num):\n return np.array((complex_num.real, complex_num.imag, 0))\n\n\ndef R3_to_complex(point):\n return complex(*point[:2])\n\n\ndef complex_func_to_R3_func(complex_func):\n return lambda p: complex_to_R3(complex_func(R3_to_complex(p)))\n\n\ndef center_of_mass(points):\n points = [np.array(point).astype(\"float\") for point in points]\n return sum(points) / len(points)\n\n\ndef midpoint(point1, point2):\n return center_of_mass([point1, point2])\n\n\ndef line_intersection(line1, line2):\n \"\"\"\n Returns intersection point of two lines,\n each defined with a pair of vectors determining\n the end points\n \"\"\"\n x_diff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n y_diff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(x_diff, y_diff)\n if div == 0:\n raise ValueError(\"Lines do not intersect\")\n d = (det(*line1), det(*line2))\n x = det(d, x_diff) / div\n y = det(d, y_diff) / div\n return np.array([x, y, 0])\n\n\ndef find_intersection(p0, v0, p1, v1, threshold=1e-5):\n \"\"\"\n Return the intersection of a line passing through p0 in direction v0\n with one passing through p1 in direction v1. (Or array of intersections\n from arrays of such points/directions).\n For 3d values, it returns the point on the ray p0 + v0 * t closest to the\n ray p1 + v1 * t\n \"\"\"\n p0 = np.array(p0, ndmin=2)\n v0 = np.array(v0, ndmin=2)\n p1 = np.array(p1, ndmin=2)\n v1 = np.array(v1, ndmin=2)\n m, n = np.shape(p0)\n assert n in [2, 3]\n\n numer = np.cross(v1, p1 - p0)\n denom = np.cross(v1, v0)\n if n == 3:\n d = len(np.shape(numer))\n new_numer = np.multiply(numer, numer).sum(d - 1)\n new_denom = np.multiply(denom, numer).sum(d - 1)\n numer, denom = new_numer, new_denom\n\n denom[abs(denom) < threshold] = np.inf # So that ratio goes to 0 there\n ratio = numer / denom\n ratio = np.repeat(ratio, n).reshape((m, n))\n return p0 + ratio * v0\n\n\ndef get_winding_number(points):\n total_angle = 0\n for p1, p2 in adjacent_pairs(points):\n d_angle = angle_of_vector(p2) - angle_of_vector(p1)\n d_angle = ((d_angle + PI) % TAU) - PI\n total_angle += d_angle\n return total_angle / TAU\n\n\ndef shoelace(x_y):\n \"\"\"2D implementation of the shoelace formula.\n\n Returns\n -------\n :class:`float`\n Returns signed area.\n \"\"\"\n x = x_y[:, 0]\n y = x_y[:, 1]\n area = 0.5 * np.array(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n return area\n\n\ndef shoelace_direction(x_y):\n \"\"\"\n Uses the area determined by the shoelace method to determine whether\n the input set of points is directed clockwise or counterclockwise.\n\n Returns\n -------\n :class:`str`\n Either ``\"CW\"`` or ``\"CCW\"``.\n \"\"\"\n area = shoelace(x_y)\n return \"CW\" if area > 0 else \"CCW\"\n\n\ndef cross2d(a, b):\n if len(a.shape) == 2:\n return a[:, 0] * b[:, 1] - a[:, 1] * b[:, 0]\n else:\n return a[0] * b[1] - b[0] * a[1]\n\n\ndef earclip_triangulation(verts, ring_ends):\n \"\"\"\n Returns a list of indices giving a triangulation\n of a polygon, potentially with holes\n\n - verts is a numpy array of points\n\n - ring_ends is a list of indices indicating where\n the ends of new paths are\n \"\"\"\n\n # First, connect all the rings so that the polygon\n # with holes is instead treated as a (very convex)\n # polygon with one edge. 
Do this by drawing connections\n    # between rings close to each other\n    rings = [list(range(e0, e1)) for e0, e1 in zip([0, *ring_ends], ring_ends)]\n    attached_rings = rings[:1]\n    detached_rings = rings[1:]\n    loop_connections = {}\n\n    while detached_rings:\n        i_range, j_range = [\n            list(\n                filter(\n                    # Ignore indices that are already being\n                    # used to draw some connection\n                    lambda i: i not in loop_connections,\n                    it.chain(*ring_group),\n                )\n            )\n            for ring_group in (attached_rings, detached_rings)\n        ]\n\n        # Closest point on the attached rings to an estimated midpoint\n        # of the detached rings\n        tmp_j_vert = midpoint(verts[j_range[0]], verts[j_range[len(j_range) // 2]])\n        i = min(i_range, key=lambda i: norm_squared(verts[i] - tmp_j_vert))\n        # Closest point of the detached rings to the aforementioned\n        # point of the attached rings\n        j = min(j_range, key=lambda j: norm_squared(verts[i] - verts[j]))\n        # Recalculate i based on new j\n        i = min(i_range, key=lambda i: norm_squared(verts[i] - verts[j]))\n\n        # Remember to connect the polygon at these points\n        loop_connections[i] = j\n        loop_connections[j] = i\n\n        # Move the ring which j belongs to from the\n        # detached list to the attached list\n        new_ring = next(filter(lambda ring: ring[0] <= j < ring[-1], detached_rings))\n        detached_rings.remove(new_ring)\n        attached_rings.append(new_ring)\n\n    # Setup linked list\n    after = []\n    end0 = 0\n    for end1 in ring_ends:\n        after.extend(range(end0 + 1, end1))\n        after.append(end0)\n        end0 = end1\n\n    # Find an ordering of indices walking around the polygon\n    indices = []\n    i = 0\n    for _ in range(len(verts) + len(ring_ends) - 1):\n        # starting = False\n        if i in loop_connections:\n            j = loop_connections[i]\n            indices.extend([i, j])\n            i = after[j]\n        else:\n            indices.append(i)\n            i = after[i]\n        if i == 0:\n            break\n\n    meta_indices = earcut(verts[indices, :2], [len(indices)])\n    return [indices[mi] for mi in meta_indices]\n"
] |
[
[
"numpy.cross",
"numpy.dot",
"numpy.abs",
"numpy.multiply",
"numpy.linalg.inv",
"numpy.arange",
"numpy.arccos",
"numpy.cos",
"numpy.sin",
"numpy.append",
"numpy.shape",
"numpy.identity",
"numpy.transpose",
"numpy.outer",
"numpy.repeat",
"numpy.array",
"numpy.roll"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
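For a quick cross-check of the rotation helpers stored in the row above, the quaternion route (`quaternion_from_angle_axis` / `quaternion_mult`) and the matrix route should agree on any vector. A minimal, self-contained sketch in plain NumPy (the helper names below are local re-implementations for illustration, not imports from the stored file):

import numpy as np

def quat_from_angle_axis(angle, axis):
    # Unit quaternion [w, x, y, z] for a rotation of `angle` about `axis`.
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    return np.concatenate(([np.cos(angle / 2)], np.sin(angle / 2) * axis))

def quat_mult(q1, q2):
    # Hamilton product, same formula as in the stored quaternion_mult.
    w1, x1, y1, z1 = q1
    w2, x2, y2, z2 = q2
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2,
        w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2,
    ])

def rotate_via_quaternion(v, angle, axis):
    # v' = q (0, v) q*  for a unit quaternion q.
    q = quat_from_angle_axis(angle, axis)
    q_conj = q * np.array([1, -1, -1, -1])
    return quat_mult(quat_mult(q, np.concatenate(([0.0], v))), q_conj)[1:]

def rotation_matrix(angle, axis):
    # Rodrigues' formula: R = I + sin(t) K + (1 - cos(t)) K^2.
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    return np.eye(3) + np.sin(angle) * K + (1 - np.cos(angle)) * (K @ K)

v = np.array([1.0, 2.0, 3.0])
angle, axis = 0.7, [0.0, 0.0, 1.0]
assert np.allclose(rotate_via_quaternion(v, angle, axis),
                   rotation_matrix(angle, axis) @ v)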
CitizenB/pandas
|
[
"ee1efb6d923a2c3e5a912efe20a336179614993d",
"ee1efb6d923a2c3e5a912efe20a336179614993d",
"ee1efb6d923a2c3e5a912efe20a336179614993d",
"ee1efb6d923a2c3e5a912efe20a336179614993d"
] |
[
"pandas/io/formats/style.py",
"pandas/tests/indexing/multiindex/test_xs.py",
"pandas/tests/frame/methods/test_rename.py",
"pandas/tests/extension/arrow/arrays.py"
] |
[
"\"\"\"\nModule for applying conditional formatting to DataFrames and Series.\n\"\"\"\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nimport copy\nfrom functools import partial\nfrom itertools import product\nfrom typing import (\n Any,\n Callable,\n DefaultDict,\n Dict,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\nfrom uuid import uuid1\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib\nfrom pandas._typing import Axis, FrameOrSeries, FrameOrSeriesUnion, Label\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._decorators import doc\n\nfrom pandas.core.dtypes.common import is_float\n\nimport pandas as pd\nfrom pandas.api.types import is_dict_like, is_list_like\nimport pandas.core.common as com\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice\n\njinja2 = import_optional_dependency(\"jinja2\", extra=\"DataFrame.style requires jinja2.\")\n\n\ntry:\n import matplotlib.pyplot as plt\n from matplotlib import colors\n\n has_mpl = True\nexcept ImportError:\n has_mpl = False\n no_mpl_message = \"{0} requires matplotlib.\"\n\n\n@contextmanager\ndef _mpl(func: Callable):\n if has_mpl:\n yield plt, colors\n else:\n raise ImportError(no_mpl_message.format(func.__name__))\n\n\nclass Styler:\n \"\"\"\n Helps style a DataFrame or Series according to the data with HTML and CSS.\n\n Parameters\n ----------\n data : Series or DataFrame\n Data to be styled - either a Series or DataFrame.\n precision : int\n Precision to round floats to, defaults to pd.options.display.precision.\n table_styles : list-like, default None\n List of {selector: (attr, value)} dicts; see Notes.\n uuid : str, default None\n A unique identifier to avoid CSS collisions; generated automatically.\n caption : str, default None\n Caption to attach to the table.\n table_attributes : str, default None\n Items that show up in the opening ``<table>`` tag\n in addition to automatic (by default) id.\n cell_ids : bool, default True\n If True, each cell will have an ``id`` attribute in their HTML tag.\n The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``\n where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row\n number and ``<num_col>`` is the column number.\n na_rep : str, optional\n Representation for missing values.\n If ``na_rep`` is None, no special formatting is applied.\n\n .. versionadded:: 1.0.0\n\n Attributes\n ----------\n env : Jinja2 jinja2.Environment\n template : Jinja2 Template\n loader : Jinja2 Loader\n\n See Also\n --------\n DataFrame.style : Return a Styler object containing methods for building\n a styled HTML representation for the DataFrame.\n\n Notes\n -----\n Most styling will be done by passing style functions into\n ``Styler.apply`` or ``Styler.applymap``. Style functions should\n return values with strings containing CSS ``'attr: value'`` that will\n be applied to the indicated cells.\n\n If using in the Jupyter notebook, Styler has defined a ``_repr_html_``\n to automatically render itself. 
Otherwise call Styler.render to get\n the generated HTML.\n\n CSS classes are attached to the generated HTML\n\n * Index and Column names include ``index_name`` and ``level<k>``\n where `k` is its level in a MultiIndex\n * Index label cells include\n\n * ``row_heading``\n * ``row<n>`` where `n` is the numeric position of the row\n * ``level<k>`` where `k` is the level in a MultiIndex\n\n * Column label cells include\n * ``col_heading``\n * ``col<n>`` where `n` is the numeric position of the column\n * ``level<k>`` where `k` is the level in a MultiIndex\n\n * Blank cells include ``blank``\n * Data cells include ``data``\n \"\"\"\n\n loader = jinja2.PackageLoader(\"pandas\", \"io/formats/templates\")\n env = jinja2.Environment(loader=loader, trim_blocks=True)\n template = env.get_template(\"html.tpl\")\n\n def __init__(\n self,\n data: FrameOrSeriesUnion,\n precision: Optional[int] = None,\n table_styles: Optional[List[Dict[str, List[Tuple[str, str]]]]] = None,\n uuid: Optional[str] = None,\n caption: Optional[str] = None,\n table_attributes: Optional[str] = None,\n cell_ids: bool = True,\n na_rep: Optional[str] = None,\n ):\n self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list)\n self._todo: List[Tuple[Callable, Tuple, Dict]] = []\n\n if not isinstance(data, (pd.Series, pd.DataFrame)):\n raise TypeError(\"``data`` must be a Series or DataFrame\")\n if data.ndim == 1:\n data = data.to_frame()\n if not data.index.is_unique or not data.columns.is_unique:\n raise ValueError(\"style is not supported for non-unique indices.\")\n\n self.data = data\n self.index = data.index\n self.columns = data.columns\n\n self.uuid = uuid\n self.table_styles = table_styles\n self.caption = caption\n if precision is None:\n precision = get_option(\"display.precision\")\n self.precision = precision\n self.table_attributes = table_attributes\n self.hidden_index = False\n self.hidden_columns: Sequence[int] = []\n self.cell_ids = cell_ids\n self.na_rep = na_rep\n\n # display_funcs maps (row, col) -> formatting function\n\n def default_display_func(x):\n if self.na_rep is not None and pd.isna(x):\n return self.na_rep\n elif is_float(x):\n display_format = f\"{x:.{self.precision}f}\"\n return display_format\n else:\n return x\n\n self._display_funcs: DefaultDict[\n Tuple[int, int], Callable[[Any], str]\n ] = defaultdict(lambda: default_display_func)\n\n def _repr_html_(self) -> str:\n \"\"\"\n Hooks into Jupyter notebook rich display system.\n \"\"\"\n return self.render()\n\n @doc(NDFrame.to_excel, klass=\"Styler\")\n def to_excel(\n self,\n excel_writer,\n sheet_name: str = \"Sheet1\",\n na_rep: str = \"\",\n float_format: Optional[str] = None,\n columns: Optional[Sequence[Label]] = None,\n header: Union[Sequence[Label], bool] = True,\n index: bool = True,\n index_label: Optional[Union[Label, Sequence[Label]]] = None,\n startrow: int = 0,\n startcol: int = 0,\n engine: Optional[str] = None,\n merge_cells: bool = True,\n encoding: Optional[str] = None,\n inf_rep: str = \"inf\",\n verbose: bool = True,\n freeze_panes: Optional[Tuple[int, int]] = None,\n ) -> None:\n\n from pandas.io.formats.excel import ExcelFormatter\n\n formatter = ExcelFormatter(\n self,\n na_rep=na_rep,\n cols=columns,\n header=header,\n float_format=float_format,\n index=index,\n index_label=index_label,\n merge_cells=merge_cells,\n inf_rep=inf_rep,\n )\n formatter.write(\n excel_writer,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n freeze_panes=freeze_panes,\n engine=engine,\n )\n\n def _translate(self):\n 
\"\"\"\n Convert the DataFrame in `self.data` and the attrs from `_build_styles`\n into a dictionary of {head, body, uuid, cellstyle}.\n \"\"\"\n table_styles = self.table_styles or []\n caption = self.caption\n ctx = self.ctx\n precision = self.precision\n hidden_index = self.hidden_index\n hidden_columns = self.hidden_columns\n uuid = self.uuid or str(uuid1()).replace(\"-\", \"_\")\n ROW_HEADING_CLASS = \"row_heading\"\n COL_HEADING_CLASS = \"col_heading\"\n INDEX_NAME_CLASS = \"index_name\"\n\n DATA_CLASS = \"data\"\n BLANK_CLASS = \"blank\"\n BLANK_VALUE = \"\"\n\n def format_attr(pair):\n return f\"{pair['key']}={pair['value']}\"\n\n # for sparsifying a MultiIndex\n idx_lengths = _get_level_lengths(self.index)\n col_lengths = _get_level_lengths(self.columns, hidden_columns)\n\n cell_context = dict()\n\n n_rlvls = self.data.index.nlevels\n n_clvls = self.data.columns.nlevels\n rlabels = self.data.index.tolist()\n clabels = self.data.columns.tolist()\n\n if n_rlvls == 1:\n rlabels = [[x] for x in rlabels]\n if n_clvls == 1:\n clabels = [[x] for x in clabels]\n clabels = list(zip(*clabels))\n\n cellstyle_map = defaultdict(list)\n head = []\n\n for r in range(n_clvls):\n # Blank for Index columns...\n row_es = [\n {\n \"type\": \"th\",\n \"value\": BLANK_VALUE,\n \"display_value\": BLANK_VALUE,\n \"is_visible\": not hidden_index,\n \"class\": \" \".join([BLANK_CLASS]),\n }\n ] * (n_rlvls - 1)\n\n # ... except maybe the last for columns.names\n name = self.data.columns.names[r]\n cs = [\n BLANK_CLASS if name is None else INDEX_NAME_CLASS,\n f\"level{r}\",\n ]\n name = BLANK_VALUE if name is None else name\n row_es.append(\n {\n \"type\": \"th\",\n \"value\": name,\n \"display_value\": name,\n \"class\": \" \".join(cs),\n \"is_visible\": not hidden_index,\n }\n )\n\n if clabels:\n for c, value in enumerate(clabels[r]):\n cs = [\n COL_HEADING_CLASS,\n f\"level{r}\",\n f\"col{c}\",\n ]\n cs.extend(\n cell_context.get(\"col_headings\", {}).get(r, {}).get(c, [])\n )\n es = {\n \"type\": \"th\",\n \"value\": value,\n \"display_value\": value,\n \"class\": \" \".join(cs),\n \"is_visible\": _is_visible(c, r, col_lengths),\n }\n colspan = col_lengths.get((r, c), 0)\n if colspan > 1:\n es[\"attributes\"] = [\n format_attr({\"key\": \"colspan\", \"value\": colspan})\n ]\n row_es.append(es)\n head.append(row_es)\n\n if (\n self.data.index.names\n and com.any_not_none(*self.data.index.names)\n and not hidden_index\n ):\n index_header_row = []\n\n for c, name in enumerate(self.data.index.names):\n cs = [INDEX_NAME_CLASS, f\"level{c}\"]\n name = \"\" if name is None else name\n index_header_row.append(\n {\"type\": \"th\", \"value\": name, \"class\": \" \".join(cs)}\n )\n\n index_header_row.extend(\n [{\"type\": \"th\", \"value\": BLANK_VALUE, \"class\": \" \".join([BLANK_CLASS])}]\n * (len(clabels[0]) - len(hidden_columns))\n )\n\n head.append(index_header_row)\n\n body = []\n for r, idx in enumerate(self.data.index):\n row_es = []\n for c, value in enumerate(rlabels[r]):\n rid = [\n ROW_HEADING_CLASS,\n f\"level{c}\",\n f\"row{r}\",\n ]\n es = {\n \"type\": \"th\",\n \"is_visible\": (_is_visible(r, c, idx_lengths) and not hidden_index),\n \"value\": value,\n \"display_value\": value,\n \"id\": \"_\".join(rid[1:]),\n \"class\": \" \".join(rid),\n }\n rowspan = idx_lengths.get((c, r), 0)\n if rowspan > 1:\n es[\"attributes\"] = [\n format_attr({\"key\": \"rowspan\", \"value\": rowspan})\n ]\n row_es.append(es)\n\n for c, col in enumerate(self.data.columns):\n cs = [DATA_CLASS, f\"row{r}\", f\"col{c}\"]\n 
cs.extend(cell_context.get(\"data\", {}).get(r, {}).get(c, []))\n formatter = self._display_funcs[(r, c)]\n value = self.data.iloc[r, c]\n row_dict = {\n \"type\": \"td\",\n \"value\": value,\n \"class\": \" \".join(cs),\n \"display_value\": formatter(value),\n \"is_visible\": (c not in hidden_columns),\n }\n # only add an id if the cell has a style\n if self.cell_ids or not (len(ctx[r, c]) == 1 and ctx[r, c][0] == \"\"):\n row_dict[\"id\"] = \"_\".join(cs[1:])\n row_es.append(row_dict)\n props = []\n for x in ctx[r, c]:\n # have to handle empty styles like ['']\n if x.count(\":\"):\n props.append(tuple(x.split(\":\")))\n else:\n props.append((\"\", \"\"))\n cellstyle_map[tuple(props)].append(f\"row{r}_col{c}\")\n body.append(row_es)\n\n cellstyle = [\n {\"props\": list(props), \"selectors\": selectors}\n for props, selectors in cellstyle_map.items()\n ]\n\n table_attr = self.table_attributes\n use_mathjax = get_option(\"display.html.use_mathjax\")\n if not use_mathjax:\n table_attr = table_attr or \"\"\n if 'class=\"' in table_attr:\n table_attr = table_attr.replace('class=\"', 'class=\"tex2jax_ignore ')\n else:\n table_attr += ' class=\"tex2jax_ignore\"'\n\n return dict(\n head=head,\n cellstyle=cellstyle,\n body=body,\n uuid=uuid,\n precision=precision,\n table_styles=table_styles,\n caption=caption,\n table_attributes=table_attr,\n )\n\n def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> \"Styler\":\n \"\"\"\n Format the text display value of cells.\n\n Parameters\n ----------\n formatter : str, callable, dict or None\n If ``formatter`` is None, the default formatter is used.\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that restricts which elements\n ``formatter`` is applied to.\n na_rep : str, optional\n Representation for missing values.\n If ``na_rep`` is None, no special formatting is applied.\n\n .. 
versionadded:: 1.0.0\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where\n ``a`` is one of\n\n - str: this will be wrapped in: ``a.format(x)``\n - callable: called with the value of an individual cell\n\n The default display value for numeric values is the \"general\" (``g``)\n format with ``pd.options.display.precision`` precision.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])\n >>> df.style.format(\"{:.2%}\")\n >>> df['c'] = ['a', 'b', 'c', 'd']\n >>> df.style.format({'c': str.upper})\n \"\"\"\n if formatter is None:\n assert self._display_funcs.default_factory is not None\n formatter = self._display_funcs.default_factory()\n\n if subset is None:\n row_locs = range(len(self.data))\n col_locs = range(len(self.data.columns))\n else:\n subset = _non_reducing_slice(subset)\n if len(subset) == 1:\n subset = subset, self.data.columns\n\n sub_df = self.data.loc[subset]\n row_locs = self.data.index.get_indexer_for(sub_df.index)\n col_locs = self.data.columns.get_indexer_for(sub_df.columns)\n\n if is_dict_like(formatter):\n for col, col_formatter in formatter.items():\n # formatter must be callable, so '{}' are converted to lambdas\n col_formatter = _maybe_wrap_formatter(col_formatter, na_rep)\n col_num = self.data.columns.get_indexer_for([col])[0]\n\n for row_num in row_locs:\n self._display_funcs[(row_num, col_num)] = col_formatter\n else:\n # single scalar to format all cells with\n formatter = _maybe_wrap_formatter(formatter, na_rep)\n locs = product(*(row_locs, col_locs))\n for i, j in locs:\n self._display_funcs[(i, j)] = formatter\n return self\n\n def render(self, **kwargs) -> str:\n \"\"\"\n Render the built up styles to HTML.\n\n Parameters\n ----------\n **kwargs\n Any additional keyword arguments are passed\n through to ``self.template.render``.\n This is useful when you need to provide\n additional variables for a custom template.\n\n Returns\n -------\n rendered : str\n The rendered HTML.\n\n Notes\n -----\n ``Styler`` objects have defined the ``_repr_html_`` method\n which automatically calls ``self.render()`` when it's the\n last item in a Notebook cell. When calling ``Styler.render()``\n directly, wrap the result in ``IPython.display.HTML`` to view\n the rendered HTML in the notebook.\n\n Pandas uses the following keys in render. 
Arguments passed\n in ``**kwargs`` take precedence, so think carefully if you want\n to override them:\n\n * head\n * cellstyle\n * body\n * uuid\n * precision\n * table_styles\n * caption\n * table_attributes\n \"\"\"\n self._compute()\n # TODO: namespace all the pandas keys\n d = self._translate()\n # filter out empty styles, every cell will have a class\n # but the list of props may just be [['', '']].\n # so we have the nested anys below\n trimmed = [x for x in d[\"cellstyle\"] if any(any(y) for y in x[\"props\"])]\n d[\"cellstyle\"] = trimmed\n d.update(kwargs)\n return self.template.render(**d)\n\n def _update_ctx(self, attrs: DataFrame) -> None:\n \"\"\"\n Update the state of the Styler.\n\n Collects a mapping of {index_label: ['<property>: <value>']}.\n\n Parameters\n ----------\n attrs : DataFrame\n should contain strings of '<property>: <value>;<prop2>: <val2>'\n Whitespace shouldn't matter and the final trailing ';' shouldn't\n matter.\n \"\"\"\n for row_label, v in attrs.iterrows():\n for col_label, col in v.items():\n i = self.index.get_indexer([row_label])[0]\n j = self.columns.get_indexer([col_label])[0]\n for pair in col.rstrip(\";\").split(\";\"):\n self.ctx[(i, j)].append(pair)\n\n def _copy(self, deepcopy: bool = False) -> \"Styler\":\n styler = Styler(\n self.data,\n precision=self.precision,\n caption=self.caption,\n uuid=self.uuid,\n table_styles=self.table_styles,\n na_rep=self.na_rep,\n )\n if deepcopy:\n styler.ctx = copy.deepcopy(self.ctx)\n styler._todo = copy.deepcopy(self._todo)\n else:\n styler.ctx = self.ctx\n styler._todo = self._todo\n return styler\n\n def __copy__(self) -> \"Styler\":\n \"\"\"\n Deep copy by default.\n \"\"\"\n return self._copy(deepcopy=False)\n\n def __deepcopy__(self, memo) -> \"Styler\":\n return self._copy(deepcopy=True)\n\n def clear(self) -> None:\n \"\"\"\n Reset the styler, removing any previously applied styles.\n\n Returns None.\n \"\"\"\n self.ctx.clear()\n self._todo = []\n\n def _compute(self):\n \"\"\"\n Execute the style functions built up in `self._todo`.\n\n Relies on the conventions that all style functions go through\n .apply or .applymap. 
The append styles to apply as tuples of\n\n (application method, *args, **kwargs)\n \"\"\"\n r = self\n for func, args, kwargs in self._todo:\n r = func(self)(*args, **kwargs)\n return r\n\n def _apply(\n self,\n func: Callable[..., \"Styler\"],\n axis: Optional[Axis] = 0,\n subset=None,\n **kwargs,\n ) -> \"Styler\":\n subset = slice(None) if subset is None else subset\n subset = _non_reducing_slice(subset)\n data = self.data.loc[subset]\n if axis is not None:\n result = data.apply(func, axis=axis, result_type=\"expand\", **kwargs)\n result.columns = data.columns\n else:\n result = func(data, **kwargs)\n if not isinstance(result, pd.DataFrame):\n raise TypeError(\n f\"Function {repr(func)} must return a DataFrame when \"\n f\"passed to `Styler.apply` with axis=None\"\n )\n if not (\n result.index.equals(data.index) and result.columns.equals(data.columns)\n ):\n raise ValueError(\n f\"Result of {repr(func)} must have identical \"\n f\"index and columns as the input\"\n )\n\n result_shape = result.shape\n expected_shape = self.data.loc[subset].shape\n if result_shape != expected_shape:\n raise ValueError(\n f\"Function {repr(func)} returned the wrong shape.\\n\"\n f\"Result has shape: {result.shape}\\n\"\n f\"Expected shape: {expected_shape}\"\n )\n self._update_ctx(result)\n return self\n\n def apply(\n self,\n func: Callable[..., \"Styler\"],\n axis: Optional[Axis] = 0,\n subset=None,\n **kwargs,\n ) -> \"Styler\":\n \"\"\"\n Apply a function column-wise, row-wise, or table-wise.\n\n Updates the HTML representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a Series or DataFrame (depending\n on ``axis``), and return an object with the same shape.\n Must return a DataFrame with identical index and\n column labels when ``axis=None``.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n subset : IndexSlice\n A valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice.\n **kwargs : dict\n Pass along to ``func``.\n\n Returns\n -------\n self : Styler\n\n Notes\n -----\n The output shape of ``func`` should match the input, i.e. if\n ``x`` is the input row, column, or table (depending on ``axis``),\n then ``func(x).shape == x.shape`` should be true.\n\n This is similar to ``DataFrame.apply``, except that ``axis=None``\n applies the function to the entire DataFrame at once,\n rather than column-wise or row-wise.\n\n Examples\n --------\n >>> def highlight_max(x):\n ... 
return ['background-color: yellow' if v == x.max() else ''\n for v in x]\n ...\n >>> df = pd.DataFrame(np.random.randn(5, 2))\n >>> df.style.apply(highlight_max)\n \"\"\"\n self._todo.append(\n (lambda instance: getattr(instance, \"_apply\"), (func, axis, subset), kwargs)\n )\n return self\n\n def _applymap(self, func: Callable, subset=None, **kwargs) -> \"Styler\":\n func = partial(func, **kwargs) # applymap doesn't take kwargs?\n if subset is None:\n subset = pd.IndexSlice[:]\n subset = _non_reducing_slice(subset)\n result = self.data.loc[subset].applymap(func)\n self._update_ctx(result)\n return self\n\n def applymap(self, func: Callable, subset=None, **kwargs) -> \"Styler\":\n \"\"\"\n Apply a function elementwise.\n\n Updates the HTML representation with the result.\n\n Parameters\n ----------\n func : function\n ``func`` should take a scalar and return a scalar.\n subset : IndexSlice\n A valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice.\n **kwargs : dict\n Pass along to ``func``.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.where\n \"\"\"\n self._todo.append(\n (lambda instance: getattr(instance, \"_applymap\"), (func, subset), kwargs)\n )\n return self\n\n def where(\n self,\n cond: Callable,\n value: str,\n other: Optional[str] = None,\n subset=None,\n **kwargs,\n ) -> \"Styler\":\n \"\"\"\n Apply a function elementwise.\n\n Updates the HTML representation with a style which is\n selected in accordance with the return value of a function.\n\n Parameters\n ----------\n cond : callable\n ``cond`` should take a scalar and return a boolean.\n value : str\n Applied when ``cond`` returns true.\n other : str\n Applied when ``cond`` returns false.\n subset : IndexSlice\n A valid indexer to limit ``data`` to *before* applying the\n function. Consider using a pandas.IndexSlice.\n **kwargs : dict\n Pass along to ``cond``.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.applymap\n \"\"\"\n if other is None:\n other = \"\"\n\n return self.applymap(\n lambda val: value if cond(val) else other, subset=subset, **kwargs\n )\n\n def set_precision(self, precision: int) -> \"Styler\":\n \"\"\"\n Set the precision used to render.\n\n Parameters\n ----------\n precision : int\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.precision = precision\n return self\n\n def set_table_attributes(self, attributes: str) -> \"Styler\":\n \"\"\"\n Set the table attributes.\n\n These are the items that show up in the opening ``<table>`` tag\n in addition to to automatic (by default) id.\n\n Parameters\n ----------\n attributes : str\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_table_attributes('class=\"pure-table\"')\n # ... 
<table class=\"pure-table\"> ...\n \"\"\"\n self.table_attributes = attributes\n return self\n\n def export(self) -> List[Tuple[Callable, Tuple, Dict]]:\n \"\"\"\n Export the styles to applied to the current Styler.\n\n Can be applied to a second style with ``Styler.use``.\n\n Returns\n -------\n styles : list\n\n See Also\n --------\n Styler.use\n \"\"\"\n return self._todo\n\n def use(self, styles: List[Tuple[Callable, Tuple, Dict]]) -> \"Styler\":\n \"\"\"\n Set the styles on the current Styler.\n\n Possibly uses styles from ``Styler.export``.\n\n Parameters\n ----------\n styles : list\n List of style functions.\n\n Returns\n -------\n self : Styler\n\n See Also\n --------\n Styler.export\n \"\"\"\n self._todo.extend(styles)\n return self\n\n def set_uuid(self, uuid: str) -> \"Styler\":\n \"\"\"\n Set the uuid for a Styler.\n\n Parameters\n ----------\n uuid : str\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.uuid = uuid\n return self\n\n def set_caption(self, caption: str) -> \"Styler\":\n \"\"\"\n Set the caption on a Styler.\n\n Parameters\n ----------\n caption : str\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.caption = caption\n return self\n\n def set_table_styles(self, table_styles) -> \"Styler\":\n \"\"\"\n Set the table styles on a Styler.\n\n These are placed in a ``<style>`` tag before the generated HTML table.\n\n Parameters\n ----------\n table_styles : list\n Each individual table_style should be a dictionary with\n ``selector`` and ``props`` keys. ``selector`` should be a CSS\n selector that the style will be applied to (automatically\n prefixed by the table's UUID) and ``props`` should be a list of\n tuples with ``(attribute, value)``.\n\n Returns\n -------\n self : Styler\n\n Examples\n --------\n >>> df = pd.DataFrame(np.random.randn(10, 4))\n >>> df.style.set_table_styles(\n ... [{'selector': 'tr:hover',\n ... 'props': [('background-color', 'yellow')]}]\n ... )\n \"\"\"\n self.table_styles = table_styles\n return self\n\n def set_na_rep(self, na_rep: str) -> \"Styler\":\n \"\"\"\n Set the missing data representation on a Styler.\n\n .. versionadded:: 1.0.0\n\n Parameters\n ----------\n na_rep : str\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.na_rep = na_rep\n return self\n\n def hide_index(self) -> \"Styler\":\n \"\"\"\n Hide any indices from rendering.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.hidden_index = True\n return self\n\n def hide_columns(self, subset) -> \"Styler\":\n \"\"\"\n Hide columns from rendering.\n\n .. 
versionadded:: 0.23.0\n\n Parameters\n ----------\n subset : IndexSlice\n An argument to ``DataFrame.loc`` that identifies which columns\n are hidden.\n\n Returns\n -------\n self : Styler\n \"\"\"\n subset = _non_reducing_slice(subset)\n hidden_df = self.data.loc[subset]\n self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)\n return self\n\n # -----------------------------------------------------------------------\n # A collection of \"builtin\" styles\n # -----------------------------------------------------------------------\n\n @staticmethod\n def _highlight_null(v, null_color: str) -> str:\n return f\"background-color: {null_color}\" if pd.isna(v) else \"\"\n\n def highlight_null(\n self,\n null_color: str = \"red\",\n subset: Optional[Union[Label, Sequence[Label]]] = None,\n ) -> \"Styler\":\n \"\"\"\n Shade the background ``null_color`` for missing values.\n\n Parameters\n ----------\n null_color : str, default 'red'\n subset : label or list of labels, default None\n A valid slice for ``data`` to limit the style application to.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n self : Styler\n \"\"\"\n self.applymap(self._highlight_null, null_color=null_color, subset=subset)\n return self\n\n def background_gradient(\n self,\n cmap=\"PuBu\",\n low: float = 0,\n high: float = 0,\n axis: Optional[Axis] = 0,\n subset=None,\n text_color_threshold: float = 0.408,\n vmin: Optional[float] = None,\n vmax: Optional[float] = None,\n ) -> \"Styler\":\n \"\"\"\n Color the background in a gradient style.\n\n The background color is determined according\n to the data in each column (optionally row). Requires matplotlib.\n\n Parameters\n ----------\n cmap : str or colormap\n Matplotlib colormap.\n low : float\n Compress the range by the low.\n high : float\n Compress the range by the high.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n subset : IndexSlice\n A valid slice for ``data`` to limit the style application to.\n text_color_threshold : float or int\n Luminance threshold for determining text color. Facilitates text\n visibility across varying background colors. From 0 to 1.\n 0 = all text is dark colored, 1 = all text is light colored.\n\n .. versionadded:: 0.24.0\n\n vmin : float, optional\n Minimum data value that corresponds to colormap minimum value.\n When None (default): the minimum value of the data will be used.\n\n .. versionadded:: 1.0.0\n\n vmax : float, optional\n Maximum data value that corresponds to colormap maximum value.\n When None (default): the maximum value of the data will be used.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n self : Styler\n\n Raises\n ------\n ValueError\n If ``text_color_threshold`` is not a value from 0 to 1.\n\n Notes\n -----\n Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the\n text legible by not using the entire range of the color map. 
The range\n        of the data is extended by ``low * (x.max() - x.min())`` and ``high *\n        (x.max() - x.min())`` before normalizing.\n        \"\"\"\n        subset = _maybe_numeric_slice(self.data, subset)\n        subset = _non_reducing_slice(subset)\n        self.apply(\n            self._background_gradient,\n            cmap=cmap,\n            subset=subset,\n            axis=axis,\n            low=low,\n            high=high,\n            text_color_threshold=text_color_threshold,\n            vmin=vmin,\n            vmax=vmax,\n        )\n        return self\n\n    @staticmethod\n    def _background_gradient(\n        s,\n        cmap=\"PuBu\",\n        low: float = 0,\n        high: float = 0,\n        text_color_threshold: float = 0.408,\n        vmin: Optional[float] = None,\n        vmax: Optional[float] = None,\n    ):\n        \"\"\"\n        Color background in a range according to the data.\n        \"\"\"\n        if (\n            not isinstance(text_color_threshold, (float, int))\n            or not 0 <= text_color_threshold <= 1\n        ):\n            msg = \"`text_color_threshold` must be a value from 0 to 1.\"\n            raise ValueError(msg)\n\n        with _mpl(Styler.background_gradient) as (plt, colors):\n            smin = np.nanmin(s.to_numpy()) if vmin is None else vmin\n            smax = np.nanmax(s.to_numpy()) if vmax is None else vmax\n            rng = smax - smin\n            # extend lower / upper bounds, compresses color range\n            norm = colors.Normalize(smin - (rng * low), smax + (rng * high))\n            # matplotlib colors.Normalize modifies inplace?\n            # https://github.com/matplotlib/matplotlib/issues/5427\n            rgbas = plt.cm.get_cmap(cmap)(norm(s.to_numpy(dtype=float)))\n\n            def relative_luminance(rgba) -> float:\n                \"\"\"\n                Calculate relative luminance of a color.\n\n                The calculation adheres to the W3C standards\n                (https://www.w3.org/WAI/GL/wiki/Relative_luminance)\n\n                Parameters\n                ----------\n                color : rgb or rgba tuple\n\n                Returns\n                -------\n                float\n                    The relative luminance as a value from 0 to 1\n                \"\"\"\n                r, g, b = (\n                    x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4\n                    for x in rgba[:3]\n                )\n                return 0.2126 * r + 0.7152 * g + 0.0722 * b\n\n            def css(rgba) -> str:\n                dark = relative_luminance(rgba) < text_color_threshold\n                text_color = \"#f1f1f1\" if dark else \"#000000\"\n                return f\"background-color: {colors.rgb2hex(rgba)};color: {text_color};\"\n\n            if s.ndim == 1:\n                return [css(rgba) for rgba in rgbas]\n            else:\n                return pd.DataFrame(\n                    [[css(rgba) for rgba in row] for row in rgbas],\n                    index=s.index,\n                    columns=s.columns,\n                )\n\n    def set_properties(self, subset=None, **kwargs) -> \"Styler\":\n        \"\"\"\n        Method to set one or more non-data dependent properties for each cell.\n\n        Parameters\n        ----------\n        subset : IndexSlice\n            A valid slice for ``data`` to limit the style application to.\n        **kwargs : dict\n            A dictionary of property, value pairs to be set for each cell.\n\n        Returns\n        -------\n        self : Styler\n\n        Examples\n        --------\n        >>> df = pd.DataFrame(np.random.randn(10, 4))\n        >>> df.style.set_properties(color=\"white\", align=\"right\")\n        >>> df.style.set_properties(**{'background-color': 'yellow'})\n        \"\"\"\n        values = \";\".join(f\"{p}: {v}\" for p, v in kwargs.items())\n        f = lambda x: values\n        return self.applymap(f, subset=subset)\n\n    @staticmethod\n    def _bar(\n        s,\n        align: str,\n        colors: List[str],\n        width: float = 100,\n        vmin: Optional[float] = None,\n        vmax: Optional[float] = None,\n    ):\n        \"\"\"\n        Draw bar chart in dataframe cells.\n        \"\"\"\n        # Get input value range.\n        smin = np.nanmin(s.to_numpy()) if vmin is None else vmin\n        smax = np.nanmax(s.to_numpy()) if vmax is None else vmax\n        if align == \"mid\":\n            smin = min(0, smin)\n            smax = max(0, smax)\n        elif align == \"zero\":\n            # For \"zero\" mode, we want the range to be symmetrical around zero.\n            smax = max(abs(smin), abs(smax))\n            
smin = -smax\n # Transform to percent-range of linear-gradient\n normed = width * (s.to_numpy(dtype=float) - smin) / (smax - smin + 1e-12)\n zero = -width * smin / (smax - smin + 1e-12)\n\n def css_bar(start: float, end: float, color: str) -> str:\n \"\"\"\n Generate CSS code to draw a bar from start to end.\n \"\"\"\n css = \"width: 10em; height: 80%;\"\n if end > start:\n css += \"background: linear-gradient(90deg,\"\n if start > 0:\n css += f\" transparent {start:.1f}%, {color} {start:.1f}%, \"\n e = min(end, width)\n css += f\"{color} {e:.1f}%, transparent {e:.1f}%)\"\n return css\n\n def css(x):\n if pd.isna(x):\n return \"\"\n\n # avoid deprecated indexing `colors[x > zero]`\n color = colors[1] if x > zero else colors[0]\n\n if align == \"left\":\n return css_bar(0, x, color)\n else:\n return css_bar(min(x, zero), max(x, zero), color)\n\n if s.ndim == 1:\n return [css(x) for x in normed]\n else:\n return pd.DataFrame(\n [[css(x) for x in row] for row in normed],\n index=s.index,\n columns=s.columns,\n )\n\n def bar(\n self,\n subset=None,\n axis: Optional[Axis] = 0,\n color=\"#d65f5f\",\n width: float = 100,\n align: str = \"left\",\n vmin: Optional[float] = None,\n vmax: Optional[float] = None,\n ) -> \"Styler\":\n \"\"\"\n Draw bar chart in the cell backgrounds.\n\n Parameters\n ----------\n subset : IndexSlice, optional\n A valid slice for `data` to limit the style application to.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n color : str or 2-tuple/list\n If a str is passed, the color is the same for both\n negative and positive numbers. If 2-tuple/list is used, the\n first element is the color_negative and the second is the\n color_positive (eg: ['#d65f5f', '#5fba7d']).\n width : float, default 100\n A number between 0 or 100. The largest value will cover `width`\n percent of the cell's width.\n align : {'left', 'zero',' mid'}, default 'left'\n How to align the bars with the cells.\n\n - 'left' : the min value starts at the left of the cell.\n - 'zero' : a value of zero is located at the center of the cell.\n - 'mid' : the center of the cell is at (max-min)/2, or\n if values are all negative (positive) the zero is aligned\n at the right (left) of the cell.\n vmin : float, optional\n Minimum bar value, defining the left hand limit\n of the bar drawing range, lower values are clipped to `vmin`.\n When None (default): the minimum value of the data will be used.\n\n .. versionadded:: 0.24.0\n\n vmax : float, optional\n Maximum bar value, defining the right hand limit\n of the bar drawing range, higher values are clipped to `vmax`.\n When None (default): the maximum value of the data will be used.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n self : Styler\n \"\"\"\n if align not in (\"left\", \"zero\", \"mid\"):\n raise ValueError(\"`align` must be one of {'left', 'zero',' mid'}\")\n\n if not (is_list_like(color)):\n color = [color, color]\n elif len(color) == 1:\n color = [color[0], color[0]]\n elif len(color) > 2:\n raise ValueError(\n \"`color` must be string or a list-like \"\n \"of length 2: [`color_neg`, `color_pos`] \"\n \"(eg: color=['#d65f5f', '#5fba7d'])\"\n )\n\n subset = _maybe_numeric_slice(self.data, subset)\n subset = _non_reducing_slice(subset)\n self.apply(\n self._bar,\n subset=subset,\n axis=axis,\n align=align,\n colors=color,\n width=width,\n vmin=vmin,\n vmax=vmax,\n )\n\n return self\n\n def highlight_max(\n self, subset=None, color: str = \"yellow\", axis: Optional[Axis] = 0\n ) -> \"Styler\":\n \"\"\"\n Highlight the maximum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n A valid slice for ``data`` to limit the style application to.\n color : str, default 'yellow'\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n\n Returns\n -------\n self : Styler\n \"\"\"\n return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)\n\n def highlight_min(\n self, subset=None, color: str = \"yellow\", axis: Optional[Axis] = 0\n ) -> \"Styler\":\n \"\"\"\n Highlight the minimum by shading the background.\n\n Parameters\n ----------\n subset : IndexSlice, default None\n A valid slice for ``data`` to limit the style application to.\n color : str, default 'yellow'\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n Apply to each column (``axis=0`` or ``'index'``), to each row\n (``axis=1`` or ``'columns'``), or to the entire DataFrame at once\n with ``axis=None``.\n\n Returns\n -------\n self : Styler\n \"\"\"\n return self._highlight_handler(\n subset=subset, color=color, axis=axis, max_=False\n )\n\n def _highlight_handler(\n self,\n subset=None,\n color: str = \"yellow\",\n axis: Optional[Axis] = None,\n max_: bool = True,\n ) -> \"Styler\":\n subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))\n self.apply(\n self._highlight_extrema, color=color, axis=axis, subset=subset, max_=max_\n )\n return self\n\n @staticmethod\n def _highlight_extrema(\n data: FrameOrSeries, color: str = \"yellow\", max_: bool = True\n ):\n \"\"\"\n Highlight the min or max in a Series or DataFrame.\n \"\"\"\n attr = f\"background-color: {color}\"\n\n if max_:\n extrema = data == np.nanmax(data.to_numpy())\n else:\n extrema = data == np.nanmin(data.to_numpy())\n\n if data.ndim == 1: # Series from .apply\n return [attr if v else \"\" for v in extrema]\n else: # DataFrame from .tee\n return pd.DataFrame(\n np.where(extrema, attr, \"\"), index=data.index, columns=data.columns\n )\n\n @classmethod\n def from_custom_template(cls, searchpath, name):\n \"\"\"\n Factory function for creating a subclass of ``Styler``.\n\n Uses a custom template and Jinja environment.\n\n Parameters\n ----------\n searchpath : str or list\n Path or paths of directories containing the templates.\n name : str\n Name of your custom template to use for rendering.\n\n Returns\n -------\n MyStyler : subclass of Styler\n Has the correct ``env`` and ``template`` class attributes set.\n \"\"\"\n loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(searchpath), cls.loader])\n\n class 
MyStyler(cls):\n env = jinja2.Environment(loader=loader)\n template = env.get_template(name)\n\n return MyStyler\n\n def pipe(self, func: Callable, *args, **kwargs):\n \"\"\"\n Apply ``func(self, *args, **kwargs)``, and return the result.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n func : function\n Function to apply to the Styler. Alternatively, a\n ``(callable, keyword)`` tuple where ``keyword`` is a string\n indicating the keyword of ``callable`` that expects the Styler.\n *args : optional\n Arguments passed to `func`.\n **kwargs : optional\n A dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object :\n The value returned by ``func``.\n\n See Also\n --------\n DataFrame.pipe : Analogous method for DataFrame.\n Styler.apply : Apply a function row-wise, column-wise, or table-wise to\n modify the dataframe's styling.\n\n Notes\n -----\n Like :meth:`DataFrame.pipe`, this method can simplify the\n application of several user-defined functions to a styler. Instead\n of writing:\n\n .. code-block:: python\n\n f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)\n\n users can write:\n\n .. code-block:: python\n\n (df.style.set_precision(3)\n .pipe(g, arg1=a)\n .pipe(f, arg2=b, arg3=c))\n\n In particular, this allows users to define functions that take a\n styler object, along with other parameters, and return the styler after\n making styling changes (such as calling :meth:`Styler.apply` or\n :meth:`Styler.set_properties`). Using ``.pipe``, these user-defined\n style \"transformations\" can be interleaved with calls to the built-in\n Styler interface.\n\n Examples\n --------\n >>> def format_conversion(styler):\n ... return (styler.set_properties(**{'text-align': 'right'})\n ... .format({'conversion': '{:.1%}'}))\n\n The user-defined ``format_conversion`` function above can be called\n within a sequence of other style modifications:\n\n >>> df = pd.DataFrame({'trial': list(range(5)),\n ... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})\n >>> (df.style\n ... .highlight_min(subset=['conversion'], color='yellow')\n ... .pipe(format_conversion)\n ... 
.set_caption(\"Results with minimum conversion highlighted.\"))\n \"\"\"\n return com.pipe(self, func, *args, **kwargs)\n\n\ndef _is_visible(idx_row, idx_col, lengths) -> bool:\n \"\"\"\n Index -> {(idx_row, idx_col): bool}).\n \"\"\"\n return (idx_col, idx_row) in lengths\n\n\ndef _get_level_lengths(index, hidden_elements=None):\n \"\"\"\n Given an index, find the level length for each element.\n\n Optional argument is a list of index positions which\n should not be visible.\n\n Result is a dictionary of (level, initial_position): span\n \"\"\"\n levels = index.format(sparsify=lib.no_default, adjoin=False, names=False)\n\n if hidden_elements is None:\n hidden_elements = []\n\n lengths = {}\n if index.nlevels == 1:\n for i, value in enumerate(levels):\n if i not in hidden_elements:\n lengths[(0, i)] = 1\n return lengths\n\n for i, lvl in enumerate(levels):\n for j, row in enumerate(lvl):\n if not get_option(\"display.multi_sparse\"):\n lengths[(i, j)] = 1\n elif (row is not lib.no_default) and (j not in hidden_elements):\n last_label = j\n lengths[(i, last_label)] = 1\n elif row is not lib.no_default:\n # even if its hidden, keep track of it in case\n # length >1 and later elements are visible\n last_label = j\n lengths[(i, last_label)] = 0\n elif j not in hidden_elements:\n lengths[(i, last_label)] += 1\n\n non_zero_lengths = {\n element: length for element, length in lengths.items() if length >= 1\n }\n\n return non_zero_lengths\n\n\ndef _maybe_wrap_formatter(\n formatter: Union[Callable, str], na_rep: Optional[str]\n) -> Callable:\n if isinstance(formatter, str):\n formatter_func = lambda x: formatter.format(x)\n elif callable(formatter):\n formatter_func = formatter\n else:\n msg = f\"Expected a template string or callable, got {formatter} instead\"\n raise TypeError(msg)\n\n if na_rep is None:\n return formatter_func\n elif isinstance(na_rep, str):\n return lambda x: na_rep if pd.isna(x) else formatter_func(x)\n else:\n msg = f\"Expected a string, got {na_rep} instead\"\n raise TypeError(msg)\n",
"import numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Index, MultiIndex, Series, concat, date_range\nimport pandas._testing as tm\nimport pandas.core.common as com\n\n\[email protected]\ndef four_level_index_dataframe():\n arr = np.array(\n [\n [-0.5109, -2.3358, -0.4645, 0.05076, 0.364],\n [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],\n [-0.6662, -0.5243, -0.358, 0.89145, 2.5838],\n ]\n )\n index = MultiIndex(\n levels=[[\"a\", \"x\"], [\"b\", \"q\"], [10.0032, 20.0, 30.0], [3, 4, 5]],\n codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]],\n names=[\"one\", \"two\", \"three\", \"four\"],\n )\n return DataFrame(arr, index=index, columns=list(\"ABCDE\"))\n\n\[email protected](\n \"key, level, exp_arr, exp_index\",\n [\n (\"a\", \"lvl0\", lambda x: x[:, 0:2], Index([\"bar\", \"foo\"], name=\"lvl1\")),\n (\"foo\", \"lvl1\", lambda x: x[:, 1:2], Index([\"a\"], name=\"lvl0\")),\n ],\n)\ndef test_xs_named_levels_axis_eq_1(key, level, exp_arr, exp_index):\n # see gh-2903\n arr = np.random.randn(4, 4)\n index = MultiIndex(\n levels=[[\"a\", \"b\"], [\"bar\", \"foo\", \"hello\", \"world\"]],\n codes=[[0, 0, 1, 1], [0, 1, 2, 3]],\n names=[\"lvl0\", \"lvl1\"],\n )\n df = DataFrame(arr, columns=index)\n result = df.xs(key, level=level, axis=1)\n expected = DataFrame(exp_arr(arr), columns=exp_index)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_xs_values(multiindex_dataframe_random_data):\n df = multiindex_dataframe_random_data\n result = df.xs((\"bar\", \"two\")).values\n expected = df.values[4]\n tm.assert_almost_equal(result, expected)\n\n\ndef test_xs_loc_equality(multiindex_dataframe_random_data):\n df = multiindex_dataframe_random_data\n result = df.xs((\"bar\", \"two\"))\n expected = df.loc[(\"bar\", \"two\")]\n tm.assert_series_equal(result, expected)\n\n\ndef test_xs_missing_values_in_index():\n # see gh-6574\n # missing values in returned index should be preserved\n acc = [\n (\"a\", \"abcde\", 1),\n (\"b\", \"bbcde\", 2),\n (\"y\", \"yzcde\", 25),\n (\"z\", \"xbcde\", 24),\n (\"z\", None, 26),\n (\"z\", \"zbcde\", 25),\n (\"z\", \"ybcde\", 26),\n ]\n df = DataFrame(acc, columns=[\"a1\", \"a2\", \"cnt\"]).set_index([\"a1\", \"a2\"])\n expected = DataFrame(\n {\"cnt\": [24, 26, 25, 26]},\n index=Index([\"xbcde\", np.nan, \"zbcde\", \"ybcde\"], name=\"a2\"),\n )\n\n result = df.xs(\"z\", level=\"a1\")\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\"key, level\", [(\"one\", \"second\"), ([\"one\"], [\"second\"])])\ndef test_xs_with_duplicates(key, level, multiindex_dataframe_random_data):\n # see gh-13719\n frame = multiindex_dataframe_random_data\n df = concat([frame] * 2)\n assert df.index.is_unique is False\n expected = concat([frame.xs(\"one\", level=\"second\")] * 2)\n\n result = df.xs(key, level=level)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_xs_level(multiindex_dataframe_random_data):\n df = multiindex_dataframe_random_data\n result = df.xs(\"two\", level=\"second\")\n expected = df[df.index.get_level_values(1) == \"two\"]\n expected.index = Index([\"foo\", \"bar\", \"baz\", \"qux\"], name=\"first\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_xs_level_eq_2():\n arr = np.random.randn(3, 5)\n index = MultiIndex(\n levels=[[\"a\", \"p\", \"x\"], [\"b\", \"q\", \"y\"], [\"c\", \"r\", \"z\"]],\n codes=[[2, 0, 1], [2, 0, 1], [2, 0, 1]],\n )\n df = DataFrame(arr, index=index)\n expected = DataFrame(arr[1:2], index=[[\"a\"], [\"b\"]])\n result = df.xs(\"c\", level=2)\n tm.assert_frame_equal(result, expected)\n\n\[email 
protected](\n \"indexer\",\n [\n lambda df: df.xs((\"a\", 4), level=[\"one\", \"four\"]),\n lambda df: df.xs(\"a\").xs(4, level=\"four\"),\n ],\n)\ndef test_xs_level_multiple(indexer, four_level_index_dataframe):\n df = four_level_index_dataframe\n expected_values = [[0.4473, 1.4152, 0.2834, 1.00661, 0.1744]]\n expected_index = MultiIndex(\n levels=[[\"q\"], [20.0]], codes=[[0], [0]], names=[\"two\", \"three\"]\n )\n expected = DataFrame(expected_values, index=expected_index, columns=list(\"ABCDE\"))\n result = indexer(df)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_xs_setting_with_copy_error(multiindex_dataframe_random_data):\n # this is a copy in 0.14\n df = multiindex_dataframe_random_data\n result = df.xs(\"two\", level=\"second\")\n\n # setting this will give a SettingWithCopyError\n # as we are trying to write a view\n msg = \"A value is trying to be set on a copy of a slice from a DataFrame\"\n with pytest.raises(com.SettingWithCopyError, match=msg):\n result[:] = 10\n\n\ndef test_xs_setting_with_copy_error_multiple(four_level_index_dataframe):\n # this is a copy in 0.14\n df = four_level_index_dataframe\n result = df.xs((\"a\", 4), level=[\"one\", \"four\"])\n\n # setting this will give a SettingWithCopyError\n # as we are trying to write a view\n msg = \"A value is trying to be set on a copy of a slice from a DataFrame\"\n with pytest.raises(com.SettingWithCopyError, match=msg):\n result[:] = 10\n\n\ndef test_xs_integer_key():\n # see gh-2107\n dates = range(20111201, 20111205)\n ids = list(\"abcde\")\n index = MultiIndex.from_product([dates, ids], names=[\"date\", \"secid\"])\n df = DataFrame(np.random.randn(len(index), 3), index, [\"X\", \"Y\", \"Z\"])\n\n result = df.xs(20111201, level=\"date\")\n expected = df.loc[20111201, :]\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"indexer\", [lambda df: df.xs(\"a\", level=0), lambda df: df.xs(\"a\")]\n)\ndef test_xs_level0(indexer, four_level_index_dataframe):\n df = four_level_index_dataframe\n expected_values = [\n [-0.5109, -2.3358, -0.4645, 0.05076, 0.364],\n [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],\n ]\n expected_index = MultiIndex(\n levels=[[\"b\", \"q\"], [10.0032, 20.0], [4, 5]],\n codes=[[0, 1], [0, 1], [1, 0]],\n names=[\"two\", \"three\", \"four\"],\n )\n expected = DataFrame(expected_values, index=expected_index, columns=list(\"ABCDE\"))\n\n result = indexer(df)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_xs_level_series(multiindex_dataframe_random_data):\n # this test is not explicitly testing .xs functionality\n # TODO: move to another module or refactor\n df = multiindex_dataframe_random_data\n s = df[\"A\"]\n result = s[:, \"two\"]\n expected = df.xs(\"two\", level=1)[\"A\"]\n tm.assert_series_equal(result, expected)\n\n\ndef test_xs_level_series_ymd(multiindex_year_month_day_dataframe_random_data):\n # this test is not explicitly testing .xs functionality\n # TODO: move to another module or refactor\n df = multiindex_year_month_day_dataframe_random_data\n s = df[\"A\"]\n result = s[2000, 5]\n expected = df.loc[2000, 5][\"A\"]\n tm.assert_series_equal(result, expected)\n\n\ndef test_xs_level_series_slice_not_implemented(\n multiindex_year_month_day_dataframe_random_data,\n):\n # this test is not explicitly testing .xs functionality\n # TODO: move to another module or refactor\n # not implementing this for now\n df = multiindex_year_month_day_dataframe_random_data\n s = df[\"A\"]\n\n msg = r\"\\(2000, slice\\(3, 4, None\\)\\)\"\n with pytest.raises(TypeError, 
match=msg):\n s[2000, 3:4]\n\n\ndef test_series_getitem_multiindex_xs():\n # GH6258\n dt = list(date_range(\"20130903\", periods=3))\n idx = MultiIndex.from_product([list(\"AB\"), dt])\n s = Series([1, 3, 4, 1, 3, 4], index=idx)\n expected = Series([1, 1], index=list(\"AB\"))\n\n result = s.xs(\"20130903\", level=1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_getitem_multiindex_xs_by_label():\n # GH5684\n idx = MultiIndex.from_tuples(\n [(\"a\", \"one\"), (\"a\", \"two\"), (\"b\", \"one\"), (\"b\", \"two\")]\n )\n s = Series([1, 2, 3, 4], index=idx)\n s.index.set_names([\"L1\", \"L2\"], inplace=True)\n expected = Series([1, 3], index=[\"a\", \"b\"])\n expected.index.set_names([\"L1\"], inplace=True)\n\n result = s.xs(\"one\", level=\"L2\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_xs_levels_raises():\n df = DataFrame({\"A\": [1, 2, 3]})\n\n msg = \"Index must be a MultiIndex\"\n with pytest.raises(TypeError, match=msg):\n df.xs(0, level=\"as\")\n\n s = df.A\n with pytest.raises(TypeError, match=msg):\n s.xs(0, level=\"as\")\n",
"from collections import ChainMap\n\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, Index, MultiIndex\nimport pandas._testing as tm\n\n\nclass TestRename:\n def test_rename(self, float_frame):\n mapping = {\"A\": \"a\", \"B\": \"b\", \"C\": \"c\", \"D\": \"d\"}\n\n renamed = float_frame.rename(columns=mapping)\n renamed2 = float_frame.rename(columns=str.lower)\n\n tm.assert_frame_equal(renamed, renamed2)\n tm.assert_frame_equal(\n renamed2.rename(columns=str.upper), float_frame, check_names=False\n )\n\n # index\n data = {\"A\": {\"foo\": 0, \"bar\": 1}}\n\n # gets sorted alphabetical\n df = DataFrame(data)\n renamed = df.rename(index={\"foo\": \"bar\", \"bar\": \"foo\"})\n tm.assert_index_equal(renamed.index, Index([\"foo\", \"bar\"]))\n\n renamed = df.rename(index=str.upper)\n tm.assert_index_equal(renamed.index, Index([\"BAR\", \"FOO\"]))\n\n # have to pass something\n with pytest.raises(TypeError, match=\"must pass an index to rename\"):\n float_frame.rename()\n\n # partial columns\n renamed = float_frame.rename(columns={\"C\": \"foo\", \"D\": \"bar\"})\n tm.assert_index_equal(renamed.columns, Index([\"A\", \"B\", \"foo\", \"bar\"]))\n\n # other axis\n renamed = float_frame.T.rename(index={\"C\": \"foo\", \"D\": \"bar\"})\n tm.assert_index_equal(renamed.index, Index([\"A\", \"B\", \"foo\", \"bar\"]))\n\n # index with name\n index = Index([\"foo\", \"bar\"], name=\"name\")\n renamer = DataFrame(data, index=index)\n renamed = renamer.rename(index={\"foo\": \"bar\", \"bar\": \"foo\"})\n tm.assert_index_equal(renamed.index, Index([\"bar\", \"foo\"], name=\"name\"))\n assert renamed.index.name == renamer.index.name\n\n @pytest.mark.parametrize(\n \"args,kwargs\",\n [\n ((ChainMap({\"A\": \"a\"}, {\"B\": \"b\"}),), dict(axis=\"columns\")),\n ((), dict(columns=ChainMap({\"A\": \"a\"}, {\"B\": \"b\"}))),\n ],\n )\n def test_rename_chainmap(self, args, kwargs):\n # see gh-23859\n colAData = range(1, 11)\n colBdata = np.random.randn(10)\n\n df = DataFrame({\"A\": colAData, \"B\": colBdata})\n result = df.rename(*args, **kwargs)\n\n expected = DataFrame({\"a\": colAData, \"b\": colBdata})\n tm.assert_frame_equal(result, expected)\n\n def test_rename_multiindex(self):\n\n tuples_index = [(\"foo1\", \"bar1\"), (\"foo2\", \"bar2\")]\n tuples_columns = [(\"fizz1\", \"buzz1\"), (\"fizz2\", \"buzz2\")]\n index = MultiIndex.from_tuples(tuples_index, names=[\"foo\", \"bar\"])\n columns = MultiIndex.from_tuples(tuples_columns, names=[\"fizz\", \"buzz\"])\n df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)\n\n #\n # without specifying level -> across all levels\n\n renamed = df.rename(\n index={\"foo1\": \"foo3\", \"bar2\": \"bar3\"},\n columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"},\n )\n new_index = MultiIndex.from_tuples(\n [(\"foo3\", \"bar1\"), (\"foo2\", \"bar3\")], names=[\"foo\", \"bar\"]\n )\n new_columns = MultiIndex.from_tuples(\n [(\"fizz3\", \"buzz1\"), (\"fizz2\", \"buzz3\")], names=[\"fizz\", \"buzz\"]\n )\n tm.assert_index_equal(renamed.index, new_index)\n tm.assert_index_equal(renamed.columns, new_columns)\n assert renamed.index.names == df.index.names\n assert renamed.columns.names == df.columns.names\n\n #\n # with specifying a level (GH13766)\n\n # dict\n new_columns = MultiIndex.from_tuples(\n [(\"fizz3\", \"buzz1\"), (\"fizz2\", \"buzz2\")], names=[\"fizz\", \"buzz\"]\n )\n renamed = df.rename(columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=0)\n tm.assert_index_equal(renamed.columns, new_columns)\n renamed = 
df.rename(columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=\"fizz\")\n tm.assert_index_equal(renamed.columns, new_columns)\n\n new_columns = MultiIndex.from_tuples(\n [(\"fizz1\", \"buzz1\"), (\"fizz2\", \"buzz3\")], names=[\"fizz\", \"buzz\"]\n )\n renamed = df.rename(columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=1)\n tm.assert_index_equal(renamed.columns, new_columns)\n renamed = df.rename(columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=\"buzz\")\n tm.assert_index_equal(renamed.columns, new_columns)\n\n # function\n func = str.upper\n new_columns = MultiIndex.from_tuples(\n [(\"FIZZ1\", \"buzz1\"), (\"FIZZ2\", \"buzz2\")], names=[\"fizz\", \"buzz\"]\n )\n renamed = df.rename(columns=func, level=0)\n tm.assert_index_equal(renamed.columns, new_columns)\n renamed = df.rename(columns=func, level=\"fizz\")\n tm.assert_index_equal(renamed.columns, new_columns)\n\n new_columns = MultiIndex.from_tuples(\n [(\"fizz1\", \"BUZZ1\"), (\"fizz2\", \"BUZZ2\")], names=[\"fizz\", \"buzz\"]\n )\n renamed = df.rename(columns=func, level=1)\n tm.assert_index_equal(renamed.columns, new_columns)\n renamed = df.rename(columns=func, level=\"buzz\")\n tm.assert_index_equal(renamed.columns, new_columns)\n\n # index\n new_index = MultiIndex.from_tuples(\n [(\"foo3\", \"bar1\"), (\"foo2\", \"bar2\")], names=[\"foo\", \"bar\"]\n )\n renamed = df.rename(index={\"foo1\": \"foo3\", \"bar2\": \"bar3\"}, level=0)\n tm.assert_index_equal(renamed.index, new_index)\n\n def test_rename_nocopy(self, float_frame):\n renamed = float_frame.rename(columns={\"C\": \"foo\"}, copy=False)\n renamed[\"foo\"] = 1.0\n assert (float_frame[\"C\"] == 1.0).all()\n\n def test_rename_inplace(self, float_frame):\n float_frame.rename(columns={\"C\": \"foo\"})\n assert \"C\" in float_frame\n assert \"foo\" not in float_frame\n\n c_id = id(float_frame[\"C\"])\n float_frame = float_frame.copy()\n float_frame.rename(columns={\"C\": \"foo\"}, inplace=True)\n\n assert \"C\" not in float_frame\n assert \"foo\" in float_frame\n assert id(float_frame[\"foo\"]) != c_id\n\n def test_rename_bug(self):\n # GH 5344\n # rename set ref_locs, and set_index was not resetting\n df = DataFrame({0: [\"foo\", \"bar\"], 1: [\"bah\", \"bas\"], 2: [1, 2]})\n df = df.rename(columns={0: \"a\"})\n df = df.rename(columns={1: \"b\"})\n df = df.set_index([\"a\", \"b\"])\n df.columns = [\"2001-01-01\"]\n expected = DataFrame(\n [[1], [2]],\n index=MultiIndex.from_tuples(\n [(\"foo\", \"bah\"), (\"bar\", \"bas\")], names=[\"a\", \"b\"]\n ),\n columns=[\"2001-01-01\"],\n )\n tm.assert_frame_equal(df, expected)\n\n def test_rename_bug2(self):\n # GH 19497\n # rename was changing Index to MultiIndex if Index contained tuples\n\n df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=[\"a\"])\n df = df.rename({(1, 1): (5, 4)}, axis=\"index\")\n expected = DataFrame(\n data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=[\"a\"]\n )\n tm.assert_frame_equal(df, expected)\n\n def test_rename_errors_raises(self):\n df = DataFrame(columns=[\"A\", \"B\", \"C\", \"D\"])\n with pytest.raises(KeyError, match=\"'E'] not found in axis\"):\n df.rename(columns={\"A\": \"a\", \"E\": \"e\"}, errors=\"raise\")\n\n @pytest.mark.parametrize(\n \"mapper, errors, expected_columns\",\n [\n ({\"A\": \"a\", \"E\": \"e\"}, \"ignore\", [\"a\", \"B\", \"C\", \"D\"]),\n ({\"A\": \"a\"}, \"raise\", [\"a\", \"B\", \"C\", \"D\"]),\n (str.lower, \"raise\", [\"a\", \"b\", \"c\", \"d\"]),\n ],\n )\n def test_rename_errors(self, mapper, errors, 
expected_columns):\n # GH 13473\n # rename now works with errors parameter\n df = DataFrame(columns=[\"A\", \"B\", \"C\", \"D\"])\n result = df.rename(columns=mapper, errors=errors)\n expected = DataFrame(columns=expected_columns)\n tm.assert_frame_equal(result, expected)\n\n def test_rename_objects(self, float_string_frame):\n renamed = float_string_frame.rename(columns=str.upper)\n\n assert \"FOO\" in renamed\n assert \"foo\" not in renamed\n\n def test_rename_axis_style(self):\n # https://github.com/pandas-dev/pandas/issues/12392\n df = DataFrame({\"A\": [1, 2], \"B\": [1, 2]}, index=[\"X\", \"Y\"])\n expected = DataFrame({\"a\": [1, 2], \"b\": [1, 2]}, index=[\"X\", \"Y\"])\n\n result = df.rename(str.lower, axis=1)\n tm.assert_frame_equal(result, expected)\n\n result = df.rename(str.lower, axis=\"columns\")\n tm.assert_frame_equal(result, expected)\n\n result = df.rename({\"A\": \"a\", \"B\": \"b\"}, axis=1)\n tm.assert_frame_equal(result, expected)\n\n result = df.rename({\"A\": \"a\", \"B\": \"b\"}, axis=\"columns\")\n tm.assert_frame_equal(result, expected)\n\n # Index\n expected = DataFrame({\"A\": [1, 2], \"B\": [1, 2]}, index=[\"x\", \"y\"])\n result = df.rename(str.lower, axis=0)\n tm.assert_frame_equal(result, expected)\n\n result = df.rename(str.lower, axis=\"index\")\n tm.assert_frame_equal(result, expected)\n\n result = df.rename({\"X\": \"x\", \"Y\": \"y\"}, axis=0)\n tm.assert_frame_equal(result, expected)\n\n result = df.rename({\"X\": \"x\", \"Y\": \"y\"}, axis=\"index\")\n tm.assert_frame_equal(result, expected)\n\n result = df.rename(mapper=str.lower, axis=\"index\")\n tm.assert_frame_equal(result, expected)\n\n def test_rename_mapper_multi(self):\n df = DataFrame({\"A\": [\"a\", \"b\"], \"B\": [\"c\", \"d\"], \"C\": [1, 2]}).set_index(\n [\"A\", \"B\"]\n )\n result = df.rename(str.upper)\n expected = df.rename(index=str.upper)\n tm.assert_frame_equal(result, expected)\n\n def test_rename_positional_named(self):\n # https://github.com/pandas-dev/pandas/issues/12392\n df = DataFrame({\"a\": [1, 2], \"b\": [1, 2]}, index=[\"X\", \"Y\"])\n result = df.rename(index=str.lower, columns=str.upper)\n expected = DataFrame({\"A\": [1, 2], \"B\": [1, 2]}, index=[\"x\", \"y\"])\n tm.assert_frame_equal(result, expected)\n\n def test_rename_axis_style_raises(self):\n # see gh-12392\n df = DataFrame({\"A\": [1, 2], \"B\": [1, 2]}, index=[\"0\", \"1\"])\n\n # Named target and axis\n over_spec_msg = \"Cannot specify both 'axis' and any of 'index' or 'columns'\"\n with pytest.raises(TypeError, match=over_spec_msg):\n df.rename(index=str.lower, axis=1)\n\n with pytest.raises(TypeError, match=over_spec_msg):\n df.rename(index=str.lower, axis=\"columns\")\n\n with pytest.raises(TypeError, match=over_spec_msg):\n df.rename(columns=str.lower, axis=\"columns\")\n\n with pytest.raises(TypeError, match=over_spec_msg):\n df.rename(index=str.lower, axis=0)\n\n # Multiple targets and axis\n with pytest.raises(TypeError, match=over_spec_msg):\n df.rename(str.lower, index=str.lower, axis=\"columns\")\n\n # Too many targets\n over_spec_msg = \"Cannot specify both 'mapper' and any of 'index' or 'columns'\"\n with pytest.raises(TypeError, match=over_spec_msg):\n df.rename(str.lower, index=str.lower, columns=str.lower)\n\n # Duplicates\n with pytest.raises(TypeError, match=\"multiple values\"):\n df.rename(id, mapper=id)\n\n def test_rename_positional_raises(self):\n # GH 29136\n df = DataFrame(columns=[\"A\", \"B\"])\n msg = r\"rename\\(\\) takes from 1 to 2 positional arguments\"\n\n with 
pytest.raises(TypeError, match=msg):\n df.rename(None, str.lower)\n\n def test_rename_no_mappings_raises(self):\n # GH 29136\n df = DataFrame([[1]])\n msg = \"must pass an index to rename\"\n with pytest.raises(TypeError, match=msg):\n df.rename()\n\n with pytest.raises(TypeError, match=msg):\n df.rename(None, index=None)\n\n with pytest.raises(TypeError, match=msg):\n df.rename(None, columns=None)\n\n with pytest.raises(TypeError, match=msg):\n df.rename(None, columns=None, index=None)\n\n def test_rename_mapper_and_positional_arguments_raises(self):\n # GH 29136\n df = DataFrame([[1]])\n msg = \"Cannot specify both 'mapper' and any of 'index' or 'columns'\"\n with pytest.raises(TypeError, match=msg):\n df.rename({}, index={})\n\n with pytest.raises(TypeError, match=msg):\n df.rename({}, columns={})\n\n with pytest.raises(TypeError, match=msg):\n df.rename({}, columns={}, index={})\n",
"\"\"\"\nRudimentary Apache Arrow-backed ExtensionArray.\n\nAt the moment, just a boolean array / type is implemented.\nEventually, we'll want to parametrize the type and support\nmultiple dtypes. Not all methods are implemented yet, and the\ncurrent implementation is not efficient.\n\"\"\"\nimport copy\nimport itertools\nfrom typing import Type\n\nimport numpy as np\nimport pyarrow as pa\n\nimport pandas as pd\nfrom pandas.api.extensions import (\n ExtensionArray,\n ExtensionDtype,\n register_extension_dtype,\n take,\n)\n\n\n@register_extension_dtype\nclass ArrowBoolDtype(ExtensionDtype):\n\n type = np.bool_\n kind = \"b\"\n name = \"arrow_bool\"\n na_value = pa.NULL\n\n @classmethod\n def construct_array_type(cls) -> Type[\"ArrowBoolArray\"]:\n \"\"\"\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n \"\"\"\n return ArrowBoolArray\n\n @property\n def _is_boolean(self) -> bool:\n return True\n\n\n@register_extension_dtype\nclass ArrowStringDtype(ExtensionDtype):\n\n type = str\n kind = \"U\"\n name = \"arrow_string\"\n na_value = pa.NULL\n\n @classmethod\n def construct_array_type(cls) -> Type[\"ArrowStringArray\"]:\n \"\"\"\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n \"\"\"\n return ArrowStringArray\n\n\nclass ArrowExtensionArray(ExtensionArray):\n @classmethod\n def from_scalars(cls, values):\n arr = pa.chunked_array([pa.array(np.asarray(values))])\n return cls(arr)\n\n @classmethod\n def from_array(cls, arr):\n assert isinstance(arr, pa.Array)\n return cls(pa.chunked_array([arr]))\n\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=False):\n return cls.from_scalars(scalars)\n\n def __repr__(self):\n return f\"{type(self).__name__}({repr(self._data)})\"\n\n def __getitem__(self, item):\n if pd.api.types.is_scalar(item):\n return self._data.to_pandas()[item]\n else:\n vals = self._data.to_pandas()[item]\n return type(self).from_scalars(vals)\n\n def __len__(self):\n return len(self._data)\n\n def astype(self, dtype, copy=True):\n # needed to fix this astype for the Series constructor.\n if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:\n if copy:\n return self.copy()\n return self\n return super().astype(dtype, copy)\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def nbytes(self):\n return sum(\n x.size\n for chunk in self._data.chunks\n for x in chunk.buffers()\n if x is not None\n )\n\n def isna(self):\n nas = pd.isna(self._data.to_pandas())\n return type(self).from_scalars(nas)\n\n def take(self, indices, allow_fill=False, fill_value=None):\n data = self._data.to_pandas()\n\n if allow_fill and fill_value is None:\n fill_value = self.dtype.na_value\n\n result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill)\n return self._from_sequence(result, dtype=self.dtype)\n\n def copy(self):\n return type(self)(copy.copy(self._data))\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n chunks = list(itertools.chain.from_iterable(x._data.chunks for x in to_concat))\n arr = pa.chunked_array(chunks)\n return cls(arr)\n\n def __invert__(self):\n return type(self).from_scalars(~self._data.to_pandas())\n\n def _reduce(self, method, skipna=True, **kwargs):\n if skipna:\n arr = self[~self.isna()]\n else:\n arr = self\n\n try:\n op = getattr(arr, method)\n except AttributeError as err:\n raise TypeError from err\n return op(**kwargs)\n\n def any(self, axis=0, out=None):\n return self._data.to_pandas().any()\n\n def all(self, axis=0, out=None):\n return 
self._data.to_pandas().all()\n\n\nclass ArrowBoolArray(ArrowExtensionArray):\n def __init__(self, values):\n if not isinstance(values, pa.ChunkedArray):\n raise ValueError\n\n assert values.type == pa.bool_()\n self._data = values\n self._dtype = ArrowBoolDtype()\n\n\nclass ArrowStringArray(ArrowExtensionArray):\n def __init__(self, values):\n if not isinstance(values, pa.ChunkedArray):\n raise ValueError\n\n assert values.type == pa.string()\n self._data = values\n self._dtype = ArrowStringDtype()\n"
] |
[
[
"pandas.core.common.any_not_none",
"matplotlib.pyplot.cm.get_cmap",
"pandas.core.dtypes.common.is_float",
"pandas.compat._optional.import_optional_dependency",
"matplotlib.colors.Normalize",
"pandas.api.types.is_dict_like",
"pandas._config.get_option",
"pandas.io.formats.excel.ExcelFormatter",
"pandas.api.types.is_list_like",
"pandas.core.indexing._maybe_numeric_slice",
"pandas.core.common.pipe",
"pandas.util._decorators.doc",
"pandas.isna",
"pandas.core.indexing._non_reducing_slice",
"numpy.where",
"matplotlib.colors.rgb2hex"
],
[
"pandas._testing.assert_almost_equal",
"pandas.concat",
"pandas.Series",
"pandas.MultiIndex",
"pandas.Index",
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"pandas._testing.assert_series_equal",
"numpy.random.randn",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"numpy.array",
"pandas._testing.assert_frame_equal"
],
[
"numpy.arange",
"pandas.MultiIndex.from_tuples",
"pandas.Index",
"pandas.DataFrame",
"numpy.random.randn",
"pandas._testing.assert_frame_equal",
"pandas._testing.assert_index_equal"
],
[
"numpy.asarray",
"pandas.api.types.is_scalar",
"pandas.api.extensions.take"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jiwoncpark/magnify
|
[
"04421d43b9f5340989e8614d961ac9f5988bde0c"
] |
[
"magnify/attentive_neural_process/modules/layers.py"
] |
[
"\"\"\"Modified from the great implementation in\n\nhttps://github.com/3springs/attentive-neural-processes/blob/af431a267bad309b2d5698f25551986e2c4e7815/neural_processes/modules/modules.py\n\n\"\"\"\n\nfrom torch import nn\nfrom torch import Tensor\nimport torch.nn.functional as F\n\n\nclass MCDropout(nn.Dropout):\n def forward(self, input: Tensor) -> Tensor:\n return F.dropout(input, self.p, True, self.inplace)\n\n\nclass MCDropout2d(nn.Dropout2d):\n def forward(self, input: Tensor) -> Tensor:\n return F.dropout2d(input, self.p, True, self.inplace)\n\n\nclass LSTMBlock(nn.Module):\n def __init__(\n self, in_channels, out_channels, dropout=0, batchnorm=False, bias=False, num_layers=1\n ):\n super().__init__()\n self._lstm = nn.LSTM(\n input_size=in_channels,\n hidden_size=out_channels,\n num_layers=num_layers,\n dropout=dropout,\n batch_first=True,\n bias=bias\n )\n\n def forward(self, x):\n return self._lstm(x)[0]\n\n\nclass BatchNormSequence(nn.Module):\n \"\"\"Applies batch norm on features of a batch first sequence.\"\"\"\n def __init__(\n self, out_channels, **kwargs\n ):\n super().__init__()\n self.norm = nn.BatchNorm1d(out_channels, **kwargs)\n\n def forward(self, x):\n # x.shape is (Batch, Sequence, Channels)\n # Now we want to apply batchnorm and dropout to the channels. So we put it in shape\n # (Batch, Channels, Sequence) which is what BatchNorm1d expects\n x = x.permute(0, 2, 1)\n x = self.norm(x)\n return x.permute(0, 2, 1)\n\n\nclass NPBlockRelu2d(nn.Module):\n \"\"\"Block for Neural Processes.\"\"\"\n\n def __init__(\n self, in_channels, out_channels, dropout=0, batchnorm=False, bias=False\n ):\n super().__init__()\n self.linear = nn.Linear(in_channels, out_channels, bias=bias)\n self.act = nn.ReLU()\n self.dropout2d = MCDropout2d(dropout)\n self.norm = nn.BatchNorm2d(out_channels) if batchnorm else False\n\n def forward(self, x):\n # x.shape is (Batch, Sequence, Channels)\n # We pass a linear over it which operates on the Channels\n x = self.act(self.linear(x))\n\n # Now we want to apply batchnorm and dropout to the channels. So we put it in shape\n # (Batch, Channels, Sequence, None) so we can use Dropout2d & BatchNorm2d\n x = x.permute(0, 2, 1)[:, :, :, None]\n\n if self.norm:\n x = self.norm(x)\n\n x = self.dropout2d(x)\n return x[:, :, :, 0].permute(0, 2, 1)\n\n\nclass BatchMLP(nn.Module):\n \"\"\"Apply MLP to the final axis of a 3D tensor (reusing already defined MLPs).\n\n Args:\n input: input tensor of shape [B,n,d_in].\n output_sizes: An iterable containing the output sizes of the MLP as defined\n in `basic.Linear`.\n Returns:\n tensor of shape [B,n,d_out] where d_out=output_size\n \"\"\"\n\n def __init__(\n self, input_size, output_size, num_layers=2, dropout=0, batchnorm=False\n ):\n super().__init__()\n self.input_size = input_size\n self.output_size = output_size\n self.num_layers = num_layers\n\n self.initial = NPBlockRelu2d(\n input_size, output_size, dropout=dropout, batchnorm=batchnorm\n )\n self.encoder = nn.Sequential(\n *[\n NPBlockRelu2d(\n output_size, output_size, dropout=dropout, batchnorm=batchnorm\n )\n for _ in range(num_layers - 2)\n ]\n )\n self.final = nn.Linear(output_size, output_size)\n\n def forward(self, x):\n x = self.initial(x)\n x = self.encoder(x)\n return self.final(x)\n\n"
] |
[
[
"torch.nn.functional.dropout2d",
"torch.nn.BatchNorm1d",
"torch.nn.LSTM",
"torch.nn.functional.dropout",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liamcli/nasbench-1shot1
|
[
"e369bd77c69fd03070e8248f39e4f57cd901d1f3",
"e369bd77c69fd03070e8248f39e4f57cd901d1f3",
"e369bd77c69fd03070e8248f39e4f57cd901d1f3"
] |
[
"nasbench_analysis/architecture_inductive_bias/train.py",
"bohb_default/plots/util.py",
"optimizers/edarts/architect.py"
] |
[
"import argparse\nimport glob\nimport json\nimport logging\nimport os\nimport pickle\nimport sys\nimport time\n\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.utils\nimport torchvision.datasets as dset\nfrom torch.autograd import Variable\n\nfrom nasbench_analysis import eval_darts_one_shot_model_in_nasbench as naseval\nfrom nasbench_analysis.architecture_inductive_bias.model_search import NetworkIndependent as Network\nfrom nasbench_analysis.search_spaces.search_space_1 import SearchSpace1\nfrom nasbench_analysis.search_spaces.search_space_2 import SearchSpace2\nfrom nasbench_analysis.search_spaces.search_space_3 import SearchSpace3\nfrom optimizers.darts import utils\nfrom optimizers.darts.architect import Architect\nfrom optimizers.darts.genotypes import PRIMITIVES\n\nparser = argparse.ArgumentParser(\"cifar\")\nparser.add_argument('--data', type=str, default='../data', help='location of the darts corpus')\nparser.add_argument('--batch_size', type=int, default=96, help='batch size')\nparser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')\nparser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')\nparser.add_argument('--momentum', type=float, default=0.9, help='momentum')\nparser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')\nparser.add_argument('--report_freq', type=float, default=50, help='report frequency')\nparser.add_argument('--gpu', type=int, default=0, help='gpu device id')\nparser.add_argument('--epochs', type=int, default=20, help='num of training epochs')\nparser.add_argument('--init_channels', type=int, default=16, help='num of init channels')\nparser.add_argument('--num_linear_layers', type=int, default=2, help='num of linear layers')\nparser.add_argument('--layers', type=int, default=9, help='total number of layers')\nparser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')\nparser.add_argument('--cutout', action='store_true', default=False, help='use cutout')\nparser.add_argument('--cutout_length', type=int, default=16, help='cutout length')\nparser.add_argument('--cutout_prob', type=float, default=1.0, help='cutout probability')\nparser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')\nparser.add_argument('--save', type=str, default='EXP', help='experiment name')\nparser.add_argument('--seed', type=int, default=2, help='random_ws seed')\nparser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')\nparser.add_argument('--train_portion', type=float, default=0.5, help='portion of training darts')\nparser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')\nparser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')\nparser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')\nparser.add_argument('--arch_idx', type=int, help='weight decay for arch encoding')\nparser.add_argument('--output_weights', type=bool, default=True, help='Whether to use weights on the output nodes')\nparser.add_argument('--search_space', choices=['1', '2', '3'], default='1')\nparser.add_argument('--debug', action='store_true', default=False, help='run only for some batches')\nparser.add_argument('--warm_start_epochs', type=int, default=0,\n help='Warm start one-shot model before 
starting architecture updates.')\nargs = parser.parse_args()\n\nargs.save = 'experiments/inductive_bias_same_models/search_space_{}/search-{}-{}-{}-{}'.format(args.search_space,\n args.save,\n time.strftime(\n \"%Y%m%d-%H%M%S\"),\n args.seed,\n args.search_space)\nutils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))\n\n# Dump the config of the run\nwith open(os.path.join(args.save, 'config.json'), 'w') as fp:\n json.dump(args.__dict__, fp)\n\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\nlog_format = '%(asctime)s %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\nfh = logging.FileHandler(os.path.join(args.save, 'log.txt'))\nfh.setFormatter(logging.Formatter(log_format))\nlogging.getLogger().addHandler(fh)\n\nCIFAR_CLASSES = 10\n\n\ndef main():\n # Select the search space to search in\n if args.search_space == '1':\n search_space = SearchSpace1()\n elif args.search_space == '2':\n search_space = SearchSpace2()\n elif args.search_space == '3':\n search_space = SearchSpace3()\n else:\n raise ValueError('Unknown search space')\n\n if not torch.cuda.is_available():\n logging.info('no gpu device available')\n sys.exit(1)\n\n np.random.seed(args.seed)\n torch.cuda.set_device(args.gpu)\n cudnn.benchmark = True\n torch.manual_seed(args.seed)\n cudnn.enabled = True\n torch.cuda.manual_seed(args.seed)\n logging.info('gpu device = %d' % args.gpu)\n logging.info(\"args = %s\", args)\n\n criterion = nn.CrossEntropyLoss()\n criterion = criterion.cuda()\n model = Network(args.num_linear_layers, args.init_channels, CIFAR_CLASSES, args.layers, criterion,\n output_weights=args.output_weights, steps=search_space.num_intermediate_nodes,\n search_space=search_space)\n model = model.cuda()\n logging.info(\"param size = %fMB\", utils.count_parameters_in_MB(model))\n\n optimizer = torch.optim.SGD(\n model.parameters(),\n args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n train_transform, valid_transform = utils._data_transforms_cifar10(args)\n train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)\n\n num_train = len(train_data)\n indices = list(range(num_train))\n split = int(np.floor(args.train_portion * num_train))\n\n train_queue = torch.utils.data.DataLoader(\n train_data, batch_size=args.batch_size,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),\n pin_memory=True)\n\n valid_queue = torch.utils.data.DataLoader(\n train_data, batch_size=args.batch_size,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),\n pin_memory=True)\n\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, float(args.epochs), eta_min=args.learning_rate_min)\n\n architect = Architect(model, args)\n # Read a random sample of architectures\n archs = pickle.load(open(\n '/home/siemsj/projects/darts_weight_sharing_analysis/nasbench_analysis/architecture_inductive_bias/sampled_architectures_from_search_space_3.obj',\n 'rb'))\n arch = archs[args.arch_idx]\n arch_parameters = get_weights_from_arch(arch, model)\n model._arch_parameters = arch_parameters\n try:\n for epoch in range(args.epochs):\n scheduler.step()\n lr = scheduler.get_lr()[0]\n # increase the cutout probability linearly throughout search\n train_transform.transforms[-1].cutout_prob = args.cutout_prob * epoch / (args.epochs - 1)\n logging.info('epoch %d lr %e cutout_prob %e', epoch, lr,\n 
train_transform.transforms[-1].cutout_prob)\n\n # Save the one shot model architecture weights for later analysis\n arch_filename = os.path.join(args.save, 'one_shot_architecture_{}.obj'.format(epoch))\n with open(arch_filename, 'wb') as filehandler:\n numpy_tensor_list = []\n for tensor in model.arch_parameters():\n numpy_tensor_list.append(tensor.detach().cpu().numpy())\n pickle.dump(numpy_tensor_list, filehandler)\n\n # Save the entire one-shot-model\n # filepath = os.path.join(args.save, 'one_shot_model_{}.obj'.format(epoch))\n # torch.save(model.state_dict(), filepath)\n\n logging.info('architecture', numpy_tensor_list)\n\n # training\n train_acc, train_obj = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr, epoch)\n logging.info('train_acc %f', train_acc)\n\n # validation\n valid_acc, valid_obj = infer(valid_queue, model, criterion)\n logging.info('valid_acc %f', valid_acc)\n\n utils.save(model, os.path.join(args.save, 'weights.pt'))\n\n logging.info('STARTING EVALUATION')\n test, valid, runtime, params = naseval.eval_one_shot_model(config=args.__dict__,\n model=arch_filename)\n index = np.random.choice(list(range(3)))\n logging.info('TEST ERROR: %.3f | VALID ERROR: %.3f | RUNTIME: %f | PARAMS: %d'\n % (test[index],\n valid[index],\n runtime[index],\n params[index])\n )\n except Exception as e:\n logging.exception('message')\n\n\ndef get_weights_from_arch(arch, model):\n adjacency_matrix, node_list = arch\n num_ops = len(PRIMITIVES)\n\n # Assign the sampled ops to the mixed op weights.\n # These are not optimized\n alphas_mixed_op = Variable(torch.zeros(model._steps, num_ops).cuda(), requires_grad=False)\n for idx, op in enumerate(node_list):\n alphas_mixed_op[idx][PRIMITIVES.index(op)] = 1\n\n # Set the output weights\n alphas_output = Variable(torch.zeros(1, model._steps + 1).cuda(), requires_grad=False)\n for idx, label in enumerate(list(adjacency_matrix[:, -1][:-1])):\n alphas_output[0][idx] = label\n\n # Initialize the weights for the inputs to each choice block.\n if type(model.search_space) == SearchSpace1:\n begin = 3\n else:\n begin = 2\n alphas_inputs = [Variable(torch.zeros(1, n_inputs).cuda(), requires_grad=False) for n_inputs in\n range(begin, model._steps + 1)]\n for alpha_input in alphas_inputs:\n connectivity_pattern = list(adjacency_matrix[:alpha_input.shape[1], alpha_input.shape[1]])\n for idx, label in enumerate(connectivity_pattern):\n alpha_input[0][idx] = label\n\n # Total architecture parameters\n arch_parameters = [\n alphas_mixed_op,\n alphas_output,\n *alphas_inputs\n ]\n return arch_parameters\n\n\ndef train(train_queue, valid_queue, model, architect, criterion, optimizer, lr, epoch):\n objs = utils.AvgrageMeter()\n top1 = utils.AvgrageMeter()\n top5 = utils.AvgrageMeter()\n\n for step, (input, target) in enumerate(train_queue):\n model.train()\n n = input.size(0)\n\n input = input.cuda()\n target = target.cuda(non_blocking=True)\n\n # get a minibatch from the search queue with replacement\n try:\n input_search, target_search = next(valid_queue_iter)\n except:\n valid_queue_iter = iter(valid_queue)\n input_search, target_search = next(valid_queue_iter)\n\n optimizer.zero_grad()\n logits = model(input, discrete=True)\n loss = criterion(logits, target)\n\n loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)\n optimizer.step()\n\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n objs.update(loss.data.item(), n)\n top1.update(prec1.data.item(), n)\n top5.update(prec5.data.item(), n)\n\n if step % 
args.report_freq == 0:\n logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)\n if args.debug:\n break\n\n return top1.avg, objs.avg\n\n\ndef infer(valid_queue, model, criterion):\n objs = utils.AvgrageMeter()\n top1 = utils.AvgrageMeter()\n top5 = utils.AvgrageMeter()\n model.eval()\n\n for step, (input, target) in enumerate(valid_queue):\n input = input.cuda()\n target = target.cuda(non_blocking=True)\n\n logits = model(input, discrete=True)\n loss = criterion(logits, target)\n\n prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))\n n = input.size(0)\n objs.update(loss.data.item(), n)\n top1.update(prec1.data.item(), n)\n top5.update(prec5.data.item(), n)\n\n if step % args.report_freq == 0:\n logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)\n if args.debug:\n break\n\n return top1.avg, objs.avg\n\n\nif __name__ == '__main__':\n main()\n",
"import os\nimport pickle\nimport collections\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom IPython import embed\n\nfrom optimizers.utils_1 import Model_1, Architecture_1\nfrom optimizers.utils import Model, Architecture\n\ncolors={\n 'BOHB-PC-DARTS': 'darkorange',\n 'BOHB-DARTS': 'dodgerblue',\n 'BOHB-GDAS' : 'forestgreen',\n 'RE': 'crimson',\n\t\t'RS': 'darkorchid',\n\t\t'RL': 'sienna',\n\t\t'TPE': 'deepskyblue',\n 'SMAC': 'violet',\n 'HB': 'darkgray',\n 'BOHB': 'gold'\n}\n\nmarkers={\n 'BOHB-DARTS': '^',\n 'BOHB-PC-DARTS': 'v',\n 'BOHB-GDAS' : 'x',\n 'RS': 'D',\n\t\t'RE': 'o',\n\t\t'RL': 's',\n\t\t'SMAC': 'h',\n 'HB': '>',\n 'BOHB': '*',\n 'TPE': '<'\n}\n\n\ndef get_incumbent(losses, time_stamps):\n return_dict = {'time_stamps': [],\n 'losses': [],\n }\n\n current_incumbent = float('inf')\n incumbent_budget = -float('inf')\n\n for l, t in zip(losses, time_stamps):\n if l < current_incumbent:\n current_incumbent = l\n return_dict['losses'].append(l)\n return_dict['time_stamps'].append(t)\n else:\n return_dict['losses'].append(return_dict['losses'][-1])\n return_dict['time_stamps'].append(t)\n return return_dict.values()\n\n\ndef get_trajectories(args, global_min, path='regularized_evolution',\n methods=['RE', 'RS']):\n all_trajectories = {}\n for m in methods:\n dfs = []\n for seed in range(500):\n filename = os.path.join(path, m,\n 'algo_{}_0_ssp_{}_seed_{}.obj'.format(m, args.space,\n seed))\n try:\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n losses = [1 - x.test_accuracy - global_min for x in data]\n times = np.array([x.training_time for x in data])\n times = [np.sum(times[:i+1]) for i in range(len(times))]\n if m in ['HB', 'BOHB']:\n costs = np.array([x.budget for x in data])\n costs = np.array(\n [np.sum(costs[:i+1]) for i in range(len(costs))]\n )\n n = len(np.where(costs <= 280*108)[0])\n times, losses = get_incumbent(losses[:n], times[:n])\n else:\n times, losses = get_incumbent(losses, times)\n print(seed, ' MIN: ', min(losses))\n df = pd.DataFrame({str(seed): losses}, index=times)\n #embed()\n dfs.append(df)\n except FileNotFoundError:\n break\n df = merge_and_fill_trajectories(dfs, default_value=None)\n if df.empty:\n continue\n print(m, df.shape)\n\n all_trajectories[m] = {\n 'time_stamps': np.array(df.index),\n 'losses': np.array(df.T)\n }\n\n return all_trajectories\n\n\ndef merge_and_fill_trajectories(pandas_data_frames, default_value=None):\n\t# merge all tracjectories keeping all time steps\n\tdf = pd.DataFrame().join(pandas_data_frames, how='outer')\n\n\t# forward fill to make it a propper step function\n\tdf=df.fillna(method='ffill')\n\n\tif default_value is None:\n\t# backward fill to replace the NaNs for the early times by\n\t# the performance of a random configuration\n\t\tdf=df.fillna(method='bfill')\n\telse:\n\t\tdf=df.fillna(default_value)\n\n\treturn(df)\n\n\ndef plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,\n incumbent=None, show=True, linewidth=3, marker_size=10,\n xscale='log', xlabel='wall clock time [s]', yscale='log',\n ylabel=None, legend_loc = 'best', xlim=None, ylim=None,\n plot_mean=True, labels={}, markers=markers, colors=colors,\n figsize=(16,9)):\n\n if regret:\n if ylabel is None: ylabel = 'regret'\n\t\t# find lowest performance in the data to update incumbent\n\n if incumbent is None:\n incumbent = np.inf\n for tr in incumbent_trajectories.values():\n incumbent = min(tr['losses'][:,-1].min(), incumbent)\n print('incumbent value: ', incumbent)\n\n for m,tr in 
incumbent_trajectories.items():\n trajectory = np.copy(tr['losses'])\n if (trajectory.shape[0] == 0): continue\n if regret: trajectory -= incumbent\n\n sem = np.sqrt(trajectory.var(axis=0, ddof=1)/tr['losses'].shape[0])\n if plot_mean:\n mean = trajectory.mean(axis=0)\n else:\n mean = np.median(trajectory,axis=0)\n sem *= 1.253\n\n if 'DARTS' in m or 'GDAS' in m:\n ax.fill_between(tr['time_stamps'], mean-2*sem, mean+2*sem,\n color=colors[m], alpha=0.2)\n\n ax.plot(tr['time_stamps'],mean,\n label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,\n marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))\n\n if axins is not None:\n axins.plot(tr['time_stamps'],mean,\n label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,\n marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))\n\n return (fig, ax)\n",
"import numpy as np\nimport torch\nfrom torch.autograd import Variable\n\nclass AdaptiveLR(object):\n def __init__(self, base_lrs, min_lr, max_lr):\n self.base_lrs = base_lrs\n self.min_lr = min_lr\n self.max_lr = max_lr\n self.arch_grad_norms = np.zeros(len(base_lrs))\n\n def update_norm_get_lr(self, i, p):\n value = torch.norm(p, p=float('inf')).item()\n self.arch_grad_norms[i] += value**2\n lr = self.base_lrs[i] / np.sqrt(max(1, self.arch_grad_norms[i]))\n return max(self.min_lr, min(lr, self.max_lr))\n\nclass History:\n \"\"\"\n Data class for saving architecture search history. \n \"\"\"\n\n def __init__(\n self,\n model,\n architect,\n to_save=(\"alphas\", \"edges\")\n ):\n\n self.model = model\n self.architect = architect\n self.to_save = to_save\n self.dict = {}\n\n for field in to_save:\n self.dict[field] = []\n\n def update_history(self):\n for field in self.to_save:\n if field == \"alphas\":\n values = self.architect.alphas.data.cpu().numpy()\n self.dict[\"alphas\"].append(values)\n elif field == \"edges\":\n values = [v.data.cpu().numpy() for v in self.architect.edges]\n values.append(self.architect.output_weights.data.cpu().numpy())\n self.dict[\"edges\"] .append(values)\n\ndef _concat(xs):\n return torch.cat([x.view(-1) for x in xs])\n\ndef normalize(x, dim):\n x = torch.clamp(x, min=1e-5)\n return x / x.sum(dim=dim, keepdim=True)\n\nclass Architect(object):\n\n def __init__(self, model, args):\n self.alpha_lr = args.arch_learning_rate \n self.edge_lr = args.edge_learning_rate \n self.model = model\n self.alphas = model._arch_parameters[0]\n self.output_weights = model._arch_parameters[1]\n self.edges = model._arch_parameters[2:]\n self.history = History(model, self)\n base_lrs = [self.alpha_lr] + [self.edge_lr] * (len(self.edges) + 1)\n self.adapt_lr = False\n self.adaptive_lr = AdaptiveLR(base_lrs, 0.0001, 0.1)\n self.steps = 0\n self.arch_weight_decay = args.arch_weight_decay\n\n def _train_loss(self, model, input, target):\n return model._loss(input, target)\n\n def _val_loss(self, model, input, target):\n return model._loss(input, target)\n\n\n def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled):\n self.steps += 1\n self.model.zero_grad()\n self._backward_step(input_valid, target_valid)\n\n for p in self.model._arch_parameters[0:1]:\n if self.adapt_lr:\n norm_inf = max(torch.norm(p.grad.data, p=float('inf'), dim=-1))\n lr = self.alpha_lr / norm_inf\n #lr = self.adaptive_lr.update_norm_get_lr(0, p.grad.data)\n else:\n lr = self.alpha_lr\n if self.steps % 100==0:\n print('operation lr: {}'.format(lr))\n p.data.mul_(torch.exp(-lr * p.grad.data))\n p.data = normalize(p.data, -1)\n p.grad.detach_()\n p.grad.zero_()\n\n i = 1\n for p in self.model._arch_parameters[1:]:\n if self.adapt_lr:\n #lr = self.adaptive_lr.update_norm_get_lr(i, p.grad.data)\n norm_inf = torch.norm(p.grad.data, p=float('inf'), dim=-1)\n lr = self.edge_lr / norm_inf.item()\n else:\n lr = self.edge_lr\n if self.steps % 100==0:\n print('edge lr {}: {}'.format(i, lr))\n i += 1\n p.data.mul_(torch.exp(-lr * p.grad.data))\n p.data = normalize(p.data, -1)\n p.grad.detach_()\n p.grad.zero_()\n\n def _backward_step(self, input_valid, target_valid):\n entropic_reg = 0\n for p in self.model._arch_parameters:\n entropic_reg += torch.sum(p * torch.log(p/(1/p.size()[1])))\n loss = self._val_loss(self.model, input_valid, target_valid) + self.arch_weight_decay * entropic_reg\n loss = self._val_loss(self.model, input_valid, target_valid)\n loss.backward()\n\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.set_device",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"torch.zeros",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.cuda.is_available",
"numpy.floor"
],
[
"numpy.median",
"pandas.DataFrame",
"numpy.copy",
"numpy.array",
"numpy.where",
"numpy.sum"
],
[
"torch.exp",
"torch.clamp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Cangonin/audiomentations
|
[
"fd1c0fd9bcfb9f62fa961938191e13d050752450",
"fd1c0fd9bcfb9f62fa961938191e13d050752450",
"fd1c0fd9bcfb9f62fa961938191e13d050752450",
"fd1c0fd9bcfb9f62fa961938191e13d050752450"
] |
[
"audiomentations/spec_augmentations/spec_frequency_mask.py",
"audiomentations/augmentations/add_background_noise.py",
"audiomentations/augmentations/frequency_mask.py",
"audiomentations/augmentations/high_shelf_filter.py"
] |
[
"import random\n\nimport numpy as np\n\nfrom audiomentations.core.transforms_interface import BaseSpectrogramTransform\n\n\nclass SpecFrequencyMask(BaseSpectrogramTransform):\n \"\"\"\n Mask a set of frequencies in a spectrogram, à la Google AI SpecAugment. This type of data\n augmentation has proved to make speech recognition models more robust.\n\n The masked frequencies can be replaced with either the mean of the original values or a\n given constant (e.g. zero).\n \"\"\"\n\n supports_multichannel = True\n\n def __init__(\n self,\n min_mask_fraction: float = 0.03,\n max_mask_fraction: float = 0.25,\n fill_mode: str = \"constant\",\n fill_constant: float = 0.0,\n p: float = 0.5,\n ):\n super().__init__(p)\n self.min_mask_fraction = min_mask_fraction\n self.max_mask_fraction = max_mask_fraction\n assert fill_mode in (\"mean\", \"constant\")\n self.fill_mode = fill_mode\n self.fill_constant = fill_constant\n\n def randomize_parameters(self, magnitude_spectrogram):\n super().randomize_parameters(magnitude_spectrogram)\n if self.parameters[\"should_apply\"]:\n num_frequency_bins = magnitude_spectrogram.shape[0]\n min_frequencies_to_mask = int(\n round(self.min_mask_fraction * num_frequency_bins)\n )\n max_frequencies_to_mask = int(\n round(self.max_mask_fraction * num_frequency_bins)\n )\n num_frequencies_to_mask = random.randint(\n min_frequencies_to_mask, max_frequencies_to_mask\n )\n self.parameters[\"start_frequency_index\"] = random.randint(\n 0, num_frequency_bins - num_frequencies_to_mask\n )\n self.parameters[\"end_frequency_index\"] = (\n self.parameters[\"start_frequency_index\"] + num_frequencies_to_mask\n )\n\n def apply(self, magnitude_spectrogram):\n if self.fill_mode == \"mean\":\n fill_value = np.mean(\n magnitude_spectrogram[\n self.parameters[\"start_frequency_index\"] : self.parameters[\n \"end_frequency_index\"\n ]\n ]\n )\n else:\n # self.fill_mode == \"constant\"\n fill_value = self.fill_constant\n magnitude_spectrogram = magnitude_spectrogram.copy()\n magnitude_spectrogram[\n self.parameters[\"start_frequency_index\"] : self.parameters[\n \"end_frequency_index\"\n ]\n ] = fill_value\n return magnitude_spectrogram\n\n",
"import functools\nimport random\nimport warnings\n\nimport numpy as np\n\nfrom audiomentations.core.audio_loading_utils import load_sound_file\nfrom audiomentations.core.transforms_interface import BaseWaveformTransform\nfrom audiomentations.core.utils import (\n calculate_desired_noise_rms,\n calculate_rms,\n convert_decibels_to_amplitude_ratio,\n get_file_paths,\n)\n\n\nclass AddBackgroundNoise(BaseWaveformTransform):\n \"\"\"Mix in another sound, e.g. a background noise. Useful if your original sound is clean and\n you want to simulate an environment where background noise is present.\n\n Can also be used for mixup, as in https://arxiv.org/pdf/1710.09412.pdf\n\n A folder of (background noise) sounds to be mixed in must be specified. These sounds should\n ideally be at least as long as the input sounds to be transformed. Otherwise, the background\n sound will be repeated, which may sound unnatural.\n\n Note that the gain of the added noise is relative to the amount of signal in the input if the parameter noise_rms\n is set to \"relative\" (default option). This implies that if the input is completely silent, no noise will be added.\n\n Here are some examples of datasets that can be downloaded and used as background noise:\n\n * https://github.com/karolpiczak/ESC-50#download\n * https://github.com/microsoft/DNS-Challenge/\n \"\"\"\n\n def __init__(\n self,\n sounds_path=None,\n min_snr_in_db=3,\n max_snr_in_db=30,\n noise_rms=\"relative\",\n min_absolute_rms_in_db=-45,\n max_absolute_rms_in_db=-15,\n p=0.5,\n lru_cache_size=2,\n ):\n \"\"\"\n :param sounds_path: Path to a folder that contains sound files to randomly mix in. These\n files can be flac, mp3, ogg or wav.\n :param min_snr_in_db: Minimum signal-to-noise ratio in dB. Is only used if noise_rms is set to \"relative\"\n :param max_snr_in_db: Maximum signal-to-noise ratio in dB. Is only used if noise_rms is set to \"relative\"\n :param noise_rms: Defines how the background noise will be added to the audio input. If the chosen\n option is \"relative\", the rms of the added noise will be proportional to the rms of\n the input sound. If the chosen option is \"absolute\", the background noise will have\n a rms independent of the rms of the input audio file. The default option is \"relative\".\n :param min_absolute_rms_in_db: Is only used if noise_rms is set to \"absolute\". It is\n the minimum rms value in dB that the added noise can take. The lower the rms is, the\n lower will the added sound be.\n :param max_absolute_rms_in_db: Is only used if noise_rms is set to \"absolute\". It is\n the maximum rms value in dB that the added noise can take. 
Note that this value\n can not exceed 0.\n :param p: The probability of applying this transform\n :param lru_cache_size: Maximum size of the LRU cache for storing noise files in memory\n \"\"\"\n super().__init__(p)\n self.sound_file_paths = get_file_paths(sounds_path)\n self.sound_file_paths = [str(p) for p in self.sound_file_paths]\n\n assert min_absolute_rms_in_db <= max_absolute_rms_in_db <= 0\n assert min_snr_in_db <= max_snr_in_db\n assert len(self.sound_file_paths) > 0\n\n self.noise_rms = noise_rms\n self.min_snr_in_db = min_snr_in_db\n self.min_absolute_rms_in_db = min_absolute_rms_in_db\n self.max_absolute_rms_in_db = max_absolute_rms_in_db\n self.max_snr_in_db = max_snr_in_db\n self._load_sound = functools.lru_cache(maxsize=lru_cache_size)(\n AddBackgroundNoise._load_sound\n )\n\n @staticmethod\n def _load_sound(file_path, sample_rate):\n return load_sound_file(file_path, sample_rate)\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"snr_in_db\"] = random.uniform(\n self.min_snr_in_db, self.max_snr_in_db\n )\n self.parameters[\"rms_in_db\"] = random.uniform(\n self.min_absolute_rms_in_db, self.max_absolute_rms_in_db\n )\n self.parameters[\"noise_file_path\"] = random.choice(self.sound_file_paths)\n\n num_samples = len(samples)\n noise_sound, _ = self._load_sound(\n self.parameters[\"noise_file_path\"], sample_rate\n )\n\n num_noise_samples = len(noise_sound)\n min_noise_offset = 0\n max_noise_offset = max(0, num_noise_samples - num_samples - 1)\n self.parameters[\"noise_start_index\"] = random.randint(\n min_noise_offset, max_noise_offset\n )\n self.parameters[\"noise_end_index\"] = (\n self.parameters[\"noise_start_index\"] + num_samples\n )\n\n def apply(self, samples, sample_rate):\n noise_sound, _ = self._load_sound(\n self.parameters[\"noise_file_path\"], sample_rate\n )\n noise_sound = noise_sound[\n self.parameters[\"noise_start_index\"] : self.parameters[\"noise_end_index\"]\n ]\n\n noise_rms = calculate_rms(noise_sound)\n if noise_rms < 1e-9:\n warnings.warn(\n \"The file {} is too silent to be added as noise. Returning the input\"\n \" unchanged.\".format(self.parameters[\"noise_file_path\"])\n )\n return samples\n\n clean_rms = calculate_rms(samples)\n\n if self.noise_rms == \"relative\":\n desired_noise_rms = calculate_desired_noise_rms(\n clean_rms, self.parameters[\"snr_in_db\"]\n )\n\n # Adjust the noise to match the desired noise RMS\n noise_sound = noise_sound * (desired_noise_rms / noise_rms)\n\n if self.noise_rms == \"absolute\":\n desired_noise_rms_db = self.parameters[\"rms_in_db\"]\n desired_noise_rms_amp = convert_decibels_to_amplitude_ratio(\n desired_noise_rms_db\n )\n gain = desired_noise_rms_amp / noise_rms\n noise_sound = noise_sound * gain\n\n # Repeat the sound if it shorter than the input sound\n num_samples = len(samples)\n while len(noise_sound) < num_samples:\n noise_sound = np.concatenate((noise_sound, noise_sound))\n\n if len(noise_sound) > num_samples:\n noise_sound = noise_sound[0:num_samples]\n\n # Return a mix of the input sound and the background noise sound\n return samples + noise_sound\n\n def __getstate__(self):\n state = self.__dict__.copy()\n warnings.warn(\n \"Warning: the LRU cache of AddBackgroundNoise gets discarded when pickling it.\"\n \" E.g. 
this means the cache will not be used when using AddBackgroundNoise together\"\n \" with multiprocessing on Windows\"\n )\n del state[\"_load_sound\"]\n return state\n",
"import random\nimport warnings\n\nimport numpy as np\nfrom scipy.signal import butter, sosfilt\n\nfrom audiomentations.core.transforms_interface import BaseWaveformTransform\n\n\nclass FrequencyMask(BaseWaveformTransform):\n \"\"\"\n Mask some frequency band on the spectrogram.\n Inspired by https://arxiv.org/pdf/1904.08779.pdf\n\n This transform does basically the same as BandStopFilter\n \"\"\"\n\n supports_multichannel = True\n\n def __init__(self, min_frequency_band=0.0, max_frequency_band=0.5, p=0.5):\n \"\"\"\n :param min_frequency_band: Minimum bandwidth, float\n :param max_frequency_band: Maximum bandwidth, float\n :param p: The probability of applying this transform\n \"\"\"\n super().__init__(p)\n warnings.warn(\n \"The FrequencyMask class has been deprecated and may be removed in a future\"\n \" version of audiomentations. You can use BandStopFilter instead. It has\"\n \" different defaults and different parameter randomization that is better\"\n \" aligned with human hearing.\",\n DeprecationWarning,\n stacklevel=2,\n )\n self.min_frequency_band = min_frequency_band\n self.max_frequency_band = max_frequency_band\n\n def __butter_bandstop(self, lowcut, highcut, fs, order=5):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n sos = butter(order, [low, high], btype=\"bandstop\", output=\"sos\")\n return sos\n\n def __butter_bandstop_filter(self, data, lowcut, highcut, fs, order=5):\n sos = self.__butter_bandstop(lowcut, highcut, fs, order=order)\n y = sosfilt(sos, data).astype(np.float32)\n return y\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n if self.parameters[\"should_apply\"]:\n self.parameters[\"bandwidth\"] = random.randint(\n self.min_frequency_band * sample_rate // 2,\n self.max_frequency_band * sample_rate // 2,\n )\n self.parameters[\"freq_start\"] = random.randint(\n 16, sample_rate // 2 - self.parameters[\"bandwidth\"] - 1\n )\n\n def apply(self, samples, sample_rate):\n bandwidth = self.parameters[\"bandwidth\"]\n freq_start = self.parameters[\"freq_start\"]\n samples = self.__butter_bandstop_filter(\n samples, freq_start, freq_start + bandwidth, sample_rate, order=6\n )\n return samples\n",
"import random\n\nimport numpy as np\nfrom scipy.signal import sosfilt, sosfilt_zi\n\nfrom audiomentations.core.transforms_interface import BaseWaveformTransform\nfrom audiomentations.core.utils import (\n convert_frequency_to_mel,\n convert_mel_to_frequency,\n)\n\n\nclass HighShelfFilter(BaseWaveformTransform):\n \"\"\"\n High-shelf filter transform. Applies a high-shelf filter at a specific center frequency in hertz.\n The gain at nyquist frequency is controlled by `{min,max}_gain_db` (note: can be positive or negative!).\n Filter coefficients are taken from the W3 Audio EQ Cookbook: https://www.w3.org/TR/audio-eq-cookbook/\n \"\"\"\n\n supports_multichannel = True\n\n def __init__(\n self,\n min_center_freq=300.0,\n max_center_freq=7500.0,\n min_gain_db=-18.0,\n max_gain_db=18.0,\n min_q=0.1,\n max_q=0.999,\n p=0.5,\n ):\n \"\"\"\n :param min_center_freq: The minimum center frequency of the shelving filter\n :param max_center_freq: The maximum center frequency of the shelving filter\n :param min_gain_db: The minimum gain at the nyquist frequency\n :param max_gain_db: The maximum gain at the nyquist frequency\n :param min_q: The minimum quality factor Q. The higher the Q, the steeper the\n transition band will be.\n :param max_q: The maximum quality factor Q. The higher the Q, the steeper the\n transition band will be.\n \"\"\"\n\n assert (\n min_center_freq <= max_center_freq\n ), \"`min_center_freq` should be no greater than `max_center_freq`\"\n assert (\n min_gain_db <= max_gain_db\n ), \"`min_gain_db` should be no greater than `max_gain_db`\"\n\n assert 0 < min_q <= 1, \"`min_q` should be greater than 0 and less or equal to 1\"\n assert 0 < max_q <= 1, \"`max_q` should be greater than 0 and less or equal to 1\"\n\n super().__init__(p)\n\n self.min_center_freq = min_center_freq\n self.max_center_freq = max_center_freq\n\n self.min_gain_db = min_gain_db\n self.max_gain_db = max_gain_db\n\n self.min_q = min_q\n self.max_q = max_q\n\n def _get_biquad_coefficients_from_input_parameters(\n self, center_freq, gain_db, q_factor, sample_rate\n ):\n normalized_frequency = 2 * np.pi * center_freq / sample_rate\n gain = 10 ** (gain_db / 40)\n alpha = np.sin(normalized_frequency) / 2 / q_factor\n\n b0 = gain * (\n (gain + 1)\n + (gain - 1) * np.cos(normalized_frequency)\n + 2 * np.sqrt(gain) * alpha\n )\n\n b1 = -2 * gain * ((gain - 1) + (gain + 1) * np.cos(normalized_frequency))\n\n b2 = gain * (\n (gain + 1)\n + (gain - 1) * np.cos(normalized_frequency)\n - 2 * np.sqrt(gain) * alpha\n )\n\n a0 = (\n (gain + 1)\n - (gain - 1) * np.cos(normalized_frequency)\n + 2 * np.sqrt(gain) * alpha\n )\n\n a1 = 2 * ((gain - 1) - (gain + 1) * np.cos(normalized_frequency))\n\n a2 = (\n (gain + 1)\n - (gain - 1) * np.cos(normalized_frequency)\n - 2 * np.sqrt(gain) * alpha\n )\n\n # Return it in `sos` format\n sos = np.array([[b0 / a0, b1 / a0, b2 / a0, 1, a1 / a0, a2 / a0]])\n\n return sos\n\n def randomize_parameters(self, samples, sample_rate):\n super().randomize_parameters(samples, sample_rate)\n\n center_mel = np.random.uniform(\n low=convert_frequency_to_mel(self.min_center_freq),\n high=convert_frequency_to_mel(self.max_center_freq),\n )\n self.parameters[\"center_freq\"] = convert_mel_to_frequency(center_mel)\n self.parameters[\"gain_db\"] = random.uniform(self.min_gain_db, self.max_gain_db)\n self.parameters[\"q_factor\"] = random.uniform(self.min_q, self.max_q)\n\n def apply(self, samples, sample_rate):\n assert samples.dtype == np.float32\n\n sos = 
self._get_biquad_coefficients_from_input_parameters(\n self.parameters[\"center_freq\"],\n self.parameters[\"gain_db\"],\n self.parameters[\"q_factor\"],\n sample_rate,\n )\n\n # The processing takes place here\n zi = sosfilt_zi(sos)\n if len(samples.shape) == 1:\n processed_samples, _ = sosfilt(sos, samples, zi=zi * samples[0])\n processed_samples = processed_samples.astype(np.float32)\n else:\n processed_samples = np.zeros_like(samples, dtype=np.float32)\n for chn_idx in range(samples.shape[0]):\n processed_samples[chn_idx, :], _ = sosfilt(\n sos, samples[chn_idx, :], zi=zi * samples[chn_idx, 0]\n )\n\n return processed_samples\n"
] |
[
[
"numpy.mean"
],
[
"numpy.concatenate"
],
[
"scipy.signal.butter",
"scipy.signal.sosfilt"
],
[
"scipy.signal.sosfilt",
"numpy.sqrt",
"numpy.cos",
"scipy.signal.sosfilt_zi",
"numpy.sin",
"numpy.zeros_like",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
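A note on the `FrequencyMask` code in the row above: `randomize_parameters` passes float products (`min_frequency_band * sample_rate // 2`) to `random.randint`, which has been deprecated since Python 3.10 and raises `TypeError` from 3.12. Below is a minimal standalone sketch of the same Butterworth band-stop idea with explicit integer casts; `random_bandstop` and its defaults are illustrative, not part of the audiomentations API.

```python
import random

import numpy as np
from scipy.signal import butter, sosfilt


def random_bandstop(samples, sample_rate, min_band=0.1, max_band=0.5, order=6):
    # Pick a random stop band, mirroring FrequencyMask but with int casts
    # so random.randint accepts the bounds on all Python versions.
    nyquist = sample_rate // 2
    bandwidth = random.randint(int(min_band * nyquist), int(max_band * nyquist))
    freq_start = random.randint(16, nyquist - bandwidth - 1)
    # Normalize band edges to the Nyquist frequency, as scipy.signal.butter expects.
    low = freq_start / nyquist
    high = (freq_start + bandwidth) / nyquist
    sos = butter(order, [low, high], btype="bandstop", output="sos")
    return sosfilt(sos, samples).astype(np.float32)


noise = np.random.default_rng(0).standard_normal(16000).astype(np.float32)
filtered = random_bandstop(noise, sample_rate=16000)
```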
jannster/heartandsole
|
[
"af5843ca82b3671002098c527a4437a858f2be1c"
] |
[
"tests/test_fields.py"
] |
[
"\"\"\"Based somewhat on pandas accessor testing.\n\nhttps://github.com/pandas-dev/pandas/blob/master/pandas/tests/strings/test_strings.py\nhttps://github.com/pandas-dev/pandas/blob/master/pandas/tests/strings/test_api.py\nhttps://github.com/pandas-dev/pandas/blob/master/pandas/tests/series/accessors/test_str_accessor.py\nhttps://github.com/pandas-dev/pandas/blob/master/pandas/tests/series/accessors/test_cat_accessor.py\n\n\"\"\"\nimport datetime\nimport unittest\n\nimport dateutil\nimport pandas as pd\nimport pandas.testing as tm\n\nimport heartandsole\nfrom heartandsole import Activity\nfrom heartandsole.core.field import CachedField\nfrom heartandsole.core.fields.base import ActivityField\n\n\ndef safe_import(mod_name):\n \"\"\"\n Args:\n mod_name (str): Name of the module to be imported\n Returns:\n The imported module if successful, or False\n \"\"\"\n try:\n return __import__(mod_name)\n except ImportError:\n return False\n\n\ndef skip_if_installed(package):\n \"\"\"Skip a test if a package is installed.\n \n Args:\n package (str): The name of the package.\n \"\"\"\n return unittest.skipIf(\n safe_import(package), reason=f'Skipping because {package} is installed.'\n )\n\n\nclass MyField(ActivityField):\n _field_name = 'mine'\n\n\nclass TestActivityField(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n # Same way that fields are added to Activity in activity.py.\n heartandsole.Activity._fields.add('mine')\n heartandsole.Activity.mine = CachedField('mine', MyField)\n \n @classmethod\n def tearDownClass(cls):\n delattr(heartandsole.Activity, 'mine')\n heartandsole.Activity._fields.remove('mine')\n\n def test_exists(self):\n self.assertIn('mine', heartandsole.Activity._fields)\n self.assertIn('mine', dir(heartandsole.Activity))\n self.assertIs(heartandsole.Activity.mine, MyField)\n \n activity = heartandsole.Activity(\n pd.DataFrame.from_dict({\n 'mine': [0.0, 1.0, 2.0, 3.0, 4.0],\n })\n )\n self.assertIsInstance(activity.mine, MyField)\n\n def test_has(self):\n activity = heartandsole.Activity(\n pd.DataFrame.from_dict({\n 'mine': [0.0, 1.0, 2.0, 3.0, 4.0],\n })\n )\n self.assertTrue(activity.has_streams('mine'))\n self.assertFalse(activity.has_streams('not_mine'))\n\n def test_stream(self):\n activity = heartandsole.Activity(\n pd.DataFrame.from_dict({\n 'mine': [0.0, 1.0, 2.0, 3.0, 4.0],\n })\n )\n data = activity.mine.stream\n # data = activity.mine.stream('records')\n self.assertIs(data, activity.records['mine'])\n\n activity_not_mine = heartandsole.Activity(\n pd.DataFrame.from_dict({\n 'not_mine': [0.0, 1.0, 2.0, 3.0, 4.0],\n })\n )\n self.assertIsNone(activity_not_mine.mine.stream)\n\n def test_laps(self):\n activity = heartandsole.Activity(\n pd.DataFrame([]),\n laps=pd.DataFrame.from_dict({\n 'mine_a': ['a', 'a'],\n 'mine_1': [1, 1],\n 'other_b': ['b', 'b'],\n 'c_mine': ['c', 'c']\n })\n )\n\n result = activity.mine.laps\n expected = pd.DataFrame.from_dict({\n 'a': ['a', 'a'],\n '1': [1, 1],\n 'c': ['c', 'c'],\n })\n tm.assert_frame_equal(result, expected)\n\n result = activity.mine.lap_cols\n expected = pd.Index(['mine_a', 'mine_1', 'c_mine'])\n tm.assert_index_equal(result, expected)\n\n def test_summary(self):\n activity = heartandsole.Activity(\n pd.DataFrame([]),\n summary=pd.Series({\n 'mine_a': 'a',\n 'mine_1': 1,\n 'other_b': 'b',\n 'c_mine': 'c',\n })\n )\n result = activity.mine.summary\n expected = pd.Series({'a': 'a', '1': 1, 'c': 'c'})\n tm.assert_series_equal(result, expected)\n\n result = activity.mine.summary_rows\n expected = pd.Index(['mine_a', 
'mine_1', 'c_mine'])\n tm.assert_index_equal(result, expected)\n\nclass TestDistance(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.activity = heartandsole.Activity(\n pd.DataFrame.from_dict({\n 'distance': [0.0, 25.0, 50.0, 75.0, 100.0],\n 'lat': [40.0, 40.1, 40.2, 40.3, 40.4],\n 'lon': [-105.0, -105.0, -105.0, -105.0, -105.0]\n }),\n summary=pd.Series({'distance': 100.0}),\n laps=pd.DataFrame.from_dict({'distance': [50.0, 50.0]}),\n )\n\n def test_total(self):\n activity = heartandsole.Activity(\n pd.DataFrame.from_dict({\n 'distance': [0.0, 25.0, 50.0, 75.0, 100.0],\n 'lat': [40 + 0.00001 * i for i in range(5)],\n 'lon': [-105.0, -105.0, -105.0, -105.0, -105.0]\n }),\n summary=pd.Series({'distance_total': 101.0}),\n laps=pd.DataFrame.from_dict({'distance_total': [50.0, 52.0]}),\n )\n for src, expected in zip(\n ('records', 'summary', 'laps', 'position'),\n (100.0, 101.0, 102.0, 4 * 0.00001 * 111200)\n ):\n result = activity.distance.total(src)\n self.assertIsInstance(result, float)\n self.assertAlmostEqual(result, expected, places=3)\n # print(self.activity.distance.total(src))\n\n activity_blank = heartandsole.Activity(pd.DataFrame([]))\n for src in ['records', 'summary', 'laps']:\n # Might wanna raise a warning here, or something.\n self.assertIsNone(activity_blank.distance.total(src))\n\n def test_records_from_position(self):\n distances = self.activity.distance.records_from_position()\n self.assertIsInstance(distances, pd.Series)\n self.assertTrue(pd.api.types.is_float_dtype(distances))\n\n @skip_if_installed('pandas_xyz')\n def test_raises(self):\n with self.assertRaisesRegex(ImportError, 'pandas_xyz'):\n self.activity.distance.total('position')\n\n\nclass TestElevation(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.activity = heartandsole.Activity(pd.DataFrame.from_dict({\n 'elevation': [0.0, 1.0, 2.0, 1.0, 3.0],\n }))\n\n @unittest.skip('Not implemented this round')\n def test_convert_units(self):\n # Convert units from native to default.\n self.activity.elevation._convert_record_units(orig='feet')\n\n @skip_if_installed('pandas_xyz')\n def test_raises(self):\n with self.assertRaisesRegex(ImportError, 'pandas_xyz'):\n self.activity.elevation.gain('records')\n\n def test_gain(self):\n activity = heartandsole.Activity(\n pd.DataFrame.from_dict({'elevation': [0.0, 1.0, 0.0, 6.0]}),\n summary=pd.Series({'elevation_gain': 100.0}),\n laps=pd.DataFrame.from_dict({'elevation_gain': [50.0, 75.0]}),\n )\n\n for src, expected in zip(\n ('records', 'summary', 'laps'),\n (6.0, 100.0, 125.0)\n ):\n result = activity.elevation.gain(src)\n self.assertIsInstance(result, float)\n self.assertEqual(result, expected)\n\n activity_blank = heartandsole.Activity(pd.DataFrame([]))\n for src in ['records', 'summary', 'laps']:\n # Might wanna raise a warning here, or something.\n self.assertIsNone(activity_blank.elevation.gain(src))\n\n def test_loss(self):\n activity = heartandsole.Activity(\n pd.DataFrame.from_dict({'elevation': [6.0, 0.0, 1.0, 0.0]}),\n summary=pd.Series({'elevation_loss': 100.0}),\n laps=pd.DataFrame.from_dict({'elevation_loss': [50.0, 75.0]}),\n )\n\n for src, expected in zip(\n ('records', 'summary', 'laps'),\n (6.0, 100.0, 125.0)\n ):\n result = activity.elevation.loss(src)\n self.assertIsInstance(result, float)\n self.assertEqual(result, expected)\n\n activity_blank = heartandsole.Activity(pd.DataFrame([]))\n for src in ['records', 'summary', 'laps']:\n # Might wanna raise a warning here, or something.\n 
self.assertIsNone(activity_blank.elevation.loss(src))\n\n\nclass TestPosition(unittest.TestCase):\n\n def test_convert_units(self):\n # Semicircle units\n activity = heartandsole.Activity(pd.DataFrame.from_dict({\n 'lat': [40.0 * 2 ** 31 / 180],\n 'lon': [-105.0 * 2 ** 31 / 180]\n }))\n\n activity.lat._convert_record_units(inplace=True)\n activity.lon._convert_record_units(inplace=True)\n\n self.assertEqual(activity.records['lat'].iloc[0], 40.0)\n self.assertEqual(activity.records['lon'].iloc[0], -105.0)\n\n def test_center(self):\n activity = heartandsole.Activity(pd.DataFrame.from_dict({\n 'lat': [40.0, 40.1, 40.1, 45.0]\n }))\n self.assertEqual(activity.lat.center, 42.5)\n\n\nclass TestTime(unittest.TestCase):\n\n def test_time_from_timestamp(self):\n t0 = datetime.datetime.now()\n activity = heartandsole.Activity(\n pd.DataFrame.from_dict({\n 'timestamp': [t0 + datetime.timedelta(seconds=s) for s in (0, 50, 75, 100)],\n })\n )\n\n result = activity.time.records_from_timestamps()\n expected = pd.Series([0, 50, 75, 100], name='time')\n tm.assert_series_equal(result, expected)\n\n def test_elapsed(self):\n t0 = datetime.datetime.now()\n activity = heartandsole.Activity(\n pd.DataFrame.from_dict({\n 'timestamp': [t0 + datetime.timedelta(seconds=s) for s in (0, 50, 75, 100)],\n 'time': [0, 100, 200, 400],\n }),\n summary=pd.Series({\n 'time_elapsed': 300.0,\n 'timestamp_start': t0,\n 'timestamp_end': t0 + datetime.timedelta(seconds=101)\n }),\n laps=pd.DataFrame.from_dict({\n 'time_elapsed': [200.0, 150.0],\n 'timestamp_start': [t0, t0 + datetime.timedelta(seconds=49)],\n 'timestamp_end': [t0 + datetime.timedelta(seconds=49), t0 + datetime.timedelta(seconds=99)],\n }),\n )\n\n self.assertEqual(\n activity.timestamp.elapsed('records'), # pd.Timedelta\n datetime.timedelta(seconds=100)\n )\n self.assertEqual(\n activity.timestamp.elapsed('summary'), # dt.timedelta\n datetime.timedelta(seconds=101)\n )\n self.assertEqual(\n activity.timestamp.elapsed('laps'), # pd.Timedelta\n datetime.timedelta(seconds=99)\n )\n self.assertEqual(\n activity.time.elapsed('records'),\n 400\n )\n self.assertEqual(\n activity.time.elapsed('summary'),\n 300\n )\n self.assertEqual(\n activity.time.elapsed('laps'),\n 350\n )\n \n activity_blank = Activity(pd.DataFrame([]))\n for src in ['records', 'summary', 'laps']:\n self.assertIsNone(activity_blank.time.elapsed(src))\n self.assertIsNone(activity_blank.timestamp.elapsed(src))\n\n def test_timer(self):\n activity = heartandsole.Activity(\n pd.DataFrame([]),\n summary=pd.Series({\n 'time_timer': 300.0,\n }),\n laps=pd.DataFrame.from_dict({\n 'time_timer': [200.0, 150.0],\n }),\n )\n\n self.assertEqual(\n activity.time.timer('summary'),\n 300\n )\n self.assertEqual(\n activity.time.timer('laps'),\n 350\n )\n\n activity_blank = Activity(pd.DataFrame([]))\n for src in ['summary', 'laps']:\n self.assertIsNone(activity_blank.time.timer(src))\n\n def test_tz(self):\n for tz_local in [\n dateutil.tz.gettz(name='UTC'),\n dateutil.tz.gettz(name='America/Denver'),\n # dateutil.tz.gettz(name='AEST-10AEDT-11,M10.1.0/2,M4.1.0/3'), # error\n 'UTC',\n 'America/Denver'\n ]:\n\n t0 = datetime.datetime.now()\n activity = heartandsole.Activity(\n pd.DataFrame.from_dict({\n 'timestamp': [t0 + datetime.timedelta(seconds=s) for s in (0, 50, 75, 100)],\n }),\n summary=pd.Series({\n 'timestamp_start': t0,\n 'timestamp_end': t0 + datetime.timedelta(seconds=101)\n }),\n laps=pd.DataFrame.from_dict({\n 'timestamp_start': [\n t0, \n t0 + datetime.timedelta(seconds=49)\n ],\n 'timestamp_end': [\n 
t0 + datetime.timedelta(seconds=49),\n t0 + datetime.timedelta(seconds=99)\n ],\n }),\n )\n\n activity.timestamp.ensure_aware(tz_local)\n\n for series in [\n activity.records['timestamp'],\n activity.laps['timestamp_start'],\n activity.laps['timestamp_end']\n ]:\n self.assertTrue(pd.api.types.is_datetime64tz_dtype(series))\n \n for row in [\n activity.summary['timestamp_start'],\n activity.summary['timestamp_end'],\n ]:\n self.assertIsNotNone(row.tz)\n\n # print(activity.timestamp.start('records').tz)\n # print(activity.timestamp.start('summary').astimezone('America/Denver'))\n # print(activity.timestamp.start('laps').astimezone('America/Denver'))"
] |
[
[
"pandas.testing.assert_series_equal",
"pandas.Series",
"pandas.api.types.is_float_dtype",
"pandas.Index",
"pandas.DataFrame",
"pandas.api.types.is_datetime64tz_dtype",
"pandas.testing.assert_frame_equal",
"pandas.DataFrame.from_dict",
"pandas.testing.assert_index_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
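The tests in the row above pin down the behavior of `CachedField` without showing its implementation. A hypothetical minimal version consistent with the assertions (class access returns the field class itself, instance access returns a cached per-instance accessor) could look like the sketch below; it is inferred from the tests, not copied from heartandsole.

```python
class CachedField:
    """Non-data descriptor: builds the field accessor once per instance."""

    def __init__(self, name, field_cls):
        self._name = name
        self._field_cls = field_cls

    def __get__(self, obj, objtype=None):
        if obj is None:
            # Class-level access, e.g. Activity.mine -> MyField itself.
            return self._field_cls
        field = self._field_cls(obj)
        # Shadow the descriptor in the instance dict so later lookups are free.
        obj.__dict__[self._name] = field
        return field


class Speed:  # stand-in for an ActivityField subclass
    def __init__(self, activity):
        self.activity = activity


class Activity:
    speed = CachedField("speed", Speed)


a = Activity()
assert Activity.speed is Speed
assert a.speed is a.speed  # cached after the first access
```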
NVIDIA-AI-IOT/a2j_handpose_3d
|
[
"653f1ed855699f774349523e9e4cbc9a36a95dbe"
] |
[
"model/back_bone/resnet.py"
] |
[
"'''\nCopyright (c) 2019 Boshen Zhang\nCopyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n'''\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\n\nPRETRAINED_MODELS = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\ndef conv3x3(in_planes, out_planes, stride=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, dilation=dilation,\n padding=dilation, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\n\nclass BasicBlock(nn.Module):\n \"\"\"\n Resnet Basic Residual Block\n \"\"\"\n expansion = 1\n def __init__(self, input_channels, output_channels, stride=1, dilation=1, downsample=None):\n \"\"\"\n Class constructor\n\n :param input_channels: number of input channels to the residual block\n :param output channels: number of putput channels of the residual block\n :param stride: stride of the first convolution in the residual block\n :param dilation: dilation of the second convolution in the residual block\n :param downsample: torch.nn function for down sampling the input x for concatenation in the residual layer\n \"\"\"\n super(BasicBlock, self).__init__()\n \n self.conv1 = conv3x3(input_channels, output_channels, stride=stride)\n self.bn1 = nn.BatchNorm2d(output_channels)\n \n self.conv2 = conv3x3(output_channels, output_channels, dilation=dilation)\n self.bn2 = nn.BatchNorm2d(output_channels)\n \n self.downsample = downsample\n self.stride = stride\n\n # Actiation function\n self.relu = nn.LeakyReLU(inplace=True)\n\n \n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n \n out += identity\n out = self.relu(out)\n\n return out\n\nclass Bottleneck(nn.Module):\n \"\"\"\n Resnet Bottleneck network\n \"\"\"\n expansion = 4\n def __init__(self, input_channels, output_channels, stride=1, dilation=1, downsample=None):\n \"\"\"\n Class constructor\n\n 
:param input_channels: number of input channels to the residual block\n :param output channels: number of putput channels of the residual block\n :param stride: stride of the second convolution in the residual block\n :param dilation: dilation of the second convolution in the residual block\n :param downsample: torch.nn function for down sampling the input x for concatenation in the residual layer\n \"\"\"\n super(Bottleneck, self).__init__()\n\n self.conv1 = conv1x1(input_channels, output_channels)\n self.bn1 = nn.BatchNorm2d(output_channels)\n\n self.conv2 = conv3x3(output_channels, output_channels, stride=stride, dilation=dilation)\n self.bn2 = nn.BatchNorm2d(output_channels)\n\n self.conv3 = conv1x1(output_channels, output_channels*self.expansion)\n self.bn3 = nn.BatchNorm2d(output_channels*self.expansion)\n\n self.downsample = downsample\n self.stride = stride\n\n # Activation function\n self.relu = nn.LeakyReLU(inplace=True)\n\n\n \n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n \"\"\"\n ResNet Definition\n\n could create resnet (18, 34, 50, 101, 152) by setting the parameters\n \"\"\"\n def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):\n \"\"\"\n Class constructor\n\n :param block: type toch.nn, A residual block class instance (i.e. BasicBlock or Bottleneck)\n :param layers: type list, A list holding the number of residual blocks in each ResNet layer\n :param num_classes: if using a pretrained network make sure the number of classes are the same\n :param zero_init_residual: Zero Initialiaze the last batchnorm in each residual layer for higher accuracy\n \"\"\"\n super(ResNet, self).__init__()\n \n self.input_channels = 64\n \n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1 = self._make_resnet_layer(block, 64, layers[0])\n self.layer2 = self._make_resnet_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_resnet_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_resnet_layer(block, 512, layers[3], stride=1, dilation=2)\n self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512*block.expansion, num_classes)\n\n # Activation function\n self.relu = nn.LeakyReLU(inplace=True)\n\n self._initialize()\n if zero_init_residual:\n self._zero_initialize()\n \n def _make_resnet_layer(self, block, output_channels, blocks, stride=1, dilation=1):\n \"\"\"\n Method to create residual block layer in resnet\n\n :param block: type torch.nn, a residual block block class instance (i.e. 
BasicBlock or Bottleneck)\n :param output_channels: type int, number of output channels of the residual block layer\n :param blocks: type int, number of residual blocks in this layer\n :param stride: type int\n :param dilation: type int\n \"\"\"\n downsample = None\n \n if (stride != 1) or (self.input_channels != output_channels*block.expansion):\n downsample = nn.Sequential(\n conv1x1(self.input_channels, output_channels*block.expansion, stride=stride),\n nn.BatchNorm2d(output_channels*block.expansion),\n )\n \n layers = list()\n layers.append(block(self.input_channels, output_channels, stride=stride, downsample=downsample))\n\n self.input_channels = output_channels * block.expansion\n \n for _ in range(1, blocks):\n layers.append(block(self.input_channels, output_channels, dilation=dilation))\n \n return nn.Sequential(*layers)\n\n def _initialize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n \n def _zero_initialize(self):\n for m in self.modules():\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avg_pool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef get_ResNet(resnet_model=\"resnet18\", pretrained=False):\n \n resnet_setups = {\n \"resnet18\": {\"block\": BasicBlock, \"layers\": [2, 2, 2, 2]},\n \"resnet34\": {\"block\": BasicBlock, \"layers\": [3, 4, 6, 3]},\n \"resnet50\": {\"block\": Bottleneck, \"layers\": [3, 4, 6, 3]},\n \"resnet101\": {\"block\": Bottleneck, \"layers\": [3, 4, 23, 3]},\n \"resnet152\": {\"block\": Bottleneck, \"layers\": [3, 8, 36, 3]},\n }\n model = ResNet(resnet_setups[resnet_model][\"block\"], resnet_setups[resnet_model][\"layers\"])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(PRETRAINED_MODELS[resnet_model]))\n \n return model\n\nclass ResnetBackbone(nn.Module):\n \"\"\"\n The Resnet Backbone module\n \"\"\"\n def __init__(self, name=\"resnet18\", pretrained=True):\n \"\"\"\n Class constructor\n\n :param name: name of the resnet model to load\n :param pretrained: weather or not to load the weight of a pretrained model on ImageNet\n \"\"\"\n super(ResnetBackbone, self).__init__()\n self.model = get_ResNet(resnet_model=name, pretrained=pretrained)\n \n def forward(self, x):\n n, c, h, w = x.size()\n\n x = x[:,0:1,:,:] # depth\n x = x.expand(n, 3, h, w)\n\n out = self.model.conv1(x)\n out = self.model.bn1(out)\n out = self.model.relu(out)\n out = self.model.maxpool(out)\n\n out1 = self.model.layer1(out)\n out2 = self.model.layer2(out1)\n out3 = self.model.layer3(out2)\n out4 = self.model.layer4(out3)\n\n return out3, out4\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.utils.model_zoo.load_url",
"torch.nn.init.kaiming_normal_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
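Note that `_zero_initialize` in the ResNet above nests `for m in self.modules():` twice; the inner loop makes the outer one redundant, although the result is the same. Its intent, zeroing the last BatchNorm scale in every residual branch so each block starts out as an identity mapping, can be written as the sketch below, which assumes blocks expose `bn2`/`bn3` exactly as in the file above.

```python
import torch.nn as nn


def zero_init_residual(model, bottleneck_cls, basic_cls):
    """Zero the last BN scale in each residual branch (identity at init)."""
    for m in model.modules():
        if isinstance(m, bottleneck_cls):
            nn.init.constant_(m.bn3.weight, 0)  # last BN in a Bottleneck
        elif isinstance(m, basic_cls):
            nn.init.constant_(m.bn2.weight, 0)  # last BN in a BasicBlock


# usage, with the block classes from the file above:
# zero_init_residual(model, Bottleneck, BasicBlock)
```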
chrsthur/mne-nirs
|
[
"6a05ff2e744fb362e2253dd7953657795c646207"
] |
[
"mne_nirs/preprocessing/_scalp_coupling_segmented.py"
] |
[
"# Authors: Robert Luke <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\nfrom mne import pick_types\nfrom mne.io import BaseRaw\nfrom mne.utils import _validate_type, verbose\nfrom mne.preprocessing.nirs import (_channel_frequencies,\n _check_channels_ordered)\nfrom mne.filter import filter_data\n\n\n@verbose\ndef scalp_coupling_index_windowed(raw, time_window=10, threshold=0.1,\n l_freq=0.7, h_freq=1.5,\n l_trans_bandwidth=0.3,\n h_trans_bandwidth=0.3,\n verbose=False):\n \"\"\"\n Compute scalp coupling index for each channel and time window.\n\n As described in [1]_ and [2]_.\n This method provides a metric of data quality along the duration of\n the measurement. The user can specify the window over which the\n metric is computed.\n\n Parameters\n ----------\n raw : instance of Raw\n The haemoglobin data.\n time_window : number\n The duration of the window over which to calculate the metric.\n Default is 10 seconds as in PHOEBE paper.\n threshold : number\n Values below this are marked as bad and annotated in the raw file.\n %(l_freq)s\n %(h_freq)s\n %(l_trans_bandwidth)s\n %(h_trans_bandwidth)s\n %(verbose)s\n\n Returns\n -------\n scores : array (n_nirs, n_windows)\n Array of peak power values.\n times : list\n List of the start and end times of each window used to compute the\n peak spectral power.\n\n References\n ----------\n .. [1] Pollonini L et al., “PHOEBE: a method for real time mapping of\n optodes-scalp coupling in functional near-infrared spectroscopy” in\n Biomed. Opt. Express 7, 5104-5119 (2016).\n .. [2] Hernandez, Samuel Montero, and Luca Pollonini. \"NIRSplot: a tool for\n quality assessment of fNIRS scans.\" Optics and the Brain.\n Optical Society of America, 2020.\n \"\"\"\n\n raw = raw.copy().load_data()\n _validate_type(raw, BaseRaw, 'raw')\n\n if not len(pick_types(raw.info, fnirs='fnirs_od')):\n raise RuntimeError('Scalp coupling index '\n 'should be run on optical density data.')\n\n freqs = np.unique(_channel_frequencies(raw.info))\n picks = _check_channels_ordered(raw.info, freqs)\n\n filtered_data = filter_data(raw._data, raw.info['sfreq'], l_freq, h_freq,\n picks=picks, verbose=verbose,\n l_trans_bandwidth=l_trans_bandwidth,\n h_trans_bandwidth=h_trans_bandwidth)\n\n window_samples = int(np.ceil(time_window * raw.info['sfreq']))\n n_windows = int(np.floor(len(raw) / window_samples))\n\n scores = np.zeros((len(picks), n_windows))\n times = []\n\n for window in range(n_windows):\n\n start_sample = int(window * window_samples)\n end_sample = start_sample + window_samples\n end_sample = np.min([end_sample, len(raw) - 1])\n\n t_start = raw.times[start_sample]\n t_stop = raw.times[end_sample]\n times.append((t_start, t_stop))\n\n for ii in picks[::2]:\n\n c1 = filtered_data[ii][start_sample:end_sample]\n c2 = filtered_data[ii + 1][start_sample:end_sample]\n c = np.corrcoef(c1, c2)[0][1]\n scores[ii, window] = c\n scores[ii + 1, window] = c\n\n if (threshold is not None) & (c < threshold):\n raw.annotations.append(t_start, time_window, 'BAD_SCI',\n ch_names=[raw.ch_names[ii:ii + 2]])\n\n return raw, scores, times\n"
] |
[
[
"numpy.corrcoef",
"numpy.ceil"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
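Stripped of the MNE plumbing, the per-window score in `scalp_coupling_index_windowed` above is the zero-lag Pearson correlation of the two band-passed wavelengths of a source-detector pair. A numpy-only sketch of that core follows; the band-pass filtering around the cardiac band is omitted here for brevity.

```python
import numpy as np


def windowed_sci(sig_a, sig_b, sfreq, window_s=10.0):
    """Correlate two (already filtered) optical-density signals per window."""
    n = int(np.ceil(window_s * sfreq))
    scores = []
    for start in range(0, min(len(sig_a), len(sig_b)) - n + 1, n):
        a = sig_a[start:start + n]
        b = sig_b[start:start + n]
        scores.append(np.corrcoef(a, b)[0, 1])
    return np.array(scores)


# windows scoring below ~0.1 would be annotated as BAD_SCI upstream
```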
MSeeker1340/num2-spring2018
|
[
"61f87576736d146821d20107bed6f8480a0c2a6a"
] |
[
"src/expint.py"
] |
[
"# module expint\nimport numpy as np\nimport scipy.linalg as la\n\n##########################\n# Matrix functions using scipy.linalg.funm\n# Special care is given to small arguments for numerical stability (e.g. \n# expm1 instead of exp and using leading order Taylor expansion when x \n# is smaller than some threshold).\n# Alternatively, we can also use BigFloat for higher precision.\n\[email protected]\ndef _phi1(x):\n # phi1(x) = (exp(x) - 1) / x\n if x == 0.0:\n return 1.0\n else:\n return np.expm1(x)/x # this is stabel\n\[email protected]\ndef _phi2(x):\n # phi2(x) = (exp(x) - 1 - x) / x^2\n # = 1/2 + 1/6x + O(x^2)\n if np.abs(x) < 1e-7:\n return 0.5 + 1/6*x\n else:\n return (np.expm1(x) - x) / x**2\n\[email protected]\ndef _phi3(x):\n # phi3(x) = (exp(x) - 1 - x - 0.5x^2) / x^3\n # = 1/6 + 1/24*x + O(x^2)\n if np.abs(x) < 1e-5:\n return 1/6 + 1/24*x\n else:\n return (np.expm1(x) - x - 0.5*x**2) / x**3\n\nexpm = la.expm\ndef phi1m(A):\n return la.funm(A, _phi1)\ndef phi2m(A):\n return la.funm(A, _phi2)\ndef phi3m(A):\n return la.funm(A, _phi3)\n\n#########################\n# Integrator interface for semilinear problems\n# The interface is a simplified version of scipy.integrate.OdeSolver which \n# solves the semilinear system y'(t) = Ly(t) + N(t,y)\n\nclass SemilinearOdeSolver:\n def __init__(self, L, N, t0, y0, dt):\n self.L = L\n self.N = N\n self.t = t0\n self.y = y0\n self.dt = dt\n\n def __str__(self):\n # For pretty printing\n return \"{} solver\\nt = {}\\ny = {}\".format(type(self).__name__, self.t, self.y)\n\n# First order methods\nclass LawsonEuler(SemilinearOdeSolver):\n def __init__(self, L, N, t0, y0, dt):\n super().__init__(L, N, t0, y0, dt)\n # Precompute matrix functions\n self.exphL = expm(dt*L)\n\n def step(self):\n t, y, dt, exphL = self.t, self.y, self.dt, self.exphL\n nl = self.N(t, y)\n self.y = exphL @ (y + dt*nl)\n self.t = t + dt\n\nclass NorsettEuler(SemilinearOdeSolver):\n def __init__(self, L, N, t0, y0, dt):\n super().__init__(L, N, t0, y0, dt)\n # Precompute matrix functions\n self.exphL = expm(dt*L)\n self.phihL = phi1m(dt*L)\n\n def step(self):\n t, y, dt, exphL, phihL = self.t, self.y, self.dt, self.exphL, self.phihL\n nl = self.N(t, y)\n self.y = exphL @ y + dt * (phihL @ nl)\n self.t = t + dt\n\n# Second order methods\nclass ExpMidpoint(SemilinearOdeSolver):\n def __init__(self, L, N, t0, y0, dt):\n super().__init__(L, N, t0, y0, dt)\n # Precompute matrix functions\n hL = dt * L\n half_hL = dt/2 * L\n self.E = expm(hL)\n self.Emid = expm(half_hL)\n self.P = phi1m(hL)\n self.Pmid = phi1m(half_hL)\n\n def step(self):\n t, y, dt = self.t, self.y, self.dt\n E, Emid, P, Pmid = self.E, self.Emid, self.P, self.Pmid\n N1 = self.N(t, y)\n Y2 = Emid @ y + 0.5*dt*(Pmid @ N1)\n N2 = self.N(t + 0.5*dt, Y2)\n self.y = E @ y + dt*(P @ N2)\n self.t = t + dt\n\nclass ExpTrapezoid(SemilinearOdeSolver):\n def __init__(self, L, N, t0, y0, dt):\n super().__init__(L, N, t0, y0, dt)\n # Precompute matrix functions\n hL = dt * L\n self.exphL = expm(hL)\n self.phi1hL = phi1m(hL)\n self.phi2hL = phi2m(hL)\n\n def step(self):\n t, y, dt = self.t, self.y, self.dt\n exphL, phi1hL, phi2hL = self.exphL, self.phi1hL, self.phi2hL\n Ey = exphL @ y # exp(dt*L) * y\n N1 = self.N(t, y)\n P1N1 = phi1hL @ N1 # phi1(dt*L) * N1\n P2N1 = phi2hL @ N1 # phi2(dt*L) * N1\n Y2 = Ey + dt*P1N1\n N2 = self.N(t+dt, Y2)\n P2N2 = phi2hL @ N2 # phi2(dt*L) * N2\n self.y = Ey + dt*(P1N1 - P2N1 + P2N2)\n self.t = t + dt\n\n# Fourth order methods\nclass ETDRK4(SemilinearOdeSolver):\n def __init__(self, L, N, 
t0, y0, dt):\n super().__init__(L, N, t0, y0, dt)\n # Precompute matrix functions\n hL = dt * L\n half_hL = dt/2 * L\n self.E = expm(hL)\n self.Emid = expm(half_hL)\n self.P1 = phi1m(hL)\n self.P1mid = phi1m(half_hL)\n self.P2 = phi2m(hL)\n self.P3 = phi3m(hL)\n self.B1 = self.P1 - 3*self.P2 + 4*self.P3\n self.B2 = 2*self.P2 - 4*self.P3 # same as B3\n self.B4 = 4*self.P3 - self.P2\n \n def step(self):\n # Unpack variables\n t, y, dt = self.t, self.y, self.dt\n E, Emid = self.E, self.Emid\n P1, P1mid, P2, P3 = self.P1, self.P1mid, self.P2, self.P3\n B1, B2, B4 = self.B1, self.B2, self.B4\n Ey = E @ y\n Emidy = Emid @ y\n # Stage 1\n N1 = self.N(t, y)\n # Stage 2\n Y2 = Emidy + dt/2*(P1mid @ N1)\n N2 = self.N(t + dt/2, Y2)\n # Stage 3\n Y3 = Emidy + dt/2*(P1mid @ N2)\n N3 = self.N(t + dt/2, Y3)\n # Stage 4\n Y4 = Emid @ Y2 + dt/2*(P1mid @ (2*N3 - N1))\n N4 = self.N(t + dt, Y4)\n self.y = Ey + dt*(B1 @ N1 + B2 @ (N2 + N3) + B4 @ N4)\n self.t = t + dt\n\n##########################\n# Function interface for the solvers, similar to scipy.integrate.solve_ivp\n# The interface is much more simplified. We will always use fixed-dt methods \n# and return dense outputs. We also assume that the integration goes forward \n# in time. Functionality related to events handling is also removed, as well \n# as options for Jacobians and tolerances because we do not have implicit \n# methods.\n\ndef solve_semilinear(L, N, t_span, y0, dt, method=LawsonEuler):\n t0, tend = t_span\n solver = method(L, N, t0, y0, dt)\n ts = [t0]\n ys = [y0]\n while solver.t < tend:\n solver.step()\n ts.append(solver.t)\n ys.append(solver.y)\n return np.array(ts), np.array(ys)\n"
] |
[
[
"scipy.linalg.funm",
"numpy.array",
"numpy.abs",
"numpy.expm1"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
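The Taylor-expansion guards in `_phi2`/`_phi3` above exist because the naive quotients cancel catastrophically near zero. A quick numerical check, assuming only numpy:

```python
import numpy as np

x = 1e-9
naive = (np.expm1(x) - x) / x**2  # cancellation: only ~7 digits survive
taylor = 0.5 + x / 6.0            # leading-order expansion used in _phi2
print(naive, taylor)
# The rounding noise in `naive` (~1e-7 relative) exceeds the x/6 term
# (~2e-10), so below the 1e-7 threshold the expansion is the right branch.
```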
liangkatherine/serdespy
|
[
"9aa0c20ce66dad60e6488d74364a130e6d71b6fb"
] |
[
"examples/FEC_BERT_plotter.py"
] |
[
"import matplotlib.pyplot as plt\nimport matplotlib.axes as ax\n \n\n\nclass BERT:\n def __init__(self):\n self.lines = []\n self.tap_weights = []\n self.FEC_codes = []\n \n def add_point(self, pre_FEC_BER, post_FEC_BER, tap_weights , FEC_code):\n \n added = False\n \n for line in self.lines:\n if line.FEC_code == FEC_code and line.tap_weights == tap_weights :\n line.add(pre_FEC_BER, post_FEC_BER)\n added = True\n \n if not added:\n newline = self.line(FEC_code, tap_weights)\n newline.add(pre_FEC_BER, post_FEC_BER)\n self.lines = self.lines + [newline]\n \n def plot(self):\n #plot = plt.figure()\n for line in self.lines:\n post_FEC = []\n pre_FEC = []\n for i in range (len(line.points)):\n #print(line.points[i])\n post_FEC = post_FEC + [line.points[i][0]]\n pre_FEC = pre_FEC + [line.points[i][1]]\n \n #print(post_FEC,pre_FEC)\n #plt.loglog(post_FEC,pre_FEC)\n plt.loglog(pre_FEC,post_FEC, \"b*\" ,label = \"RS(544,536,4), h = [0.6, 0.2, -0.2]\")\n \n plt.xlabel(\"pre-FEC BER\")\n plt.ylabel(\"post-FEC BER\")\n plt.grid()\n \n #plt.xlim([1e-2, 1e-7])\n #plt.ylim([1e-5, 1e-2])\n plt.show()\n \n class line:\n def __init__(self, FEC_code, tap_weights):\n self.FEC_code = FEC_code\n self.tap_weights = tap_weights\n self.points = []\n \n def add(self, pre_FEC_BER, post_FEC_BER):\n #print(0)\n \n if len(self.points) == 0:\n #print(1)\n self.points = [[post_FEC_BER, pre_FEC_BER]]\n return True\n \n if self.points[0][0] < post_FEC_BER:\n #print(2)\n self.points = [[post_FEC_BER, pre_FEC_BER]] + self.points\n return True\n \n for point_idx in range(len(self.points)):\n if self.points[point_idx][0] < post_FEC_BER:\n #print(3,point_idx)\n self.points = self.points[:point_idx] + [[post_FEC_BER, pre_FEC_BER]] + self.points[point_idx:]\n return True\n \n #print(3)\n self.points = self.points + [[post_FEC_BER, pre_FEC_BER]]\n return True\n\n#%%\n\n\nbert = BERT()\n\n\ntap_weights = '[0.6, 0.2, -0.2]'\nFEC_code = 'RS(544,536)'\n\n\n\nbert.add_point(0.03415591078931959, 0.034794674702931586, tap_weights , FEC_code)\n\nbert.add_point(0.027440123443838966, 0.027348414045661754 ,tap_weights , FEC_code)\n\nbert.add_point(0.02053169351900772, 0.020192069274638083 ,tap_weights , FEC_code)\n\nbert.add_point(0.014490155201254275, 0.014204924755383472 ,tap_weights , FEC_code)\n\nbert.add_point(0.008613602854924879,0.008452383223 ,tap_weights , FEC_code)\n\nbert.add_point(0.00419712867189154, 0.004249556543134525 ,tap_weights , FEC_code)\n\nbert.add_point(0.001519206083690803, 0.0013389536325316143, tap_weights , FEC_code)\n\nbert.add_point(0.0002851491644843378, 2.1076121993553185e-05 ,tap_weights , FEC_code)\n\nbert.add_point( 0.00023078962476658776, 1.1126157915148741e-05 ,tap_weights , FEC_code)\n\nbert.add_point( 0.0001759532469811382, 7.667512254668218e-06 ,tap_weights , FEC_code)\n\nbert.add_point( 0.00013160730668507897, 5.5040422012899074e-06 ,tap_weights , FEC_code)\n\nbert.add_point( 9.568550558504534e-05, 2.214269851093641e-06 ,tap_weights , FEC_code)\n\nbert.add_point( 7.05720340195351e-05, 7.257714354314462e-07 ,tap_weights , FEC_code)\n\nbert.add_point( 0.0012455010328312546, 0.001026149650002861 ,tap_weights , FEC_code)\n\nbert.add_point( 0.0007820144310272809,0.0003814682713765283 ,tap_weights , FEC_code)\n\nbert.add_point( 0.0004024513291384299, 0.00010013542123633867 ,tap_weights , FEC_code)\n\nbert.plot()\n#%%\n#tap_weights = [1,0.55,0.3]\n\n#FEC_code = 'KR4'\n\n#0.1 Noise Varience\n\n#NO FEC\n#Bits Transmitted = 10485740 Bit Errors = 9970\n#Bit Error Ratio = 0.0009508151069929256\n\n#FEC\n#Bits 
Transmitted = 10485740 Bit Errors = 1118\n#Bit Error Ratio = 0.00010662099193762195\n\n#bert.add_point( 0.0009508151069929256, 0.00010662099193762195 ,tap_weights , FEC_code)\n\n#0.09 Noise Varience\n\n#NO FEC\n#Bits Transmitted = 10485740 Bit Errors = 5353\n#Bit Error Ratio = 0.0005105028352791506\n\n#FEC\n#Bits Transmitted = 37748880 Bit Errors = 245\n#Bit Error Ratio = 6.490258783836766e-06\n\n\n#bert.add_point( 0.0005105028352791506, 6.490258783836766e-06 ,tap_weights , FEC_code)\n\n\n#tst.plot()"
] |
[
[
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
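The `BERT` plotter above stores `FEC_code` and `tap_weights` per line but then plots with a hardcoded label, and its hand-rolled descending insertion sort is more code than the task needs. A leaner sketch of the same pre-/post-FEC waterfall plot; `plot_ber_lines` and its input layout are illustrative, not serdespy API.

```python
import matplotlib.pyplot as plt


def plot_ber_lines(lines):
    """lines: {(fec_code, tap_weights): [(pre_fec_ber, post_fec_ber), ...]}"""
    for (fec_code, taps), points in lines.items():
        pre, post = zip(*sorted(points))  # sort by pre-FEC BER
        plt.loglog(pre, post, "*-", label=f"{fec_code}, h = {taps}")
    plt.xlabel("pre-FEC BER")
    plt.ylabel("post-FEC BER")
    plt.grid(True)
    plt.legend()
    plt.show()
```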
intelkevinputnam/lpot-docs
|
[
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f",
"1ff32b4d89074a6bd133ba531f7c0cea3b73152f"
] |
[
"examples/onnxrt/onnx_model_zoo/mobilebert/main.py",
"examples/pytorch/eager/huggingface_models/examples/text-classification/run_glue_no_trainer_gradient_prune.py",
"examples/pytorch/fx/object_detection/maskrcnn/pytorch/maskrcnn_benchmark/solver/build.py",
"test/test_tpe.py",
"test/ux/web/test_communication.py",
"examples/pytorch/fx/object_detection/ssd_resnet34/ptq/python/models/ssd_r34.py",
"examples/pytorch/eager/language_translation/prune/callback/modelcheckpoint.py",
"test/test_tensorflow_convert_layout.py",
"examples/pytorch/fx/object_detection/maskrcnn/pytorch/maskrcnn_benchmark/utils/mlperf_logger.py",
"examples/pytorch/fx/object_detection/maskrcnn/pytorch/maskrcnn_benchmark/modeling/detector/generalized_rcnn.py",
"test/test_tensorflow_quantize_input.py",
"examples/pytorch/eager/language_translation/ptq/examples/run_squad.py",
"examples/pytorch/eager/image_recognition/imagenet/gpu/main.py"
] |
[
"import numpy as np\nimport onnxruntime\nimport onnx\nimport tokenization\nimport os\nfrom run_onnx_squad import *\nimport json\nfrom run_onnx_squad import read_squad_examples, convert_examples_to_features, write_predictions\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nimport tqdm\nfrom squad_evaluate import evaluate\n\nmax_seq_length = 384\ndoc_stride = 128\nmax_query_length = 64\nbatch_size = 1\nn_best_size = 20\nmax_answer_length = 30\n\ndef parse_dummy_input(model, benchmark_nums, max_seq_length):\n session = onnxruntime.InferenceSession(model.SerializeToString(), None)\n shapes = []\n lows = []\n highs = []\n for i in range(len(session.get_inputs())):\n input_name = session.get_inputs()[i].name\n input_shapes = session.get_inputs()[i].shape\n shape = [benchmark_nums]\n shape.append(max_seq_length)\n if input_name == \"input_ids\":\n low = 0.0\n high = 1000.0\n else:\n low = 0.0\n high = 2.0\n shapes.append(tuple(shape))\n lows.append(low)\n highs.append(high)\n return shapes, lows, highs\n\nclass squadDataset(Dataset):\n def __init__(self, input_ids, input_mask, segment_ids, bs):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.bs = bs\n \n def __getitem__(self, index):\n return self.input_ids[index:index + self.bs], self.input_mask[index:index + self.bs], self.segment_ids[index:index + self.bs]\n\n def __len__(self):\n assert len(self.input_ids) == len(self.input_mask)\n assert len(self.input_ids) == len(self.segment_ids)\n return len(self.input_ids)\n\ndef evaluate_squad(model, dataloader, input_ids, eval_examples, extra_data, input_file):\n session = onnxruntime.InferenceSession(model.SerializeToString(), None)\n for output_meta in session.get_outputs():\n print(output_meta)\n for input_meta in session.get_inputs():\n print(input_meta)\n n = len(input_ids)\n bs = 1\n all_results = []\n start = timer()\n for idx, batch in tqdm.tqdm(enumerate(dataloader), desc=\"eval\"):\n data = {\"input_ids\": np.array(batch[0])[0],\n \"input_mask\": np.array(batch[1])[0],\n \"segment_ids\": np.array(batch[2])[0]}\n result = session.run([\"end_logits\",\"start_logits\"], data)\n in_batch = result[0].shape[0]\n start_logits = [float(x) for x in result[1][0].flat]\n end_logits = [float(x) for x in result[0][0].flat]\n for i in range(0, in_batch):\n unique_id = len(all_results)\n all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits,end_logits=end_logits))\n \n # postprocessing\n output_dir = './output'\n os.makedirs(output_dir, exist_ok=True)\n output_prediction_file = os.path.join(output_dir, \"predictions_mobilebert_fp32.json\")\n output_nbest_file = os.path.join(output_dir, \"nbest_predictions_mobilebert_fp32.json\")\n write_predictions(eval_examples, extra_data, all_results,\n n_best_size, max_answer_length,\n True, output_prediction_file, output_nbest_file)\n\n with open(input_file) as dataset_file:\n dataset_json = json.load(dataset_file)\n expected_version = '1.1'\n if (dataset_json['version'] != expected_version):\n print('Evaluation expects v-' + expected_version +\n ', but got dataset with v-' + dataset_json['version'],\n file=sys.stderr)\n dataset = dataset_json['data']\n with open(output_prediction_file) as prediction_file:\n predictions = json.load(prediction_file)\n res = evaluate(dataset, predictions)\n return res['f1']\n\ndef main():\n parser = argparse.ArgumentParser(description='onnx squad')\n parser.add_argument('--model_path', required=True, type=str,\n help='model path')\n 
parser.add_argument('--config', required=True, type=str,\n help='Tuning config file path')\n parser.add_argument('--save_path', type=str, default='mobilbert_tune.onnx', \n help='save tuned model path')\n parser.add_argument('--data_dir', type=str,\n help='datseset path')\n parser.add_argument('--tune', action='store_true', default=False, \n help='run lpot tune')\n parser.add_argument('--benchmark', action='store_true', default=False, \n help='run benchmark')\n parser.add_argument('--mode', type=str, default='performance',\n help=\"benchmark mode of performance or accuracy\")\n parser.add_argument('--benchmark_nums', type=int, default=1000,\n help=\"Benchmark numbers of samples\")\n args = parser.parse_args()\n\n model = onnx.load(args.model_path)\n\n predict_file = 'dev-v1.1.json'\n input_file=os.path.join(args.data_dir, predict_file)\n eval_examples = read_squad_examples(input_file=input_file)\n\n vocab_file = os.path.join('uncased_L-12_H-768_A-12', 'vocab.txt')\n tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=True)\n input_ids, input_mask, segment_ids, extra_data = convert_examples_to_features(eval_examples, tokenizer, \n max_seq_length, doc_stride, max_query_length)\n\n dataset = squadDataset(input_ids, input_mask, segment_ids, 1) \n eval_dataloader = DataLoader(dataset, batch_size=batch_size)\n\n def eval_func(model):\n return evaluate_squad(model, eval_dataloader, input_ids, eval_examples, extra_data, input_file)\n\n if args.tune:\n from lpot.experimental import Quantization, common\n quantize = Quantization(args.config)\n quantize.model = common.Model(model)\n quantize.calib_dataloader = eval_dataloader\n quantize.eval_func = eval_func\n q_model = quantize()\n q_model.save(args.save_path)\n\n if args.benchmark and args.mode == \"accuracy\":\n results = evaluate_squad(model, eval_dataloader, input_ids, eval_examples, extra_data, input_file)\n print(\"Batch size = %d\" % batch_size)\n print(\"Accuracy: %.5f\" % results)\n\n if args.benchmark and args.mode == \"performance\":\n model = onnx.load(args.model_path)\n \n from lpot.experimental.data.datasets.dummy_dataset import DummyDataset\n from lpot.experimental.data.dataloaders.onnxrt_dataloader import ONNXRTDataLoader\n shapes, lows, highs = parse_dummy_input(model, args.benchmark_nums, max_seq_length)\n dummy_dataset = DummyDataset(shapes, low=lows, high=highs, dtype=\"int32\", label=True)\n dummy_dataloader = ONNXRTDataLoader(dummy_dataset)\n \n from lpot.experimental import Benchmark, common\n evaluator = Benchmark(args.config)\n evaluator.b_dataloader = dummy_dataloader\n evaluator.model = common.Model(model)\n evaluator(args.mode)\n\nif __name__ == \"__main__\":\n main()\n",
"# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning a 🤗 Transformers model for sequence classification on GLUE.\"\"\"\nimport argparse\nimport logging\nimport math\nimport os\nimport random\n\nimport datasets\nfrom datasets import load_dataset, load_metric\nimport torch\nfrom torch.utils.data.dataloader import DataLoader\nfrom tqdm.auto import tqdm\n\nimport numpy as np\n\nimport transformers\nfrom transformers import (\n AdamW,\n AutoConfig,\n AutoModelForSequenceClassification,\n AutoTokenizer,\n DataCollatorWithPadding,\n PretrainedConfig,\n SchedulerType,\n default_data_collator,\n get_scheduler,\n set_seed,\n)\n\n\nlogger = logging.getLogger(__name__)\n\ntask_to_keys = {\n \"cola\": (\"sentence\", None),\n \"mnli\": (\"premise\", \"hypothesis\"),\n \"mrpc\": (\"sentence1\", \"sentence2\"),\n \"qnli\": (\"question\", \"sentence\"),\n \"qqp\": (\"question1\", \"question2\"),\n \"rte\": (\"sentence1\", \"sentence2\"),\n \"sst2\": (\"sentence\", None),\n \"stsb\": (\"sentence1\", \"sentence2\"),\n \"wnli\": (\"sentence1\", \"sentence2\"),\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a text classification task\")\n parser.add_argument(\n \"--task_name\",\n type=str,\n default=None,\n help=\"The name of the glue task to train on.\",\n choices=list(task_to_keys.keys()),\n )\n parser.add_argument(\n \"--train_file\", type=str, default=None, help=\"A csv or a json file containing the training data.\"\n )\n parser.add_argument(\n \"--validation_file\", type=str, default=None, help=\"A csv or a json file containing the validation data.\"\n )\n parser.add_argument(\n \"--max_length\",\n type=int,\n default=128,\n help=(\n \"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,\"\n \" sequences shorter will be padded if `--pad_to_max_lengh` is passed.\"\n ),\n )\n parser.add_argument(\n \"--pad_to_max_length\",\n action=\"store_true\",\n help=\"If passed, pad all samples to `max_length`. 
Otherwise, dynamic padding is used.\",\n )\n parser.add_argument(\n \"--model_name_or_path\",\n type=str,\n help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n required=True,\n )\n parser.add_argument(\n \"--use_slow_tokenizer\",\n action=\"store_true\",\n help=\"If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).\",\n )\n parser.add_argument(\n \"--per_device_train_batch_size\",\n type=int,\n default=8,\n help=\"Batch size (per device) for the training dataloader.\",\n )\n parser.add_argument(\n \"--per_device_eval_batch_size\",\n type=int,\n default=32,\n help=\"Batch size (per device) for the evaluation dataloader.\",\n )\n parser.add_argument(\n \"--learning_rate\",\n type=float,\n default=5e-5,\n help=\"Initial learning rate (after the potential warmup period) to use.\",\n )\n parser.add_argument(\"--weight_decay\", type=float, default=0.0, help=\"Weight decay to use.\")\n parser.add_argument(\"--num_train_epochs\", type=int, default=3, help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--output_model\", default=None, type=str, required=True,\n help=\"The output path for the trained model.\")\n parser.add_argument(\n \"--max_train_steps\",\n type=int,\n default=None,\n help=\"Total number of training steps to perform. If provided, overrides num_train_epochs.\",\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\n \"--lr_scheduler_type\",\n type=SchedulerType,\n default=\"linear\",\n help=\"The scheduler type to use.\",\n choices=[\"linear\", \"cosine\", \"cosine_with_restarts\", \"polynomial\", \"constant\", \"constant_with_warmup\"],\n )\n parser.add_argument(\n \"--num_warmup_steps\", type=int, default=0, help=\"Number of steps for the warmup in the lr scheduler.\"\n )\n parser.add_argument(\"--output_dir\", type=str, default=None, help=\"Where to store the final model.\")\n parser.add_argument(\"--seed\", type=int, default=None, help=\"A seed for reproducible training.\")\n parser.add_argument('--do_train', action='store_true',\n help=\"train model\")\n parser.add_argument('--do_eval', action='store_true',\n help=\"evaluate model\")\n parser.add_argument('--do_prune', action='store_true',\n help=\"do gradient sensitivity pruning on trained model.\")\n parser.add_argument('--use_onnx', action='store_true',\n help=\"use onnx for inference\")\n parser.add_argument(\"--onnx_model_name\", type=str, default=\"model.onnx\", help=\"name for saved onnx model.\")\n parser.add_argument('--quantization', action='store_true',\n help=\"int8 quantization\")\n parser.add_argument(\"--config\", default=None, help=\"pruning config\")\n parser.add_argument(\"--core_per_instance\", type=int, default=-1, help=\"cores per instance.\")\n args = parser.parse_args()\n\n # Sanity checks\n if args.task_name is None and args.train_file is None and args.validation_file is None:\n raise ValueError(\"Need either a task name or a training/validation file.\")\n else:\n if args.train_file is not None:\n extension = args.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if args.validation_file is not None:\n extension = args.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n\n if args.output_dir is not None:\n 
os.makedirs(args.output_dir, exist_ok=True)\n\n return args\n\ndef take_eval_steps(args, model, eval_dataloader, metric, prune):\n # reset bert config for pruned weight\n target_num_heads = prune.cfg['pruning']['approach']['weight_compression']['pruners'][0].parameters['target']\n for submodule in model.bert.encoder.layer:\n submodule.attention.self.num_attention_heads = target_num_heads\n submodule.attention.self.all_head_size = target_num_heads * submodule.attention.self.attention_head_size\n logger.info(\"***** Running eval *****\")\n logger.info(f\" Num examples = {len(eval_dataloader) }\")\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n metric.add_batch(\n predictions=predictions,\n references=batch[\"labels\"],\n )\n\n eval_metric = metric.compute()\n logger.info(f\"eval_metric : {eval_metric}\")\n return eval_metric\n\ndef take_train_steps(args, model, eval_dataloader, metric, prune):\n # Train!\n logger.info(\"***** Running pruning *****\")\n logger.info(f\" Num examples = {len(eval_dataloader)}\")\n # Only show the progress bar once on each machine.\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n model.eval()\n\n # To calculate head prune\n head_mask = torch.ones(model.config.num_hidden_layers, model.config.num_attention_heads)\n head_mask.requires_grad_(requires_grad=True)\n\n for step, batch in enumerate(eval_dataloader):\n inputs = {'input_ids': batch['input_ids'],\n 'attention_mask': batch['attention_mask'],\n 'labels': batch['labels']}\n outputs = model(output_attentions=True, **inputs, head_mask=head_mask)\n\n tmp_eval_loss, logits = outputs[:2]\n\n tmp_eval_loss.backward()\n\n prune.on_batch_end()\n nb_eval_steps += 1\n predictions = logits.argmax(dim=-1)\n # if preds is None:\n # preds = logits.detach().cpu().numpy()\n # out_label_ids = inputs['labels'].detach().cpu().numpy()\n # else:\n # preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n # out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n # preds = np.argmax(preds, axis=1)\n metric.add_batch(\n predictions=predictions,\n references=inputs[\"labels\"],\n )\n\n eval_metric = metric.compute()\n logger.info(f\"eval_metric : {eval_metric}\")\n prune.on_epoch_end()\n\ndef export_onnx_model(args, model):\n with torch.no_grad():\n inputs = {'input_ids': torch.ones(1,args.max_length, dtype=torch.int64),\n 'attention_mask': torch.ones(1,args.max_length, dtype=torch.int64),\n 'token_type_ids': torch.ones(1,args.max_length, dtype=torch.int64)}\n _ = model(**inputs)\n\n onnx_saved_model = os.path.join(args.output_model, args.onnx_model_name)\n symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}\n torch.onnx.export(model, (inputs['input_ids'],\n inputs['attention_mask'],\n inputs['token_type_ids']),\n onnx_saved_model,\n opset_version=11,\n do_constant_folding=True,\n input_names=['input_ids',\n 'attention_mask',\n 'token_type_ids'],\n output_names=['output'],\n dynamic_axes={'input_ids': symbolic_names,\n 'attention_mask' : symbolic_names,\n 'token_type_ids' : symbolic_names})\n print(\"ONNX Model exported to {}\".format(onnx_saved_model))\n\ndef evaluate_onnxrt(args, model, eval_dataloader, metric, onnx_options=None):\n from onnxruntime import ExecutionMode, InferenceSession, SessionOptions\n session = InferenceSession(model.SerializeToString(), onnx_options)\n\n # Eval!\n logger.info(\"***** Running onnx evaluation *****\")\n #eval_loss = 0.0\n #nb_eval_steps = 0\n preds = 
None\n out_label_ids = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n batch = dict((k, v.detach().cpu().numpy()) for k, v in batch.items())\n ort_inputs = {'input_ids': batch['input_ids'],\n 'attention_mask': batch['attention_mask'],\n 'token_type_ids': batch['token_type_ids']}\n logits = session.run(None, ort_inputs)[0]\n predictions = torch.from_numpy(logits).argmax(dim=-1)\n metric.add_batch(\n predictions=predictions,\n references=torch.from_numpy(batch[\"labels\"]),\n )\n\n eval_metric = metric.compute()\n logger.info(f\"onnx eval metric: {eval_metric}\")\n\ndef main():\n args = parse_args()\n\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n\n # If passed along, set the training seed now.\n if args.seed is not None:\n set_seed(args.seed)\n\n # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)\n # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).\n\n # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the\n # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named\n # label if at least two columns are provided.\n\n # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this\n # single column. You can easily tweak this behavior (see below)\n\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if args.task_name is not None:\n # Downloading and loading a dataset from the hub.\n raw_datasets = load_dataset(\"glue\", args.task_name)\n else:\n # Loading the dataset from local csv or json file.\n data_files = {}\n if args.train_file is not None:\n data_files[\"train\"] = args.train_file\n if args.validation_file is not None:\n data_files[\"validation\"] = args.validation_file\n extension = (args.train_file if args.train_file is not None else args.valid_file).split(\".\")[-1]\n raw_datasets = load_dataset(extension, data_files=data_files)\n # See more about loading any type of standard or custom dataset at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Labels\n if args.task_name is not None:\n is_regression = args.task_name == \"stsb\"\n if not is_regression:\n label_list = raw_datasets[\"train\"].features[\"label\"].names\n num_labels = len(label_list)\n else:\n num_labels = 1\n else:\n # Trying to have good defaults here, don't hesitate to tweak to your needs.\n is_regression = datasets[\"train\"].features[\"label\"].dtype in [\"float32\", \"float64\"]\n if is_regression:\n num_labels = 1\n else:\n # A useful fast method:\n # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique\n label_list = datasets[\"train\"].unique(\"label\")\n label_list.sort() # Let's sort it for determinism\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n #\n # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not 
args.use_slow_tokenizer)\n model = AutoModelForSequenceClassification.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n )\n\n # Preprocessing the datasets\n if args.task_name is not None:\n sentence1_key, sentence2_key = task_to_keys[args.task_name]\n else:\n # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.\n non_label_column_names = [name for name in datasets[\"train\"].column_names if name != \"label\"]\n if \"sentence1\" in non_label_column_names and \"sentence2\" in non_label_column_names:\n sentence1_key, sentence2_key = \"sentence1\", \"sentence2\"\n else:\n if len(non_label_column_names) >= 2:\n sentence1_key, sentence2_key = non_label_column_names[:2]\n else:\n sentence1_key, sentence2_key = non_label_column_names[0], None\n\n # Some models have set the order of the labels to use, so let's make sure we do use it.\n label_to_id = None\n if (\n model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id\n and args.task_name is not None\n and not is_regression\n ):\n # Some have all caps in their config, some don't.\n label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}\n if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):\n logger.info(\n f\"The configuration of the model provided the following label correspondence: {label_name_to_id}. \"\n \"Using it!\"\n )\n label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}\n else:\n logger.warn(\n \"Your model seems to have been trained with labels, but they don't match the dataset: \",\n f\"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.\"\n \"\\nIgnoring the model labels as a result.\",\n )\n elif args.task_name is None:\n label_to_id = {v: i for i, v in enumerate(label_list)}\n\n padding = \"max_length\" if args.pad_to_max_length else False\n\n def preprocess_function(examples):\n # Tokenize the texts\n texts = (\n (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])\n )\n result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True)\n\n if \"label\" in examples:\n if label_to_id is not None:\n # Map labels to IDs (not necessary for GLUE tasks)\n result[\"labels\"] = [label_to_id[l] for l in examples[\"label\"]]\n else:\n # In all cases, rename the column to labels because the model will expect that.\n result[\"labels\"] = examples[\"label\"]\n return result\n\n processed_datasets = raw_datasets.map(\n preprocess_function, batched=True, remove_columns=raw_datasets[\"train\"].column_names\n )\n\n train_dataset = processed_datasets[\"train\"]\n eval_dataset = processed_datasets[\"validation_matched\" if args.task_name == \"mnli\" else \"validation\"]\n\n # Log a few random samples from the training set:\n for index in random.sample(range(len(train_dataset)), 3):\n logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n # DataLoaders creation:\n if args.pad_to_max_length:\n # If padding was already done ot max length, we use the default data collator that will just convert everything\n # to tensors.\n data_collator = default_data_collator\n else:\n # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of\n # the samples passed). 
When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple\n # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).\n data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=None)\n\n train_dataloader = DataLoader(\n train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size\n )\n eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)\n\n # Optimizer\n # Split weights in two groups, one with weight decay and the other not.\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n\n # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be\n # shorter in multiprocess)\n\n # Scheduler and math around the number of training steps.\n num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n if args.max_train_steps is None:\n args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n else:\n args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n\n lr_scheduler = get_scheduler(\n name=args.lr_scheduler_type,\n optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps,\n num_training_steps=args.max_train_steps,\n )\n\n # Get the metric function\n if args.task_name is not None:\n metric = load_metric(\"glue\", args.task_name)\n\n # Pruning\n if args.do_prune:\n # TODO: To remove eval baseline\n # Eval first for baseline\n model.eval()\n eval_dataloader = tqdm(eval_dataloader, desc=\"Evaluating\")\n for step, batch in enumerate(eval_dataloader):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n metric.add_batch(\n predictions=predictions,\n references=batch[\"labels\"],\n )\n\n eval_metric = metric.compute()\n logger.info(f\"before prune eval metric: {eval_metric}\")\n\n prune_eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)\n prune_eval_dataloader = tqdm(prune_eval_dataloader, desc=\"Evaluating\")\n from lpot.experimental import Pruning, common\n def train_func(model):\n return take_train_steps(args, model, prune_eval_dataloader, metric, prune)\n\n def eval_func(model):\n return take_eval_steps(args, model, prune_eval_dataloader, metric, prune)\n\n # eval datasets.\n prune = Pruning(args.config)\n prune.model = common.Model(model)\n prune.pruning_func = train_func\n prune.eval_func = eval_func\n model = prune()\n model.save(args.output_model)\n # change to framework model for further use\n model = model.model\n\n if args.do_train:\n # Train!\n total_batch_size = args.per_device_train_batch_size * args.gradient_accumulation_steps\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {len(train_dataset)}\")\n logger.info(f\" Num Epochs = {args.num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {args.max_train_steps}\")\n # Only show the progress bar once on each machine.\n progress_bar = tqdm(range(args.max_train_steps))\n completed_steps = 0\n\n for epoch in range(args.num_train_epochs):\n model.train()\n for step, batch in enumerate(train_dataloader):\n outputs = model(**batch)\n loss = outputs.loss\n loss = loss / args.gradient_accumulation_steps\n loss.backward()\n if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n completed_steps += 1\n\n if completed_steps >= args.max_train_steps:\n break\n # required when actual steps and max train steps doesn't match\n progress_bar.close()\n torch.save(model.state_dict(), args.output_model + '/retrained_model.pth')\n\n if args.do_eval:\n eval_dataloader = tqdm(eval_dataloader, desc=\"Evaluating\")\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n metric.add_batch(\n predictions=predictions,\n references=batch[\"labels\"],\n )\n\n eval_metric = metric.compute()\n logger.info(f\"eval_metric: {eval_metric}\")\n\n # Get the metric function\n if args.task_name is not None:\n metric = load_metric(\"glue\", args.task_name)\n\n if args.task_name == \"mnli\":\n # Final evaluation on mismatched validation set\n eval_dataset = processed_datasets[\"validation_mismatched\"]\n eval_dataloader = DataLoader(\n eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size\n )\n\n model.eval()\n for step, batch in enumerate(eval_dataloader):\n outputs = model(**batch)\n predictions = outputs.logits.argmax(dim=-1)\n metric.add_batch(\n predictions=predictions,\n references=batch[\"labels\"],\n )\n\n eval_metric = metric.compute()\n logger.info(f\"mnli-mm: {eval_metric}\")\n\n if args.use_onnx:\n export_onnx_model(args, model)\n from onnxruntime.quantization import quantize_dynamic, QuantType\n from onnxruntime import ExecutionMode, InferenceSession, SessionOptions\n\n from onnxruntime.transformers import optimizer\n from onnxruntime.transformers.onnx_model_bert import BertOptimizationOptions\n\n import onnx\n onnx_saved_model = os.path.join(args.output_model, args.onnx_model_name)\n onnx_opt_model = onnx.load(onnx_saved_model)\n if args.quantization:\n logger.info(f\"quantize onnx model ... \")\n quantize_dynamic(onnx_saved_model,\n onnx_saved_model,\n op_types_to_quantize=['MatMul', 'Attention'],\n weight_type=QuantType.QInt8,\n per_channel=True,\n reduce_range=True,\n extra_options={'WeightSymmetric': False, 'MatMulConstBOnly': True})\n\n onnx_options = SessionOptions()\n if args.core_per_instance > 0:\n onnx_options.intra_op_num_threads = args.core_per_instance\n onnx_options.execution_mode = ExecutionMode.ORT_SEQUENTIAL\n\n opt_options = BertOptimizationOptions('bert')\n opt_options.enable_embed_layer_norm = False\n\n logger.info(f\"optimize onnx model ... \")\n model_optimizer = optimizer.optimize_model(\n onnx_saved_model,\n 'bert',\n num_heads=0,\n hidden_size=0,\n optimization_options=opt_options)\n onnx_model = model_optimizer.model\n\n logger.info(f\"executing onnx model ... 
\")\n\n eval_dataset = processed_datasets[\"validation_matched\" if args.task_name == \"mnli\" else \"validation\"]\n eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)\n evaluate_onnxrt(args, onnx_model, eval_dataloader, metric, onnx_options)\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\n\nfrom .lr_scheduler import WarmupMultiStepLR\n\n\ndef make_optimizer(cfg, model):\n params = []\n for key, value in model.named_parameters():\n if not value.requires_grad:\n continue\n lr = cfg.SOLVER.BASE_LR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY\n if \"bias\" in key:\n lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR\n weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS\n params += [{\"params\": [value], \"lr\": lr, \"weight_decay\": weight_decay}]\n\n optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM)\n return optimizer\n\n\ndef make_lr_scheduler(cfg, optimizer):\n return WarmupMultiStepLR(\n optimizer,\n cfg.SOLVER.STEPS,\n cfg.SOLVER.GAMMA,\n warmup_factor=cfg.SOLVER.WARMUP_FACTOR,\n warmup_iters=cfg.SOLVER.WARMUP_ITERS,\n warmup_method=cfg.SOLVER.WARMUP_METHOD,\n )\n",
"\"\"\"Tests for quantization\"\"\"\r\nimport numpy as np\r\nimport unittest\r\nimport os\r\nimport shutil\r\nimport yaml\r\nimport tensorflow as tf\r\n\r\ndef build_fake_yaml():\r\n fake_yaml = '''\r\n model:\r\n name: fake_yaml\r\n framework: tensorflow\r\n inputs: x\r\n outputs: op_to_store\r\n device: cpu\r\n evaluation:\r\n accuracy:\r\n metric:\r\n topk: 1\r\n tuning:\r\n strategy:\r\n name: tpe\r\n accuracy_criterion:\r\n relative: 0.01\r\n workspace:\r\n path: saved\r\n '''\r\n y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)\r\n with open('fake_yaml.yaml',\"w\",encoding=\"utf-8\") as f:\r\n yaml.dump(y,f)\r\n f.close()\r\n\r\ndef build_fake_yaml2():\r\n fake_yaml = '''\r\n model:\r\n name: fake_yaml\r\n framework: tensorflow\r\n inputs: x\r\n outputs: op_to_store\r\n device: cpu\r\n evaluation:\r\n accuracy:\r\n metric:\r\n topk: 1\r\n tuning:\r\n strategy:\r\n name: tpe\r\n exit_policy:\r\n max_trials: 5\r\n accuracy_criterion:\r\n relative: -0.01\r\n workspace:\r\n path: saved\r\n '''\r\n y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)\r\n with open('fake_yaml2.yaml',\"w\",encoding=\"utf-8\") as f:\r\n yaml.dump(y,f)\r\n f.close()\r\n\r\ndef build_fake_model():\r\n try:\r\n graph = tf.Graph()\r\n graph_def = tf.GraphDef()\r\n\r\n with tf.Session() as sess:\r\n x = tf.placeholder(tf.float32, shape=(1,3,3,1), name='x')\r\n y = tf.constant(np.random.random((2,2,1,1)), name='y', dtype=tf.float32)\r\n op = tf.nn.conv2d(input=x, filter=y, strides=[1,1,1,1], padding='VALID', name='op_to_store')\r\n\r\n sess.run(tf.global_variables_initializer())\r\n constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ['op_to_store'])\r\n\r\n graph_def.ParseFromString(constant_graph.SerializeToString())\r\n with graph.as_default():\r\n tf.import_graph_def(graph_def, name='')\r\n except:\r\n graph = tf.Graph()\r\n graph_def = tf.compat.v1.GraphDef()\r\n with tf.compat.v1.Session() as sess:\r\n x = tf.compat.v1.placeholder(tf.float32, shape=(1,3,3,1), name='x')\r\n y = tf.compat.v1.constant(np.random.random((2,2,1,1)), name='y', dtype=tf.float32)\r\n op = tf.nn.conv2d(input=x, filters=y, strides=[1,1,1,1], padding='VALID', name='op_to_store')\r\n\r\n sess.run(tf.compat.v1.global_variables_initializer())\r\n constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, ['op_to_store'])\r\n\r\n graph_def.ParseFromString(constant_graph.SerializeToString())\r\n with graph.as_default():\r\n tf.import_graph_def(graph_def, name='')\r\n return graph\r\n\r\nclass TestQuantization(unittest.TestCase):\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n self.constant_graph = build_fake_model()\r\n build_fake_yaml()\r\n build_fake_yaml2()\r\n\r\n @classmethod\r\n def tearDownClass(self):\r\n try:\r\n os.remove('fake_yaml.yaml')\r\n os.remove('fake_yaml2.yaml')\r\n\r\n shutil.rmtree(\"saved\", ignore_errors=True)\r\n except:\r\n print(\"Error while deleting file \")\r\n\r\n def test_run_tpe_one_trial(self):\r\n from lpot.experimental import Quantization, common\r\n\r\n quantizer = Quantization('fake_yaml.yaml')\r\n dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)\r\n quantizer.calib_dataloader = common.DataLoader(dataset)\r\n quantizer.eval_dataloader = common.DataLoader(dataset)\r\n quantizer.model = self.constant_graph\r\n quantizer()\r\n\r\n def test_run_tpe_max_trials(self):\r\n from lpot.experimental import Quantization, common\r\n\r\n quantizer = Quantization('fake_yaml2.yaml')\r\n dataset = quantizer.dataset('dummy', (100, 3, 3, 1), 
label=True)\r\n        quantizer.calib_dataloader = common.DataLoader(dataset)\r\n        quantizer.eval_dataloader = common.DataLoader(dataset)\r\n        quantizer.model = self.constant_graph\r\n        quantizer()\r\n\r\n    def test_loss_calculation(self):\r\n        from lpot.contrib.strategy.tpe import TpeTuneStrategy\r\n        from lpot.experimental import Quantization, common\r\n\r\n        quantizer = Quantization('fake_yaml.yaml')\r\n        dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)\r\n        quantizer.calib_dataloader = common.DataLoader(dataset)\r\n        quantizer.eval_dataloader = common.DataLoader(dataset)\r\n        quantizer.model = self.constant_graph\r\n\r\n        testObject = TpeTuneStrategy(quantizer.model, quantizer.conf, quantizer.calib_dataloader)\r\n        testObject._calculate_loss_function_scaling_components(0.01, 2, testObject.loss_function_config)\r\n        # check if latency difference between min and max corresponds to 10 points of loss function\r\n        tmp_val = testObject.calculate_loss(0.01, 2, testObject.loss_function_config)\r\n        tmp_val2 = testObject.calculate_loss(0.01, 1, testObject.loss_function_config)\r\n        self.assertEqual(10, int(tmp_val2 - tmp_val))\r\n        # check if 1% of acc difference corresponds to 10 points of loss function\r\n        tmp_val = testObject.calculate_loss(0.02, 2, testObject.loss_function_config)\r\n        tmp_val2 = testObject.calculate_loss(0.03, 2, testObject.loss_function_config)\r\n        self.assertEqual(10, int(tmp_val2 - tmp_val))\r\n\r\nif __name__ == \"__main__\":\r\n    unittest.main()\r\n",
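The two assertions encode a scaling property of the TPE loss. A toy stand-in (not LPOT's actual implementation) that satisfies the same property, for intuition: a 0.01 accuracy delta and the full latency span are each worth ten loss points after scaling.

```python
def toy_loss(acc_diff, latency, lat_min=1.0, lat_max=2.0):
    acc_term = 1000.0 * acc_diff                          # 0.01 acc diff -> 10 points
    lat_term = 10.0 * (latency - lat_min) / (lat_max - lat_min)
    return acc_term + lat_term

assert int(toy_loss(0.01, 2.0) - toy_loss(0.01, 1.0)) == 10   # latency span -> 10
assert int(toy_loss(0.02, 2.0) - toy_loss(0.01, 2.0)) == 10   # 1% accuracy -> 10
```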
"# -*- coding: utf-8 -*-\n# Copyright (c) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"UX Communication test.\"\"\"\n\nimport unittest\n\nfrom lpot.ux.web.communication import (\n Message,\n MessageQueue,\n Request,\n Response,\n create_simple_response,\n)\n\n\nclass TestCommunication(unittest.TestCase):\n \"\"\"UX Communication tests.\"\"\"\n\n def setUp(self) -> None:\n \"\"\"Create test environment.\"\"\"\n self.queue = MessageQueue()\n\n def test_request(self) -> None:\n \"\"\"Test that Request is working.\"\"\"\n method = \"GET\"\n operation = \"/api/a/b/x\"\n data = self._get_random_dict()\n request = Request(method, operation, data)\n\n self.assertEqual(method, request.method)\n self.assertEqual(operation, request.operation)\n self.assertEqual(data, request.data)\n\n def test_response(self) -> None:\n \"\"\"Test that Response is working.\"\"\"\n response = Response()\n\n self.assertEqual({}, response.data)\n self.assertEqual({}, response.command)\n\n def test_create_simple_response(self) -> None:\n \"\"\"Test that create_simple_response is working.\"\"\"\n data = self._get_random_dict()\n response = create_simple_response(data)\n\n self.assertEqual(data, response.data)\n self.assertEqual({}, response.command)\n\n def test_message(self) -> None:\n \"\"\"Test that Message is working.\"\"\"\n status = \"Test status\"\n subject = \"Test subject\"\n data = self._get_random_dict()\n message = Message(status, subject, data)\n\n self.assertEqual(status, message.status)\n self.assertEqual(subject, message.subject)\n self.assertEqual(data, message.data)\n\n def test_message_queue_post_failure(self) -> None:\n \"\"\"Test posting failure messages to message queue.\"\"\"\n data = self._get_random_dict()\n self.queue.post_failure(\"subject\", data)\n self._assert_message(\"failure\", \"subject\", data)\n\n def test_message_queue_post_success(self) -> None:\n \"\"\"Test posting success messages to message queue.\"\"\"\n data = self._get_random_dict()\n self.queue.post_success(\"subject\", data)\n self._assert_message(\"success\", \"subject\", data)\n\n def test_message_queue_post_error(self) -> None:\n \"\"\"Test posting error messages to message queue.\"\"\"\n data = self._get_random_dict()\n self.queue.post_error(\"subject\", data)\n self._assert_message(\"error\", \"subject\", data)\n\n def _get_random_dict(self, size: int = 5) -> dict:\n \"\"\"Build random dict.\"\"\"\n from numpy.random import randint\n\n return {\"key \" + str(i): randint(65536) for i in range(size)}\n\n def _assert_message(\n self,\n expected_status: str,\n expected_subject: str,\n expected_data: dict,\n ) -> None:\n \"\"\"Assert message in queue matches expectations.\"\"\"\n message = self.queue.get()\n\n self.assertEqual(expected_status, message.status)\n self.assertEqual(expected_subject, message.subject)\n self.assertEqual(expected_data, message.data)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"import torch\nimport torch.nn as nn\nfrom models.base_model_r34 import ResNet34\nimport numpy as np\nfrom math import sqrt, ceil\nimport itertools\nimport torch.nn.functional as F\nimport torch.fx\n\n##Inspired by https://github.com/kuangliu/pytorch-ssd\n\nclass Encoder(object):\n \"\"\"\n Transform between (bboxes, lables) <-> SSD output\n \n dboxes: default boxes in size 8732 x 4, \n encoder: input ltrb format, output xywh format\n decoder: input xywh format, output ltrb format \n\n decode:\n input : bboxes_in (Tensor 8732 x 4), scores_in (Tensor 8732 x nitems)\n output : bboxes_out (Tensor nboxes x 4), labels_out (Tensor nboxes)\n criteria : IoU threshold of bboexes\n max_output : maximum number of output bboxes\n \"\"\"\n\n def __init__(self, dboxes):\n self.dboxes = dboxes(order=\"ltrb\")\n self.dboxes_xywh = dboxes(order=\"xywh\").unsqueeze(dim=0)\n self.nboxes = self.dboxes.size(0)\n #print(\"# Bounding boxes: {}\".format(self.nboxes))\n self.scale_xy = torch.tensor(dboxes.scale_xy)\n self.scale_wh = torch.tensor(dboxes.scale_wh)\n \n \n def decode_batch(self, bboxes_in, scores_in, criteria = 0.45, max_output=200):\n self.dboxes = self.dboxes.to(bboxes_in)\n self.dboxes_xywh = self.dboxes_xywh.to(bboxes_in)\n bboxes, probs = scale_back_batch(bboxes_in, scores_in,self.scale_xy,self.scale_wh,self.dboxes_xywh)\n result = bboex_labels_scores(bboxes, probs)\n return result\n\n\[email protected]\ndef bboex_labels_scores(bboxes, probs, criteria = 0.45, max_output=200):\n boxes = []; labels=[]; scores=[]\n for bbox, prob in zip(bboxes.split(1, 0), probs.split(1, 0)):\n bbox = bbox.squeeze(0)\n prob = prob.squeeze(0)\n dbox,dlabel,dscore=decode_single(bbox, prob, criteria, max_output)\n boxes.append(dbox)\n labels.append(dlabel)\n scores.append(dscore)\n return [boxes,labels,scores]\n\n\[email protected]\n# perform non-maximum suppression\ndef decode_single(bboxes_in, scores_in, criteria, max_output, max_num=200):\n # Reference to https://github.com/amdegroot/ssd.pytorch\n \n bboxes_out = [] \n scores_out = []\n labels_out = []\n\n for i, score in enumerate(scores_in.split(1, 1)):\n # skip background\n if i == 0: continue\n \n score = score.squeeze(1)\n mask = score > 0.05\n\n bboxes, score = bboxes_in[mask, :], score[mask]\n if score.size(0) == 0: continue\n\n score_sorted, score_idx_sorted = score.sort(dim=0)\n \n # select max_output indices\n score_idx_sorted = score_idx_sorted[-max_num:]\n candidates = []\n \n while score_idx_sorted.numel() > 0:\n idx = score_idx_sorted[-1].item()\n bboxes_sorted = bboxes[score_idx_sorted, :]\n bboxes_idx = bboxes[idx, :].unsqueeze(dim=0)\n iou_sorted = calc_iou_tensor(bboxes_sorted, bboxes_idx).squeeze()\n # we only need iou < criteria \n score_idx_sorted = score_idx_sorted[iou_sorted < criteria]\n candidates.append(idx)\n\n bboxes_out.append(bboxes[candidates, :])\n scores_out.append(score[candidates])\n labels_out.extend([i]*len(candidates))\n\n bboxes_out, labels_out, scores_out = torch.cat(bboxes_out, dim=0), \\\n torch.tensor(labels_out, dtype=torch.long), \\\n torch.cat(scores_out, dim=0)\n\n\n _, max_ids = scores_out.sort(dim=0)\n max_ids = max_ids[-max_output:]\n return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids]\n\n\[email protected]\ndef calc_iou_tensor(box1, box2):\n \"\"\" Calculation of IoU based on two boxes tensor,\n Reference to https://github.com/kuangliu/pytorch-ssd\n input:\n box1 (N, 4) \n box2 (M, 4)\n output:\n IoU (N, M)\n \"\"\"\n N = box1.size(0)\n M = box2.size(0)\n\n be1 = box1.unsqueeze(1).expand(-1, M, 
-1)\n be2 = box2.unsqueeze(0).expand(N, -1, -1)\n\n # Left Top & Right Bottom\n lt = torch.max(be1[:,:,:2], be2[:,:,:2])\n rb = torch.min(be1[:,:,2:], be2[:,:,2:])\n delta = rb - lt\n delta.clone().masked_fill_(delta < 0,0)\n intersect = delta[:,:,0]*delta[:,:,1]\n delta1 = be1[:,:,2:] - be1[:,:,:2]\n area1 = delta1[:,:,0]*delta1[:,:,1]\n delta2 = be2[:,:,2:] - be2[:,:,:2]\n area2 = delta2[:,:,0]*delta2[:,:,1]\n\n iou = intersect/(area1 + area2 - intersect)\n return iou\n\n\[email protected]\n#@torch.jit.script\ndef scale_back_batch(bboxes_in, scores_in,scale_xy,scale_wh,dboxes_xywh):\n \"\"\"\n Do scale and transform from xywh to ltrb\n suppose input Nx4xnum_bbox Nxlabel_numxnum_bbox\n \"\"\" \n bboxes_in = bboxes_in.permute(0, 2, 1)\n scores_in = scores_in.permute(0, 2, 1)\n\n bboxes_in[:, :, :2] = scale_xy*bboxes_in[:, :, :2]\n bboxes_in[:, :, 2:] = scale_wh*bboxes_in[:, :, 2:]\n bboxes_in[:, :, :2] = bboxes_in[:, :, :2]*dboxes_xywh[:, :, 2:] + dboxes_xywh[:, :, :2]\n bboxes_in[:, :, 2:] = bboxes_in[:, :, 2:].exp()*dboxes_xywh[:, :, 2:]\n # Transform format to ltrb \n l, t, r, b = bboxes_in[:, :, 0] - 0.5*bboxes_in[:, :, 2],\\\n bboxes_in[:, :, 1] - 0.5*bboxes_in[:, :, 3],\\\n bboxes_in[:, :, 0] + 0.5*bboxes_in[:, :, 2],\\\n bboxes_in[:, :, 1] + 0.5*bboxes_in[:, :, 3]\n bboxes_in[:, :, 0] = l\n bboxes_in[:, :, 1] = t\n bboxes_in[:, :, 2] = r\n bboxes_in[:, :, 3] = b\n return bboxes_in, F.softmax(scores_in, dim=-1)\n\n\nclass DefaultBoxes(object):\n def __init__(self, fig_size, feat_size, steps, scales, aspect_ratios, \\\n scale_xy=0.1, scale_wh=0.2):\n\n self.feat_size = feat_size\n self.fig_size_w,self.fig_size_h = fig_size\n\n self.scale_xy_ = scale_xy\n self.scale_wh_ = scale_wh\n \n # According to https://github.com/weiliu89/caffe\n # Calculation method slightly different from paper\n self.steps_w = [st[0] for st in steps]\n self.steps_h = [st[1] for st in steps]\n self.scales = scales\n fkw = self.fig_size_w//np.array(self.steps_w)\n fkh = self.fig_size_h//np.array(self.steps_h)\n self.aspect_ratios = aspect_ratios\n\n self.default_boxes = []\n # size of feature and number of feature\n for idx, sfeat in enumerate(self.feat_size):\n sfeat_w,sfeat_h=sfeat\n sk1 = scales[idx][0]/self.fig_size_w\n sk2 = scales[idx+1][1]/self.fig_size_h\n sk3 = sqrt(sk1*sk2)\n all_sizes = [(sk1, sk1), (sk3, sk3)]\n for alpha in aspect_ratios[idx]:\n w, h = sk1*sqrt(alpha), sk1/sqrt(alpha)\n all_sizes.append((w, h))\n all_sizes.append((h, w))\n for w, h in all_sizes:\n for i, j in itertools.product(range(sfeat_w), range(sfeat_h)):\n cx, cy = (j+0.5)/fkh[idx], (i+0.5)/fkw[idx]\n self.default_boxes.append((cx, cy, w, h)) \n self.dboxes = torch.tensor(self.default_boxes)\n self.dboxes.clamp_(min=0, max=1)\n # For IoU calculation\n self.dboxes_ltrb = self.dboxes.clone()\n self.dboxes_ltrb[:, 0] = self.dboxes[:, 0] - 0.5*self.dboxes[:, 2]\n self.dboxes_ltrb[:, 1] = self.dboxes[:, 1] - 0.5*self.dboxes[:, 3]\n self.dboxes_ltrb[:, 2] = self.dboxes[:, 0] + 0.5*self.dboxes[:, 2]\n self.dboxes_ltrb[:, 3] = self.dboxes[:, 1] + 0.5*self.dboxes[:, 3]\n \n @property\n def scale_xy(self):\n return self.scale_xy_\n \n @property \n def scale_wh(self):\n return self.scale_wh_\n\n def __call__(self, order=\"ltrb\"):\n if order == \"ltrb\": return self.dboxes_ltrb\n if order == \"xywh\": return self.dboxes\n\ndef dboxes_R34_coco(figsize,strides):\n feat_size = [[50, 50], [25, 25], [13, 13], [7, 7], [3, 3], [3, 3]]\n steps=[(int(figsize[0]/fs[0]),int(figsize[1]/fs[1])) for fs in feat_size]\n scales = 
[(int(s*figsize[0]/300),int(s*figsize[1]/300)) for s in [21, 45, 99, 153, 207, 261, 315]] \n aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]] \n dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)\n return dboxes\n\nclass SSD_R34(nn.Module):\n \"\"\"\n Build a SSD module to take 300x300 image input,\n and output 8732 per class bounding boxes\n\n vggt: pretrained vgg16 (partial) model\n label_num: number of classes (including background 0)\n \"\"\"\n def __init__(self, label_num=81, backbone='resnet34', model_path=\"./resnet34-333f7ec4.pth\",strides=[3,3 ,2 ,2 ,2 ,2],extract_shapes=False):\n\n super(SSD_R34, self).__init__()\n\n self.label_num = label_num\n self.strides = strides\n if backbone == 'resnet34':\n self.model = ResNet34()\n out_channels = 256\n self.out_chan = [out_channels, 512, 512, 256, 256, 256]\n else:\n raise ValueError('Invalid backbone chosen')\n\n self._build_additional_features(self.out_chan)\n self.extract_shapes=extract_shapes\n # after l2norm, conv7, conv8_2, conv9_2, conv10_2, conv11_2\n # classifer 1, 2, 3, 4, 5 ,6\n\n self.num_defaults = [4, 6, 6, 6, 4, 4]\n self.loc = []\n self.conf = []\n for nd, oc in zip(self.num_defaults, self.out_chan):\n self.loc.append(nn.Conv2d(oc, nd*4, kernel_size=3, padding=1,stride=self.strides[0]))\n self.conf.append(nn.Conv2d(oc, nd*label_num, kernel_size=3, padding=1,stride=self.strides[1]))\n\n self.loc = nn.ModuleList(self.loc)\n self.conf = nn.ModuleList(self.conf)\n if not extract_shapes:\n self.size=(1200,1200)\n dboxes = dboxes_R34_coco(list(self.size),[3,3,2,2,2,2])\n self.encoder = Encoder(dboxes)\n # intitalize all weights\n self._init_weights()\n self.device = 1\n def _build_additional_features(self, input_channels):\n idx = 0\n self.additional_blocks = []\n \n self.additional_blocks.append(nn.Sequential(\n nn.Conv2d(input_channels[idx], 256, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, input_channels[idx+1], kernel_size=3, padding=1,stride=self.strides[2]),\n nn.ReLU(inplace=True),\n ))\n idx += 1\n\n self.additional_blocks.append(nn.Sequential(\n nn.Conv2d(input_channels[idx], 256, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, input_channels[idx+1], kernel_size=3, padding=1, stride=self.strides[3]),\n nn.ReLU(inplace=True),\n ))\n idx += 1\n\n # conv9_1, conv9_2\n self.additional_blocks.append(nn.Sequential(\n nn.Conv2d(input_channels[idx], 128, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, input_channels[idx+1], kernel_size=3, padding=1, stride=self.strides[4]),\n nn.ReLU(inplace=True),\n ))\n idx += 1\n\n # conv10_1, conv10_2\n self.additional_blocks.append(nn.Sequential(\n nn.Conv2d(input_channels[idx], 128, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, input_channels[idx+1], kernel_size=3,stride=self.strides[5]),\n nn.ReLU(inplace=True),\n ))\n idx += 1\n\n\n\n # conv11_1, conv11_2\n self.additional_blocks.append(nn.Sequential(\n nn.Conv2d(input_channels[idx], 128, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, input_channels[idx+1], kernel_size=3),\n nn.ReLU(inplace=True),\n ))\n\n self.additional_blocks = nn.ModuleList(self.additional_blocks)\n\n def _init_weights(self):\n\n layers = [\n *self.additional_blocks,\n *self.loc, *self.conf]\n\n for layer in layers:\n for param in layer.parameters():\n if param.dim() > 1: nn.init.xavier_uniform_(param)\n\n # Shape the classifier to the view of bboxes\n def bbox_view(self, src, loc, conf,extract_shapes=False):\n ret = []\n features_shapes = []\n for s, l, c in zip(src, loc, conf):\n 
ret.append((l(s).reshape(s.size(0), 4, -1), c(s).reshape(s.size(0), self.label_num, -1)))\n            # extract shapes for prior box initialization\n            if extract_shapes:\n                ls = l(s)\n                features_shapes.append([ls.shape[2], ls.shape[3]])\n        locs, confs = list(zip(*ret))\n        locs, confs = torch.cat(locs, 2).contiguous(), torch.cat(confs, 2).contiguous()\n        return locs, confs, features_shapes\n\n    def forward(self, data):\n        layers = self.model(data)\n\n        # last result from network goes into additional blocks\n        x = layers[-1]\n\n        additional_results = []\n        for i, l in enumerate(self.additional_blocks):\n            x = l(x)\n            additional_results.append(x)\n\n        src = [*layers, *additional_results]\n        # Feature map sizes depend on the image size. For 300x300 with strides=[1,1,2,2,2,1] they are 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4\n        locs, confs, features_shapes = self.bbox_view(src, self.loc, self.conf, extract_shapes=self.extract_shapes)\n        if self.extract_shapes:\n            return locs, confs, features_shapes\n        else:\n            # For SSD 300 with strides=[1,1,2,2,2,1], shall return nbatch x 8732 x {nlabels, nlocs} results\n            results = self.encoder.decode_batch(locs, confs, 0.50, 200)\n            return results\n",
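A short sketch of generating the default (prior) boxes consumed by Encoder above, assuming this module's definitions are imported; the 1200x1200 figure size matches the hard-coded self.size in SSD_R34, and the strides list mirrors the call made there.

```python
dboxes = dboxes_R34_coco([1200, 1200], [3, 3, 2, 2, 2, 2])
priors_ltrb = dboxes(order="ltrb")   # (num_priors, 4), coordinates clamped to [0, 1]
priors_xywh = dboxes(order="xywh")   # same boxes in center/size form
print(priors_ltrb.shape, dboxes.scale_xy, dboxes.scale_wh)
```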
"from pathlib import Path\r\nimport numpy as np\r\nimport torch\r\nfrom ..tools.common import logger\r\n\r\nclass ModelCheckpoint(object):\r\n def __init__(self, checkpoint_dir,\r\n monitor,\r\n arch,mode='min',\r\n epoch_freq=1,\r\n best = None,\r\n save_best_only = True):\r\n if isinstance(checkpoint_dir,Path):\r\n checkpoint_dir = checkpoint_dir\r\n else:\r\n checkpoint_dir = Path(checkpoint_dir)\r\n assert checkpoint_dir.is_dir()\r\n checkpoint_dir.mkdir(exist_ok=True)\r\n self.base_path = checkpoint_dir\r\n self.arch = arch\r\n self.monitor = monitor\r\n self.epoch_freq = epoch_freq\r\n self.save_best_only = save_best_only\r\n\r\n if mode == 'min':\r\n self.monitor_op = np.less\r\n self.best = np.Inf\r\n\r\n elif mode == 'max':\r\n self.monitor_op = np.greater\r\n self.best = -np.Inf\r\n if best:\r\n self.best = best\r\n\r\n if save_best_only:\r\n self.model_name = f\"BEST_{arch}_MODEL.pth\"\r\n\r\n def epoch_step(self, state,current):\r\n if self.save_best_only:\r\n if self.monitor_op(current, self.best):\r\n logger.info(f\"\\nEpoch {state['epoch']}: {self.monitor} improved from {self.best:.5f} to {current:.5f}\")\r\n self.best = current\r\n state['best'] = self.best\r\n best_path = self.base_path/ self.model_name\r\n torch.save(state, str(best_path))\r\n else:\r\n filename = self.base_path / f\"EPOCH_{state['epoch']}_{state[self.monitor]}_{self.arch}_MODEL.pth\"\r\n if state['epoch'] % self.epoch_freq == 0:\r\n logger.info(f\"\\nEpoch {state['epoch']}: save model to disk.\")\r\n torch.save(state, str(filename))\r\n\r\n def bert_epoch_step(self, state,current):\r\n model_to_save = state['model']\r\n if self.save_best_only:\r\n if self.monitor_op(current, self.best):\r\n logger.info(f\"\\nEpoch {state['epoch']}: {self.monitor} improved from {self.best:.5f} to {current:.5f}\")\r\n self.best = current\r\n state['best'] = self.best\r\n model_to_save.save_pretrained(str(self.base_path))\r\n output_config_file = self.base_path / 'configs.json'\r\n with open(str(output_config_file), 'w') as f:\r\n f.write(model_to_save.config.to_json_string())\r\n state.pop(\"model\")\r\n torch.save(state,self.base_path / 'checkpoint_info.bin')\r\n else:\r\n if state['epoch'] % self.epoch_freq == 0:\r\n save_path = self.base_path / f\"checkpoint-epoch-{state['epoch']}\"\r\n save_path.mkdir(exist_ok=True)\r\n logger.info(f\"\\nEpoch {state['epoch']}: save model to disk.\")\r\n model_to_save.save_pretrained(save_path)\r\n output_config_file = save_path / 'configs.json'\r\n with open(str(output_config_file), 'w') as f:\r\n f.write(model_to_save.config.to_json_string())\r\n state.pop(\"model\")\r\n torch.save(state, save_path / 'checkpoint_info.bin')\r\n",
"import unittest\nimport tensorflow as tf\nfrom tensorflow.python.framework import graph_util\n\nclass TestConvertLayout(unittest.TestCase):\n def test_convert_layout(self):\n tf.compat.v1.disable_eager_execution()\n x = tf.compat.v1.placeholder(tf.float32, [1, 10, 10, 3], name=\"input\")\n conv_weights = tf.compat.v1.get_variable(\"weight\", [3, 3, 10, 3],\n initializer=tf.compat.v1.random_normal_initializer())\n conv = tf.nn.conv2d(x, conv_weights, strides=[1, 2, 2, 1], padding=\"VALID\", \n data_format='NCHW')\n relu = tf.nn.relu(conv, name='relu')\n out_name = relu.name.split(':')[0]\n with tf.compat.v1.Session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n output_graph_def = graph_util.convert_variables_to_constants(\n sess=sess,\n input_graph_def=sess.graph_def,\n output_node_names=[out_name])\n\n from lpot.adaptor.tf_utils.graph_rewriter.generic import convert_layout\n convert = convert_layout.ConvertLayoutOptimizer(output_graph_def, [out_name])\n convert_graph = convert.do_transformation()\n for node in convert_graph.node:\n if node.op == 'Conv2D' and 'data_format' in node.attr:\n if tf.version.VERSION >= '2.4.0':\n self.assertEqual(node.attr['data_format'].s, b'NHWC')\n else:\n self.assertEqual(node.attr['data_format'].s, b'NCHW')\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport torch\n\nfrom maskrcnn_benchmark.utils.comm import get_rank, is_main_process\nfrom mlperf_logging import mllog\nfrom mlperf_logging.mllog import constants\n\nmllogger = mllog.get_mllogger()\n\ndef log_start(*args, **kwargs):\n _log_print(mllogger.start, *args, **kwargs)\ndef log_end(*args, **kwargs):\n _log_print(mllogger.end, *args, **kwargs)\ndef log_event(*args, **kwargs):\n _log_print(mllogger.event, *args, **kwargs)\ndef _log_print(logger, *args, **kwargs):\n \"\"\"\n Wrapper for MLPerf compliance logging calls.\n All arguments but 'log_all_ranks' are passed to\n mlperf_logging.mllog.\n If 'log_all_ranks' is set to True then all distributed workers will print\n logging message, if set to False then only worker with rank=0 will print\n the message.\n \"\"\"\n if 'stack_offset' not in kwargs:\n kwargs['stack_offset'] = 3\n if 'value' not in kwargs:\n kwargs['value'] = None\n\n if kwargs.pop('log_all_ranks', False):\n log = True\n else:\n log = (get_rank() == 0)\n\n if log:\n logger(*args, **kwargs)\n\ndef configure_logger(benchmark):\n mllog.config(filename=os.path.join(os.path.dirname(os.path.abspath(__file__)), f'{benchmark}.log'))\n mllogger = mllog.get_mllogger()\n mllogger.logger.propagate = False\n\ndef mlperf_submission_log(benchmark):\n required_dist_init = ['RANK', 'WORLD_SIZE', 'MASTER_ADDR', 'MASTER_PORT']\n\n if all(var in os.environ for var in required_dist_init):\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n\n num_nodes = os.environ.get('SLURM_NNODES', 1)\n\n configure_logger(benchmark)\n\n log_event(\n key=constants.SUBMISSION_BENCHMARK,\n value=benchmark,\n )\n\n log_event(\n key=constants.SUBMISSION_ORG,\n value='NVIDIA')\n\n log_event(\n key=constants.SUBMISSION_DIVISION,\n value='closed')\n\n log_event(\n key=constants.SUBMISSION_STATUS,\n value='onprem')\n\n log_event(\n key=constants.SUBMISSION_PLATFORM,\n value=f'{num_nodes}xSUBMISSION_PLATFORM_PLACEHOLDER')\n\ndef generate_seeds(rng, size):\n seeds = [rng.randint(0, 2**32 - 1) for _ in range(size)]\n return seeds\n\ndef broadcast_seeds(seeds, device):\n if torch.distributed.is_initialized():\n seeds_tensor = torch.LongTensor(seeds).to(device)\n torch.distributed.broadcast(seeds_tensor, 0)\n seeds = seeds_tensor.tolist()\n return seeds\n",
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\"\"\"\nImplements the Generalized R-CNN framework\n\"\"\"\n\nimport torch\nfrom torch import nn\n\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nimport torch.fx; torch.fx.wrap('to_image_list')\n\nfrom ..backbone import build_backbone\nfrom ..rpn.rpn import build_rpn\nfrom ..roi_heads.roi_heads import build_roi_heads\n\n\nclass GeneralizedRCNN(nn.Module):\n \"\"\"\n Main class for Generalized R-CNN. Currently supports boxes and masks.\n It consists of three main parts:\n - backbone\n - rpn\n - heads: takes the features + the proposals from the RPN and computes\n detections / masks from it.\n \"\"\"\n\n def __init__(self, cfg):\n super(GeneralizedRCNN, self).__init__()\n\n self.backbone = build_backbone(cfg)\n self.rpn = build_rpn(cfg)\n self.roi_heads = build_roi_heads(cfg)\n\n def forward(self, images, targets=None):\n \"\"\"\n Arguments:\n images (list[Tensor] or ImageList): images to be processed\n targets (list[BoxList]): ground-truth boxes present in the image (optional)\n\n Returns:\n result (list[BoxList] or dict[Tensor]): the output from the model.\n During training, it returns a dict[Tensor] which contains the losses.\n During testing, it returns list[BoxList] contains additional fields\n like `scores`, `labels` and `mask` (for Mask R-CNN models).\n\n \"\"\"\n if self.training and targets is None:\n raise ValueError(\"In training mode, targets should be passed\")\n images = to_image_list(images)\n features = self.backbone(images.tensors)\n proposals, proposal_losses = self.rpn(images, features, targets)\n if self.roi_heads:\n x, result, detector_losses = self.roi_heads(features, proposals, targets)\n else:\n # RPN-only models don't have roi_heads\n x = features\n result = proposals\n detector_losses = {}\n\n if self.training:\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n return losses\n\n return result\n",
"import os\r\nimport unittest\r\nimport yaml\r\nimport shutil\r\nimport tensorflow as tf\r\nfrom tensorflow.python.framework import graph_util\r\nfrom lpot.adaptor.tensorflow import TensorFlowAdaptor\r\nfrom lpot.adaptor.tf_utils.util import disable_random\r\n\r\ndef build_fake_yaml():\r\n fake_yaml = '''\r\n model:\r\n name: fake_yaml\r\n framework: tensorflow\r\n inputs: input\r\n outputs: op_to_store\r\n device: cpu\r\n evaluation:\r\n accuracy:\r\n metric:\r\n topk: 1\r\n tuning:\r\n accuracy_criterion:\r\n relative: 0.0001\r\n workspace:\r\n path: saved\r\n '''\r\n y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)\r\n with open('fake_yaml.yaml', \"w\", encoding=\"utf-8\") as f:\r\n yaml.dump(y, f)\r\n f.close()\r\nclass TestQuantizeInput(unittest.TestCase):\r\n @classmethod\r\n def setUpClass(self):\r\n build_fake_yaml()\r\n\r\n @classmethod\r\n def tearDownClass(self):\r\n os.remove('fake_yaml.yaml')\r\n shutil.rmtree('./saved', ignore_errors=True)\r\n\r\n @disable_random()\r\n def test_quantize_input(self):\r\n x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name=\"input\")\r\n paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])\r\n x_pad = tf.pad(x, paddings, \"CONSTANT\")\r\n conv_weights = tf.compat.v1.get_variable(\"weight\", [3, 3, 16, 16],\r\n initializer=tf.compat.v1.random_normal_initializer())\r\n conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding=\"VALID\")\r\n\r\n conv_bias = tf.compat.v1.get_variable(\"bias\", [16],\r\n initializer=tf.compat.v1.random_normal_initializer())\r\n\r\n conv_bias = tf.math.add(conv, conv_bias)\r\n relu6 = tf.nn.relu6(conv_bias, name='op_to_store')\r\n\r\n out_name = relu6.name.split(':')[0]\r\n with tf.compat.v1.Session() as sess:\r\n sess.run(tf.compat.v1.global_variables_initializer())\r\n constant_graph = graph_util.convert_variables_to_constants(\r\n sess=sess,\r\n input_graph_def=sess.graph_def,\r\n output_node_names=[out_name])\r\n\r\n from lpot.experimental import Quantization, common\r\n quantizer = Quantization(\"./fake_yaml.yaml\")\r\n dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)\r\n quantizer.calib_dataloader = common.DataLoader(dataset)\r\n quantizer.model = constant_graph\r\n q_model = quantizer()\r\n\r\n framework_specific_info = {'device': 'cpu', 'approach': 'post_training_static_quant', \\\r\n 'random_seed': 1978, 'inputs': ['input'], 'outputs': ['op_to_store'], \\\r\n 'workspace_path': 'saved'}\r\n\r\n quantize_input_graph, _ = TensorFlowAdaptor(framework_specific_info).quantize_input(q_model.graph)\r\n Not_found_QuantizedV2 = True\r\n for i in quantize_input_graph.as_graph_def().node:\r\n if i.op == 'QuantizeV2':\r\n Not_found_QuantizedV2 = False\r\n break\r\n self.assertEqual(Not_found_QuantizedV2, True)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()",
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom transformers.data.processors.squad import SquadV1Processor, SquadV2Processor, SquadResult\nfrom transformers.data.metrics.squad_metrics import compute_predictions_logits, compute_predictions_log_probs, squad_evaluate\n\nimport argparse\nimport logging\nimport os\nimport random\nimport glob\nimport timeit\nimport numpy as np\nimport torch\nfrom torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom torch.quantization import quantize, prepare, convert, propagate_qconfig_, add_observer_\nfrom torch.quantization import \\\nQuantWrapper, QuantStub, DeQuantStub, default_qconfig, default_per_channel_qconfig\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept:\n from tensorboardX import SummaryWriter\n\nfrom tqdm import tqdm, trange\n\nfrom transformers import (WEIGHTS_NAME, BertConfig,\n BertForQuestionAnswering, BertTokenizer,\n XLMConfig, XLMForQuestionAnswering,\n XLMTokenizer, XLNetConfig,\n XLNetForQuestionAnswering,\n XLNetTokenizer,\n DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer,\n AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer,\n XLMConfig, XLMForQuestionAnswering, XLMTokenizer,\n )\n\nfrom transformers import AdamW, get_linear_schedule_with_warmup, squad_convert_examples_to_features\n\nlogger = logging.getLogger(__name__)\n\nALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \\\n for conf in (BertConfig, XLNetConfig, XLMConfig)), ())\n\nMODEL_CLASSES = {\n 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer),\n 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer),\n 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer),\n 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer),\n 'albert': (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer),\n 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer)\n}\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\ndef to_list(tensor):\n return tensor.detach().cpu().tolist()\n\ndef train(args, train_dataset, model, tokenizer):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if 
args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n \n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank,\n find_unused_parameters=True)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. 
parallel, distributed & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 1\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\n set_seed(args) # Added here for reproductibility (even between python 2 and 3)\n \n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n\n inputs = {\n 'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'start_positions': batch[3],\n 'end_positions': batch[4]\n }\n\n if args.model_type != 'distilbert':\n inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2]\n\n if args.model_type in ['xlnet', 'xlm']:\n inputs.update({'cls_index': batch[5], 'p_mask': batch[6]})\n\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n # Log metrics\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n tb_writer.add_scalar('eval_{}'.format(key), value, global_step)\n tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step)\n logging_loss = tr_loss\n\n # Save model checkpoint\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n torch.save(args, os.path.join(output_dir, 'training_args.bin'))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, prefix=\"\", calibration=False):\n dataset, examples, features = 
load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)\n\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n \n calibation_iteration = int((len(dataset) * 0.05 + args.eval_batch_size - 1) / args.eval_batch_size)\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n if args.mkldnn_eval:\n from torch.utils import mkldnn as mkldnn_utils\n model = mkldnn_utils.to_mkldnn(model)\n print(model)\n\n all_results = []\n start_time = timeit.default_timer()\n nb_eval_steps = 0 \n \n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n \n if calibration and nb_eval_steps >= calibation_iteration:\n break\n\n with torch.no_grad():\n inputs = {\n 'input_ids': batch[0],\n 'attention_mask': batch[1]\n }\n \n if args.model_type != 'distilbert':\n inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids\n\n example_indices = batch[3]\n \n # XLNet and XLM use more arguments for their predictions\n if args.model_type in ['xlnet', 'xlm']:\n inputs.update({'cls_index': batch[4], 'p_mask': batch[5]})\n\n outputs = model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n\n output = [to_list(output[i]) for output in outputs]\n\n # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other \"simpler\"\n # models only use two.\n if len(output) >= 5:\n start_logits = output[0]\n start_top_index = output[1]\n end_logits = output[2]\n end_top_index = output[3]\n cls_logits = output[4]\n\n result = SquadResult(\n unique_id, start_logits, end_logits, \n start_top_index=start_top_index, \n end_top_index=end_top_index, \n cls_logits=cls_logits\n )\n\n else:\n start_logits, end_logits = output\n result = SquadResult(\n unique_id, start_logits, end_logits\n )\n\n all_results.append(result)\n nb_eval_steps +=1 \n\n evalTime = timeit.default_timer() - start_time\n logger.info(\" Evaluation done in total %f secs (%f sec per example)\", evalTime, evalTime / len(dataset))\n\n # Compute predictions\n output_prediction_file = os.path.join(args.output_dir, \"predictions_{}.json\".format(prefix))\n output_nbest_file = os.path.join(args.output_dir, \"nbest_predictions_{}.json\".format(prefix))\n\n if args.version_2_with_negative:\n output_null_log_odds_file = os.path.join(args.output_dir, \"null_odds_{}.json\".format(prefix))\n else:\n output_null_log_odds_file = None\n\n # XLNet and XLM use a more complex post-processing procedure\n if args.model_type in ['xlnet', 'xlm']:\n start_n_top = model.config.start_n_top if hasattr(model, \"config\") else model.module.config.start_n_top\n end_n_top = model.config.end_n_top if hasattr(model, \"config\") else model.module.config.end_n_top\n\n predictions = compute_predictions_log_probs(examples, features, all_results, args.n_best_size,\n args.max_answer_length, output_prediction_file,\n 
output_nbest_file, output_null_log_odds_file,\n start_n_top, end_n_top,\n args.version_2_with_negative, tokenizer, args.verbose_logging)\n elif not calibration:\n predictions = compute_predictions_logits(examples, features, all_results, args.n_best_size,\n args.max_answer_length, args.do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file, args.verbose_logging,\n args.version_2_with_negative, args.null_score_diff_threshold)\n\n # Compute the F1 and exact scores.\n if not calibration:\n results = squad_evaluate(examples, predictions)\n return results\n\ndef load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Load data features from cache or dataset file\n input_dir = args.data_dir if args.data_dir else \".\"\n cached_features_file = os.path.join(input_dir, 'cached_{}_{}_{}'.format(\n 'dev' if evaluate else 'train',\n list(filter(None, args.model_name_or_path.split('/'))).pop(),\n str(args.max_seq_length))\n )\n\n # Init features and dataset from cache if it exists\n if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n features_and_dataset = torch.load(cached_features_file)\n features, dataset = features_and_dataset[\"features\"], features_and_dataset[\"dataset\"]\n else:\n logger.info(\"Creating features from dataset file at %s\", input_dir)\n\n if not args.data_dir:\n try:\n import tensorflow_datasets as tfds\n except ImportError:\n raise ImportError(\"If not data_dir is specified, tensorflow_datasets needs to be installed.\")\n\n if args.version_2_with_negative:\n logger.warn(\"tensorflow_datasets does not handle version 2 of SQuAD.\")\n\n tfds_examples = tfds.load(\"squad\")\n examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)\n else:\n processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()\n examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)\n\n features, dataset = squad_convert_examples_to_features( \n examples=examples,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride,\n max_query_length=args.max_query_length,\n is_training=not evaluate,\n return_dataset='pt'\n )\n\n if args.local_rank in [-1, 0]:\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n torch.save({\"features\": features, \"dataset\": dataset}, cached_features_file)\n\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--model_type\", default=None, type=str, required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()))\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS))\n parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n help=\"The 
output directory where the model checkpoints and predictions will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--data_dir\", default=None, type=str,\n help=\"The input data dir. Should contain the .json files for the task. If not specified, will run with tensorflow_datasets.\")\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--cache_dir\", default=\"\", type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\")\n\n parser.add_argument('--version_2_with_negative', action='store_true',\n help='If true, the SQuAD examples contain some that do not have an answer.')\n parser.add_argument('--null_score_diff_threshold', type=float, default=0.0,\n help=\"If null_score - best_non_null is greater than the threshold predict null.\")\n\n parser.add_argument(\"--max_seq_length\", default=384, type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. Sequences \"\n \"longer than this will be truncated, and sequences shorter than this will be padded.\")\n parser.add_argument(\"--doc_stride\", default=128, type=int,\n help=\"When splitting up a long document into chunks, how much stride to take between chunks.\")\n parser.add_argument(\"--max_query_length\", default=64, type=int,\n help=\"The maximum number of tokens for the question. Questions longer than this will \"\n \"be truncated to this length.\")\n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--evaluate_during_training\", action='store_true',\n help=\"Rul evaluation during training at each logging step.\")\n parser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--per_gpu_eval_batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--num_train_epochs\", default=3.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int,\n help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\"--n_best_size\", default=20, type=int,\n help=\"The total number of n-best predictions to generate in the nbest_predictions.json output file.\")\n parser.add_argument(\"--max_answer_length\", default=30, type=int,\n help=\"The maximum length of an answer that can be generated. This is needed because the start \"\n \"and end predictions are not conditioned on one another.\")\n parser.add_argument(\"--verbose_logging\", action='store_true',\n help=\"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\")\n\n parser.add_argument('--logging_steps', type=int, default=50,\n help=\"Log every X updates steps.\")\n parser.add_argument('--save_steps', type=int, default=50,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\"--eval_all_checkpoints\", action='store_true',\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\")\n parser.add_argument(\"--no_cuda\", action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument('--overwrite_output_dir', action='store_true',\n help=\"Overwrite the content of the output directory\")\n parser.add_argument('--overwrite_cache', action='store_true',\n help=\"Overwrite the cached training and evaluation sets\")\n parser.add_argument('--seed', type=int, default=42,\n help=\"random seed for initialization\")\n\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--fp16', action='store_true',\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\")\n parser.add_argument('--fp16_opt_level', type=str, default='O1',\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\")\n parser.add_argument('--server_ip', type=str, default='', help=\"Can be used for distant debugging.\")\n parser.add_argument('--server_port', type=str, default='', help=\"Can be used for distant debugging.\")\n parser.add_argument(\"--do_calibration\", action='store_true',\n help=\"Whether to do calibration.\")\n parser.add_argument(\"--do_int8_inference\", action='store_true',\n help=\"Whether to run int8 inference.\")\n parser.add_argument(\"--do_fp32_inference\", action='store_true',\n help=\"Whether to run fp32 inference.\")\n parser.add_argument(\"--mkldnn_eval\", action='store_true',\n help=\"evaluation with MKLDNN\")\n\n\n args = parser.parse_args()\n\n args.predict_file = os.path.join(args.output_dir, 'predictions_{}_{}.txt'.format(\n list(filter(None, args.model_name_or_path.split('/'))).pop(),\n str(args.max_seq_length))\n )\n\n if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:\n raise ValueError(\"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(args.output_dir))\n\n mix_qkv = False\n if args.do_calibration or args.do_int8_inference:\n mix_qkv = True\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)\n\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None)\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None)\n model = model_class.from_pretrained(args.model_name_or_path,\n from_tf=bool('.ckpt' in args.model_name_or_path),\n config=config,\n mix_qkv=mix_qkv, \n cache_dir=args.cache_dir if args.cache_dir else None)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.\n # Otherwise it'll default to \"promote\" mode, and we'll get fp32 operations. 
Note that running `--fp16_opt_level=\"O2\"` will\n # remove the need for this code, but it is still valid.\n if args.fp16:\n try:\n import apex\n apex.amp.register_half_function(torch, 'einsum')\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n # Training\n if args.do_train:\n train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n\n # Save the trained model and the tokenizer\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = model_class.from_pretrained(args.output_dir, force_download=True, mix_qkv=mix_qkv)\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n model.to(args.device)\n\n\n # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce model loading logs\n\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n\n for checkpoint in checkpoints:\n # Reload the model\n global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else \"\"\n if args.mkldnn_eval or args.do_fp32_inference:\n model = model_class.from_pretrained(checkpoint, force_download=True)\n model.to(args.device)\n\n # Evaluate\n result = evaluate(args, model, tokenizer, prefix=global_step)\n result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())\n results.update(result)\n\n if args.do_calibration:\n model = model_class.from_pretrained(checkpoint, force_download=True, mix_qkv=True)\n model.to(args.device)\n model.qconfig = default_per_channel_qconfig\n propagate_qconfig_(model)\n add_observer_(model)\n # Evaluate\n result = evaluate(args, model, tokenizer, prefix=global_step, calibration=True)\n convert(model, inplace = True)\n quantized_model_path = \"squad\" + str(global_step) + \"_quantized_model\" \n if not os.path.exists(quantized_model_path):\n os.makedirs(quantized_model_path)\n model.save_pretrained(quantized_model_path)\n result = evaluate(args, model, tokenizer, prefix=global_step) \n result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())\n results.update(result)\n if args.do_int8_inference:\n model = 
model_class.from_pretrained(checkpoint, force_download=True, mix_qkv=True)\n model.to(args.device)\n model.qconfig = default_per_channel_qconfig\n propagate_qconfig_(model)\n add_observer_(model)\n convert(model, inplace = True)\n quantized_model_path = \"squad\" + str(global_step) + \"_quantized_model\"\n if not os.path.exists(quantized_model_path):\n logger.info(\"Please run calibration first!\")\n return \n model_bin_file = os.path.join(quantized_model_path,\"pytorch_model.bin\" )\n state_dict = torch.load(model_bin_file)\n model.load_state_dict(state_dict)\n print(model)\n #result = evaluate(args, model, tokenizer, prefix=global_step)\n with torch.autograd.profiler.profile() as prof:\n result = evaluate(args, model, tokenizer, prefix=global_step)\n print(prof.key_averages().table(sort_by=\"cpu_time_total\"))\n result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())\n results.update(result)\n logger.info(\"Results: {}\".format(results))\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n",
"import argparse\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\nimport sys\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nuse_gpu = False\nif use_gpu:\n import torch.backends.cudnn as cudnn\n#import torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.multiprocessing as mp\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models.quantization as models\n\nimport subprocess\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N',\n help='mini-batch size (default: 256), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.1, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('-t', '--tune', dest='tune', action='store_true',\n help='tune best int8 model on calibration dataset')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=None, type=int,\n help='GPU id to use.')\nparser.add_argument('--ppn', default=1, type=int,\n help='number of processes on each node of distributed training')\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. 
This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\n\nbest_acc1 = 0\n\n\ndef main():\n args = parser.parse_args()\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.ppn > 1 or args.multiprocessing_distributed\n\n if use_gpu:\n ngpus_per_node = torch.cuda.device_count()\n else:\n ngpus_per_node = args.ppn\n\n #ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n #args.gpu = gpu\n #affinity = subprocess.check_output(\"lscpu | grep 'NUMA node[0-9]' | awk '{ print $4 }' | awk -F',' '{ print $1 }'\", shell=True)\n #os.environ['OMP_NUM_THREADS'] = '28'\n #os.environ['KMP_AFFINITY'] = 'proclist=[{}],granularity=thread,explicit'.format(affinity.splitlines()[gpu].decode('utf-8'))\n #print (os.environ['KMP_AFFINITY'])\n\n #if args.gpu is not None:\n # print(\"Use GPU: {} for training\".format(args.gpu))\n print(\"Use CPU: {} for training\".format(gpu))\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n # create model\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n model = models.__dict__[args.arch](pretrained=True, quantize=False)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n model = models.__dict__[args.arch]()\n\n if args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int(args.workers / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n else:\n #model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not 
set\n model = torch.nn.parallel.DistributedDataParallelCPU(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model)\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss()\n #criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n #cudnn.benchmark = True\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n validate(val_loader, model, criterion, args)\n\n if args.tune:\n model.eval()\n model.module.fuse_model()\n from lpot.experimental import Quantization, common\n quantizer = Quantization(\"./conf.yaml\")\n quantizer.model = common.Model(model)\n q_model = quantizer()\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args)\n\n # evaluate on validation set\n acc1 = validate(val_loader, model, criterion, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best)\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = 
AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,\n top5, prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n input = input.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.print(i)\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n if args.gpu is not None:\n input = input.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(acc1[0], input.size(0))\n top5.update(acc5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.print(i)\n\n # TODO: this should also be done with the ProgressMeter\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, 'model_best.pth.tar')\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, name, fmt=':f'):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, *meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def print(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n def _get_batch_fmtstr(self, num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + 
'/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n"
] |
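The SQuAD fine-tuning code in the row above drives its effective batch size through `gradient_accumulation_steps`: the per-step loss is divided by the accumulation factor, gradients from several forward/backward passes are summed, and clipping plus the optimizer step happen only when the counter wraps. A minimal, self-contained sketch of that pattern; `model`, `optimizer`, and `dataloader` are hypothetical placeholders, not objects from the script:

```python
import torch

def train_one_epoch(model, optimizer, dataloader,
                    accumulation_steps=4, max_grad_norm=1.0):
    """Gradient-accumulation sketch mirroring the loop in the row above."""
    model.train()
    model.zero_grad()
    for step, (inputs, targets) in enumerate(dataloader):
        loss = torch.nn.functional.cross_entropy(model(inputs), targets)
        # Scale the loss so the summed gradient matches one large-batch update.
        (loss / accumulation_steps).backward()
        if (step + 1) % accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            model.zero_grad()
```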
[
[
"numpy.array",
"torch.utils.data.DataLoader"
],
[
"torch.onnx.export",
"torch.ones",
"torch.from_numpy",
"torch.no_grad",
"torch.utils.data.dataloader.DataLoader"
],
[
"torch.optim.SGD"
],
[
"tensorflow.Graph",
"tensorflow.graph_util.convert_variables_to_constants",
"tensorflow.import_graph_def",
"numpy.random.random",
"tensorflow.compat.v1.graph_util.convert_variables_to_constants",
"tensorflow.placeholder",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"tensorflow.Session",
"tensorflow.compat.v1.GraphDef",
"tensorflow.GraphDef",
"tensorflow.nn.conv2d"
],
[
"numpy.random.randint"
],
[
"torch.nn.functional.softmax",
"torch.max",
"torch.cat",
"torch.min",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.tensor",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"numpy.array"
],
[
"torch.save"
],
[
"tensorflow.nn.relu",
"tensorflow.compat.v1.random_normal_initializer",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"tensorflow.python.framework.graph_util.convert_variables_to_constants",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.nn.conv2d"
],
[
"torch.LongTensor",
"torch.distributed.broadcast",
"torch.distributed.is_initialized",
"torch.distributed.init_process_group"
],
[
"torch.fx.wrap"
],
[
"tensorflow.math.add",
"tensorflow.compat.v1.random_normal_initializer",
"tensorflow.constant",
"tensorflow.nn.relu6",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"tensorflow.python.framework.graph_util.convert_variables_to_constants",
"tensorflow.pad",
"tensorflow.nn.conv2d"
],
[
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"torch.save",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.barrier",
"torch.quantization.add_observer_",
"torch.nn.DataParallel",
"torch.quantization.convert",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.quantization.propagate_qconfig_",
"torch.utils.mkldnn.to_mkldnn",
"torch.autograd.profiler.profile"
],
[
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.nn.parallel.DistributedDataParallelCPU",
"torch.load",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.save"
]
] |
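Several of the APIs listed above (`torch.quantization.propagate_qconfig_`, `torch.quantization.add_observer_`, `torch.quantization.convert`) implement eager-mode post-training static quantization: attach a qconfig, insert observers, run calibration batches, then convert to int8 modules. The higher-level `prepare`/`convert` helpers wrap the same steps; a hedged sketch with hypothetical `model` and `calibration_loader` stand-ins, not the SQuAD objects above:

```python
import torch

def quantize_static(model, calibration_loader):
    """Post-training static quantization sketch (eager mode)."""
    model.eval()
    model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
    torch.quantization.prepare(model, inplace=True)    # insert observers
    with torch.no_grad():
        for inputs, _ in calibration_loader:           # record activation ranges
            model(inputs)
    torch.quantization.convert(model, inplace=True)    # swap in int8 modules
    return model
```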
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
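The ImageNet script in the same row measures top-1/top-5 accuracy by taking the k highest-scoring class indices per sample and comparing them against the broadcast targets. A tiny self-contained check of that logic on a toy batch (all names are local to this sketch):

```python
import torch

def topk_accuracy(output, target, topk=(1, 5)):
    """output: (batch, classes) logits; target: (batch,) class indices."""
    with torch.no_grad():
        maxk = max(topk)
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()                                       # (maxk, batch)
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        return [correct[:k].reshape(-1).float().sum() * (100.0 / target.size(0))
                for k in topk]

logits = torch.tensor([[0.1, 0.9, 0.0], [0.5, 0.3, 0.2]])
targets = torch.tensor([1, 1])
print(topk_accuracy(logits, targets, topk=(1, 2)))  # [tensor(50.), tensor(100.)]
```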
josephdickson11/opencv
|
[
"20003c364dbefb37c1dae0125c37010f0f32fee0",
"20003c364dbefb37c1dae0125c37010f0f32fee0"
] |
[
"chapter3.py",
"chapter2.py"
] |
[
"import cv2\nimport numpy as np\n\nkernel = np.ones((5,5), np.uint8)\n\nimg = cv2.imread(\"resources/1591988497278.jpg\")\nprint(img.shape)\nedge_img = cv2.Canny(img, 30, 30)\nresize_img = cv2.resize(img,(300, 200))\ncropped_img = img[:200, :200]\n\ncv2.imshow(\"image box\", img)\ncv2.imshow(\"edge image\", edge_img)\ncv2.imshow(\"resized image\", resize_img)\ncv2.imshow(\"croppeed image\", cropped_img)\nprint(edge_img.shape)\n\ncv2.waitKey(0)",
"import cv2\nimport numpy as np\n\nkernel = np.ones((5,5), np.uint8)\n\n\n# basic functionalities\n# 1- imageto gray\n\nimg = cv2.imread(\"resources/1587631041441.jpg\")\nimg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n#add blur\nimg_blur = cv2.GaussianBlur(img, (11,11), 0)\n#add edge detection\nimg_canny = cv2.Canny(img_gray, 30, 30)\n#add image dialation\nimg_dialate = cv2.dilate(img_canny, kernel,iterations=1)\n#add image erosion\nimg_erode = cv2.erode(img_dialate, kernel, iterations=1)\n\ncv2.imshow(\"gray image\", img_gray)\ncv2.imshow(\"blur image\", img_blur)\ncv2.imshow(\"edge image\", img_canny)\ncv2.imshow(\"dialated image\", img_dialate)\ncv2.imshow(\"eroded Image\", img_erode)\n\n\ncv2.waitKey(0)\n"
] |
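Both OpenCV snippets call `cv2.Canny(img, 30, 30)` with identical low and high thresholds, which leaves the hysteresis stage with nothing to do. Canny's original recommendation, echoed in the OpenCV tutorials, is a low:high ratio of roughly 1:2 to 1:3; a hedged variant of the same call (the image path is a placeholder):

```python
import cv2

# Canny with hysteresis: weak edges (>= 50) survive only if connected to
# strong edges (>= 150). "image.jpg" is a placeholder path.
img = cv2.imread("image.jpg", cv2.IMREAD_GRAYSCALE)
edges = cv2.Canny(img, 50, 150)  # low:high ~ 1:3
cv2.imshow("edges", edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
```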
[
[
"numpy.ones"
],
[
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
worc3131/audio
|
[
"05bff83fdec3e8f70f80bf7a1b89951bf7050114"
] |
[
"torchaudio/functional/functional.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport math\nfrom typing import Optional, Tuple\nimport warnings\n\nimport torch\nfrom torch import Tensor\n\n__all__ = [\n \"spectrogram\",\n \"griffinlim\",\n \"amplitude_to_DB\",\n \"DB_to_amplitude\",\n \"compute_deltas\",\n \"compute_kaldi_pitch\",\n \"create_fb_matrix\",\n \"create_dct\",\n \"compute_deltas\",\n \"detect_pitch_frequency\",\n \"DB_to_amplitude\",\n \"mu_law_encoding\",\n \"mu_law_decoding\",\n \"complex_norm\",\n \"angle\",\n \"magphase\",\n \"phase_vocoder\",\n 'mask_along_axis',\n 'mask_along_axis_iid',\n 'sliding_window_cmn',\n \"spectral_centroid\",\n]\n\n\ndef spectrogram(\n waveform: Tensor,\n pad: int,\n window: Tensor,\n n_fft: int,\n hop_length: int,\n win_length: int,\n power: Optional[float],\n normalized: bool,\n center: bool = True,\n pad_mode: str = \"reflect\",\n onesided: bool = True\n) -> Tensor:\n r\"\"\"Create a spectrogram or a batch of spectrograms from a raw audio signal.\n The spectrogram can be either magnitude-only or complex.\n\n Args:\n waveform (Tensor): Tensor of audio of dimension (..., time)\n pad (int): Two sided padding of signal\n window (Tensor): Window tensor that is applied/multiplied to each frame/window\n n_fft (int): Size of FFT\n hop_length (int): Length of hop between STFT windows\n win_length (int): Window size\n power (float or None): Exponent for the magnitude spectrogram,\n (must be > 0) e.g., 1 for energy, 2 for power, etc.\n If None, then the complex spectrum is returned instead.\n normalized (bool): Whether to normalize by magnitude after stft\n center (bool, optional): whether to pad :attr:`waveform` on both sides so\n that the :math:`t`-th frame is centered at time :math:`t \\times \\text{hop\\_length}`.\n Default: ``True``\n pad_mode (string, optional): controls the padding method used when\n :attr:`center` is ``True``. Default: ``\"reflect\"``\n onesided (bool, optional): controls whether to return half of results to\n avoid redundancy. 
Default: ``True``\n\n Returns:\n Tensor: Dimension (..., freq, time), freq is\n ``n_fft // 2 + 1`` and ``n_fft`` is the number of\n Fourier bins, and time is the number of window hops (n_frame).\n \"\"\"\n\n if pad > 0:\n # TODO add \"with torch.no_grad():\" back when JIT supports it\n waveform = torch.nn.functional.pad(waveform, (pad, pad), \"constant\")\n\n # pack batch\n shape = waveform.size()\n waveform = waveform.reshape(-1, shape[-1])\n\n # default values are consistent with librosa.core.spectrum._spectrogram\n spec_f = torch.stft(\n input=waveform,\n n_fft=n_fft,\n hop_length=hop_length,\n win_length=win_length,\n window=window,\n center=center,\n pad_mode=pad_mode,\n normalized=False,\n onesided=onesided,\n return_complex=True,\n )\n\n # unpack batch\n spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])\n\n if normalized:\n spec_f /= window.pow(2.).sum().sqrt()\n if power is not None:\n if power == 1.0:\n return spec_f.abs()\n return spec_f.abs().pow(power)\n return torch.view_as_real(spec_f)\n\n\ndef griffinlim(\n specgram: Tensor,\n window: Tensor,\n n_fft: int,\n hop_length: int,\n win_length: int,\n power: float,\n normalized: bool,\n n_iter: int,\n momentum: float,\n length: Optional[int],\n rand_init: bool\n) -> Tensor:\n r\"\"\"Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.\n Implementation ported from `librosa`.\n\n * [1] McFee, Brian, Colin Raffel, Dawen Liang, Daniel PW Ellis, Matt McVicar, Eric Battenberg, and Oriol Nieto.\n \"librosa: Audio and music signal analysis in python.\"\n In Proceedings of the 14th python in science conference, pp. 18-25. 2015.\n * [2] Perraudin, N., Balazs, P., & Søndergaard, P. L.\n \"A fast Griffin-Lim algorithm,\"\n IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4),\n Oct. 2013.\n * [3] D. W. Griffin and J. S. Lim,\n \"Signal estimation from modified short-time Fourier transform,\"\n IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984.\n\n Args:\n specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames)\n where freq is ``n_fft // 2 + 1``.\n window (Tensor): Window tensor that is applied/multiplied to each frame/window\n n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins\n hop_length (int): Length of hop between STFT windows. (\n Default: ``win_length // 2``)\n win_length (int): Window size. (Default: ``n_fft``)\n power (float): Exponent for the magnitude spectrogram,\n (must be > 0) e.g., 1 for energy, 2 for power, etc.\n normalized (bool): Whether to normalize by magnitude after stft.\n n_iter (int): Number of iteration for phase recovery process.\n momentum (float): The momentum parameter for fast Griffin-Lim.\n Setting this to 0 recovers the original Griffin-Lim method.\n Values near 1 can lead to faster convergence, but above 1 may not converge.\n length (int or None): Array length of the expected output.\n rand_init (bool): Initializes phase randomly if True, to zero otherwise.\n\n Returns:\n torch.Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.\n \"\"\"\n assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)\n assert momentum >= 0, 'momentum={} < 0'.format(momentum)\n\n if normalized:\n warnings.warn(\n \"The argument normalized is not used in Griffin-Lim, \"\n \"and will be removed in v0.9.0 release. 
To suppress this warning, \"\n \"please use `normalized=False`.\")\n\n # pack batch\n shape = specgram.size()\n specgram = specgram.reshape([-1] + list(shape[-2:]))\n\n specgram = specgram.pow(1 / power)\n\n # randomly initialize the phase\n batch, freq, frames = specgram.size()\n if rand_init:\n angles = 2 * math.pi * torch.rand(batch, freq, frames)\n else:\n angles = torch.zeros(batch, freq, frames)\n angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \\\n .to(dtype=specgram.dtype, device=specgram.device)\n specgram = specgram.unsqueeze(-1).expand_as(angles)\n\n # And initialize the previous iterate to 0\n rebuilt = torch.tensor(0.)\n\n for _ in range(n_iter):\n # Store the previous iterate\n tprev = rebuilt\n\n # Invert with our current estimate of the phases\n inverse = torch.istft(specgram * angles,\n n_fft=n_fft,\n hop_length=hop_length,\n win_length=win_length,\n window=window,\n length=length).float()\n\n # Rebuild the spectrogram\n rebuilt = torch.view_as_real(\n torch.stft(\n input=inverse,\n n_fft=n_fft,\n hop_length=hop_length,\n win_length=win_length,\n window=window,\n center=True,\n pad_mode='reflect',\n normalized=False,\n onesided=True,\n return_complex=True,\n )\n )\n\n # Update our phase estimates\n angles = rebuilt\n if momentum:\n angles = angles - tprev.mul_(momentum / (1 + momentum))\n angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles))\n\n # Return the final phase estimates\n waveform = torch.istft(specgram * angles,\n n_fft=n_fft,\n hop_length=hop_length,\n win_length=win_length,\n window=window,\n length=length)\n\n # unpack batch\n waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])\n\n return waveform\n\n\ndef amplitude_to_DB(\n x: Tensor,\n multiplier: float,\n amin: float,\n db_multiplier: float,\n top_db: Optional[float] = None\n) -> Tensor:\n r\"\"\"Turn a spectrogram from the power/amplitude scale to the decibel scale.\n\n The output of each tensor in a batch depends on the maximum value of that tensor,\n and so may return different values for an audio clip split into snippets vs. a full clip.\n\n Args:\n\n x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take\n the form `(..., freq, time)`. Batched inputs should include a channel dimension and\n have the form `(batch, channel, freq, time)`.\n multiplier (float): Use 10. for power and 20. for amplitude\n amin (float): Number to clamp ``x``\n db_multiplier (float): Log10(max(reference value and amin))\n top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number\n is 80. (Default: ``None``)\n\n Returns:\n Tensor: Output tensor in decibel scale\n \"\"\"\n x_db = multiplier * torch.log10(torch.clamp(x, min=amin))\n x_db -= multiplier * db_multiplier\n\n if top_db is not None:\n # Expand batch\n shape = x_db.size()\n packed_channels = shape[-3] if x_db.dim() > 2 else 1\n x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])\n\n x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))\n\n # Repack batch\n x_db = x_db.reshape(shape)\n\n return x_db\n\n\ndef DB_to_amplitude(\n x: Tensor,\n ref: float,\n power: float\n) -> Tensor:\n r\"\"\"Turn a tensor from the decibel scale to the power/amplitude scale.\n\n Args:\n x (Tensor): Input tensor before being converted to power/amplitude scale.\n ref (float): Reference which the output will be scaled by.\n power (float): If power equals 1, will compute DB to power. 
If 0.5, will compute DB to amplitude.\n\n Returns:\n Tensor: Output tensor in power/amplitude scale.\n \"\"\"\n return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)\n\n\ndef create_fb_matrix(\n n_freqs: int,\n f_min: float,\n f_max: float,\n n_mels: int,\n sample_rate: int,\n norm: Optional[str] = None\n) -> Tensor:\n r\"\"\"Create a frequency bin conversion matrix.\n\n Args:\n n_freqs (int): Number of frequencies to highlight/apply\n f_min (float): Minimum frequency (Hz)\n f_max (float): Maximum frequency (Hz)\n n_mels (int): Number of mel filterbanks\n sample_rate (int): Sample rate of the audio waveform\n norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band\n (area normalization). (Default: ``None``)\n\n Returns:\n Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)\n meaning number of frequencies to highlight/apply to x the number of filterbanks.\n Each column is a filterbank so that assuming there is a matrix A of\n size (..., ``n_freqs``), the applied result would be\n ``A * create_fb_matrix(A.size(-1), ...)``.\n \"\"\"\n\n if norm is not None and norm != \"slaney\":\n raise ValueError(\"norm must be one of None or 'slaney'\")\n\n # freq bins\n # Equivalent filterbank construction by Librosa\n all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)\n\n # calculate mel freq bins\n # hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))\n m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))\n m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))\n m_pts = torch.linspace(m_min, m_max, n_mels + 2)\n # mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)\n f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)\n # calculate the difference between each mel point and each stft freq point in hertz\n f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)\n slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)\n # create overlapping triangles\n zero = torch.zeros(1)\n down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)\n up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)\n fb = torch.max(zero, torch.min(down_slopes, up_slopes))\n\n if norm is not None and norm == \"slaney\":\n # Slaney-style mel is scaled to be approx constant energy per channel\n enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])\n fb *= enorm.unsqueeze(0)\n\n if (fb.max(dim=0).values == 0.).any():\n warnings.warn(\n \"At least one mel filterbank has all zero values. \"\n f\"The value for `n_mels` ({n_mels}) may be set too high. 
\"\n f\"Or, the value for `n_freqs` ({n_freqs}) may be set too low.\"\n )\n\n return fb\n\n\ndef create_dct(\n n_mfcc: int,\n n_mels: int,\n norm: Optional[str]\n) -> Tensor:\n r\"\"\"Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),\n normalized depending on norm.\n\n Args:\n n_mfcc (int): Number of mfc coefficients to retain\n n_mels (int): Number of mel filterbanks\n norm (str or None): Norm to use (either 'ortho' or None)\n\n Returns:\n Tensor: The transformation matrix, to be right-multiplied to\n row-wise data of size (``n_mels``, ``n_mfcc``).\n \"\"\"\n # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II\n n = torch.arange(float(n_mels))\n k = torch.arange(float(n_mfcc)).unsqueeze(1)\n dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels)\n if norm is None:\n dct *= 2.0\n else:\n assert norm == \"ortho\"\n dct[0] *= 1.0 / math.sqrt(2.0)\n dct *= math.sqrt(2.0 / float(n_mels))\n return dct.t()\n\n\ndef mu_law_encoding(\n x: Tensor,\n quantization_channels: int\n) -> Tensor:\n r\"\"\"Encode signal based on mu-law companding. For more info see the\n `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_\n\n This algorithm assumes the signal has been scaled to between -1 and 1 and\n returns a signal encoded with values from 0 to quantization_channels - 1.\n\n Args:\n x (Tensor): Input tensor\n quantization_channels (int): Number of channels\n\n Returns:\n Tensor: Input after mu-law encoding\n \"\"\"\n mu = quantization_channels - 1.0\n if not x.is_floating_point():\n x = x.to(torch.float)\n mu = torch.tensor(mu, dtype=x.dtype)\n x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)\n x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)\n return x_mu\n\n\ndef mu_law_decoding(\n x_mu: Tensor,\n quantization_channels: int\n) -> Tensor:\n r\"\"\"Decode mu-law encoded signal. For more info see the\n `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_\n\n This expects an input with values between 0 and quantization_channels - 1\n and returns a signal scaled between -1 and 1.\n\n Args:\n x_mu (Tensor): Input tensor\n quantization_channels (int): Number of channels\n\n Returns:\n Tensor: Input after mu-law decoding\n \"\"\"\n mu = quantization_channels - 1.0\n if not x_mu.is_floating_point():\n x_mu = x_mu.to(torch.float)\n mu = torch.tensor(mu, dtype=x_mu.dtype)\n x = ((x_mu) / mu) * 2 - 1.0\n x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu\n return x\n\n\ndef complex_norm(\n complex_tensor: Tensor,\n power: float = 1.0\n) -> Tensor:\n r\"\"\"Compute the norm of complex tensor input.\n\n Args:\n complex_tensor (Tensor): Tensor shape of `(..., complex=2)`\n power (float): Power of the norm. (Default: `1.0`).\n\n Returns:\n Tensor: Power of the normed input tensor. Shape of `(..., )`\n \"\"\"\n\n # Replace by torch.norm once issue is fixed\n # https://github.com/pytorch/pytorch/issues/34279\n return complex_tensor.pow(2.).sum(-1).pow(0.5 * power)\n\n\ndef angle(\n complex_tensor: Tensor\n) -> Tensor:\n r\"\"\"Compute the angle of complex tensor input.\n\n Args:\n complex_tensor (Tensor): Tensor shape of `(..., complex=2)`\n\n Return:\n Tensor: Angle of a complex tensor. 
Shape of `(..., )`\n \"\"\"\n return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])\n\n\ndef magphase(\n complex_tensor: Tensor,\n power: float = 1.0\n) -> Tuple[Tensor, Tensor]:\n r\"\"\"Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase.\n\n Args:\n complex_tensor (Tensor): Tensor shape of `(..., complex=2)`\n power (float): Power of the norm. (Default: `1.0`)\n\n Returns:\n (Tensor, Tensor): The magnitude and phase of the complex tensor\n \"\"\"\n mag = complex_norm(complex_tensor, power)\n phase = angle(complex_tensor)\n return mag, phase\n\n\ndef phase_vocoder(\n complex_specgrams: Tensor,\n rate: float,\n phase_advance: Tensor\n) -> Tensor:\n r\"\"\"Given a STFT tensor, speed up in time without modifying pitch by a\n factor of ``rate``.\n\n Args:\n complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)`\n rate (float): Speed-up factor\n phase_advance (Tensor): Expected phase advance in each bin. Dimension of (freq, 1)\n\n Returns:\n Tensor: Complex Specgrams Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)`\n\n Example\n >>> freq, hop_length = 1025, 512\n >>> # (channel, freq, time, complex=2)\n >>> complex_specgrams = torch.randn(2, freq, 300, 2)\n >>> rate = 1.3 # Speed up by 30%\n >>> phase_advance = torch.linspace(\n >>> 0, math.pi * hop_length, freq)[..., None]\n >>> x = phase_vocoder(complex_specgrams, rate, phase_advance)\n >>> x.shape # with 231 == ceil(300 / 1.3)\n torch.Size([2, 1025, 231, 2])\n \"\"\"\n\n # pack batch\n shape = complex_specgrams.size()\n complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:]))\n\n time_steps = torch.arange(0,\n complex_specgrams.size(-2),\n rate,\n device=complex_specgrams.device,\n dtype=complex_specgrams.dtype)\n\n alphas = time_steps % 1.0\n phase_0 = angle(complex_specgrams[..., :1, :])\n\n # Time Padding\n complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2])\n\n # (new_bins, freq, 2)\n complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long())\n complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long())\n\n angle_0 = angle(complex_specgrams_0)\n angle_1 = angle(complex_specgrams_1)\n\n norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1)\n norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1)\n\n phase = angle_1 - angle_0 - phase_advance\n phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))\n\n # Compute Phase Accum\n phase = phase + phase_advance\n phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)\n phase_acc = torch.cumsum(phase, -1)\n\n mag = alphas * norm_1 + (1 - alphas) * norm_0\n\n real_stretch = mag * torch.cos(phase_acc)\n imag_stretch = mag * torch.sin(phase_acc)\n\n complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1)\n\n # unpack batch\n complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:])\n\n return complex_specgrams_stretch\n\n\ndef mask_along_axis_iid(\n specgrams: Tensor,\n mask_param: int,\n mask_value: float,\n axis: int\n) -> Tensor:\n r\"\"\"\n Apply a mask along ``axis``. 
Mask will be applied from indices ``[v_0, v_0 + v)``, where\n ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.\n\n Args:\n specgrams (Tensor): Real spectrograms (batch, channel, freq, time)\n mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]\n mask_value (float): Value to assign to the masked columns\n axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)\n\n Returns:\n Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)\n \"\"\"\n\n if axis != 2 and axis != 3:\n raise ValueError('Only Frequency and Time masking are supported')\n\n device = specgrams.device\n dtype = specgrams.dtype\n\n value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param\n min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)\n\n # Create broadcastable mask\n mask_start = min_value[..., None, None]\n mask_end = (min_value + value)[..., None, None]\n mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)\n\n # Per batch example masking\n specgrams = specgrams.transpose(axis, -1)\n specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)\n specgrams = specgrams.transpose(axis, -1)\n\n return specgrams\n\n\ndef mask_along_axis(\n specgram: Tensor,\n mask_param: int,\n mask_value: float,\n axis: int\n) -> Tensor:\n r\"\"\"\n Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where\n ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.\n All examples will have the same mask interval.\n\n Args:\n specgram (Tensor): Real spectrogram (channel, freq, time)\n mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]\n mask_value (float): Value to assign to the masked columns\n axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)\n\n Returns:\n Tensor: Masked spectrogram of dimensions (channel, freq, time)\n \"\"\"\n\n # pack batch\n shape = specgram.size()\n specgram = specgram.reshape([-1] + list(shape[-2:]))\n\n value = torch.rand(1) * mask_param\n min_value = torch.rand(1) * (specgram.size(axis) - value)\n\n mask_start = (min_value.long()).squeeze()\n mask_end = (min_value.long() + value.long()).squeeze()\n\n assert mask_end - mask_start < mask_param\n if axis == 1:\n specgram[:, mask_start:mask_end] = mask_value\n elif axis == 2:\n specgram[:, :, mask_start:mask_end] = mask_value\n else:\n raise ValueError('Only Frequency and Time masking are supported')\n\n # unpack batch\n specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])\n\n return specgram\n\n\ndef compute_deltas(\n specgram: Tensor,\n win_length: int = 5,\n mode: str = \"replicate\"\n) -> Tensor:\n r\"\"\"Compute delta coefficients of a tensor, usually a spectrogram:\n\n .. 
math::\n d_t = \\frac{\\sum_{n=1}^{\\text{N}} n (c_{t+n} - c_{t-n})}{2 \\sum_{n=1}^{\\text{N}} n^2}\n\n where :math:`d_t` is the deltas at time :math:`t`,\n :math:`c_t` is the spectrogram coefficients at time :math:`t`,\n :math:`N` is ``(win_length-1)//2``.\n\n Args:\n specgram (Tensor): Tensor of audio of dimension (..., freq, time)\n win_length (int, optional): The window length used for computing delta (Default: ``5``)\n mode (str, optional): Mode parameter passed to padding (Default: ``\"replicate\"``)\n\n Returns:\n Tensor: Tensor of deltas of dimension (..., freq, time)\n\n Example\n >>> specgram = torch.randn(1, 40, 1000)\n >>> delta = compute_deltas(specgram)\n >>> delta2 = compute_deltas(delta)\n \"\"\"\n device = specgram.device\n dtype = specgram.dtype\n\n # pack batch\n shape = specgram.size()\n specgram = specgram.reshape(1, -1, shape[-1])\n\n assert win_length >= 3\n\n n = (win_length - 1) // 2\n\n # twice sum of integer squared\n denom = n * (n + 1) * (2 * n + 1) / 3\n\n specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)\n\n kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)\n\n output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom\n\n # unpack batch\n output = output.reshape(shape)\n\n return output\n\n\ndef _compute_nccf(\n waveform: Tensor,\n sample_rate: int,\n frame_time: float,\n freq_low: int\n) -> Tensor:\n r\"\"\"\n Compute Normalized Cross-Correlation Function (NCCF).\n\n .. math::\n \\phi_i(m) = \\frac{\\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}},\n\n where\n :math:`\\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,\n :math:`w` is the waveform,\n :math:`N` is the length of a frame,\n :math:`b_i` is the beginning of frame :math:`i`,\n :math:`E(j)` is the energy :math:`\\sum_{n=j}^{j+N-1} w^2(n)`.\n \"\"\"\n\n EPSILON = 10 ** (-9)\n\n # Number of lags to check\n lags = int(math.ceil(sample_rate / freq_low))\n\n frame_size = int(math.ceil(sample_rate * frame_time))\n\n waveform_length = waveform.size()[-1]\n num_of_frames = int(math.ceil(waveform_length / frame_size))\n\n p = lags + num_of_frames * frame_size - waveform_length\n waveform = torch.nn.functional.pad(waveform, (0, p))\n\n # Compute lags\n output_lag = []\n for lag in range(1, lags + 1):\n s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]\n s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]\n\n output_frames = (\n (s1 * s2).sum(-1)\n / (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)\n / (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)\n )\n\n output_lag.append(output_frames.unsqueeze(-1))\n\n nccf = torch.cat(output_lag, -1)\n\n return nccf\n\n\ndef _combine_max(\n a: Tuple[Tensor, Tensor],\n b: Tuple[Tensor, Tensor],\n thresh: float = 0.99\n) -> Tuple[Tensor, Tensor]:\n \"\"\"\n Take value from first if bigger than a multiplicative factor of the second, elementwise.\n \"\"\"\n mask = (a[0] > thresh * b[0])\n values = mask * a[0] + ~mask * b[0]\n indices = mask * a[1] + ~mask * b[1]\n return values, indices\n\n\ndef _find_max_per_frame(\n nccf: Tensor,\n sample_rate: int,\n freq_high: int\n) -> Tensor:\n r\"\"\"\n For each frame, take the highest value of NCCF,\n apply centered median smoothing, and convert to frequency.\n\n Note: If the max among all the lags is very close\n to the first half of lags, then the latter is taken.\n \"\"\"\n\n lag_min = int(math.ceil(sample_rate / freq_high))\n\n # Find near enough max that 
is smallest\n\n best = torch.max(nccf[..., lag_min:], -1)\n\n half_size = nccf.shape[-1] // 2\n half = torch.max(nccf[..., lag_min:half_size], -1)\n\n best = _combine_max(half, best)\n indices = best[1]\n\n # Add back minimal lag\n indices += lag_min\n # Add 1 empirical calibration offset\n indices += 1\n\n return indices\n\n\ndef _median_smoothing(\n indices: Tensor,\n win_length: int\n) -> Tensor:\n r\"\"\"\n Apply median smoothing to the 1D tensor over the given window.\n \"\"\"\n\n # Centered windowed\n pad_length = (win_length - 1) // 2\n\n # \"replicate\" padding in any dimension\n indices = torch.nn.functional.pad(\n indices, (pad_length, 0), mode=\"constant\", value=0.\n )\n\n indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)\n roll = indices.unfold(-1, win_length, 1)\n\n values, _ = torch.median(roll, -1)\n return values\n\n\ndef detect_pitch_frequency(\n waveform: Tensor,\n sample_rate: int,\n frame_time: float = 10 ** (-2),\n win_length: int = 30,\n freq_low: int = 85,\n freq_high: int = 3400,\n) -> Tensor:\n r\"\"\"Detect pitch frequency.\n\n It is implemented using normalized cross-correlation function and median smoothing.\n\n Args:\n waveform (Tensor): Tensor of audio of dimension (..., time)\n sample_rate (int): The sample rate of the waveform (Hz)\n frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``).\n win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).\n freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).\n freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).\n\n Returns:\n Tensor: Tensor of freq of dimension (..., frame)\n \"\"\"\n # pack batch\n shape = list(waveform.size())\n waveform = waveform.reshape([-1] + shape[-1:])\n\n nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)\n indices = _find_max_per_frame(nccf, sample_rate, freq_high)\n indices = _median_smoothing(indices, win_length)\n\n # Convert indices to frequency\n EPSILON = 10 ** (-9)\n freq = sample_rate / (EPSILON + indices.to(torch.float))\n\n # unpack batch\n freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))\n\n return freq\n\n\ndef sliding_window_cmn(\n waveform: Tensor,\n cmn_window: int = 600,\n min_cmn_window: int = 100,\n center: bool = False,\n norm_vars: bool = False,\n) -> Tensor:\n r\"\"\"\n Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.\n\n Args:\n waveform (Tensor): Tensor of audio of dimension (..., time, freq)\n cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)\n min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).\n Only applicable if center == false, ignored if center==true (int, default = 100)\n center (bool, optional): If true, use a window centered on the current frame\n (to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)\n norm_vars (bool, optional): If true, normalize variance to one. 
(bool, default = false)\n\n Returns:\n Tensor: Tensor of dimension (..., time, freq), the same shape as the input\n \"\"\"\n input_shape = waveform.shape\n num_frames, num_feats = input_shape[-2:]\n waveform = waveform.view(-1, num_frames, num_feats)\n num_channels = waveform.shape[0]\n\n dtype = waveform.dtype\n device = waveform.device\n last_window_start = last_window_end = -1\n cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)\n cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)\n cmn_waveform = torch.zeros(\n num_channels, num_frames, num_feats, dtype=dtype, device=device)\n for t in range(num_frames):\n window_start = 0\n window_end = 0\n if center:\n window_start = t - cmn_window // 2\n window_end = window_start + cmn_window\n else:\n window_start = t - cmn_window\n window_end = t + 1\n if window_start < 0:\n window_end -= window_start\n window_start = 0\n if not center:\n if window_end > t:\n window_end = max(t + 1, min_cmn_window)\n if window_end > num_frames:\n window_start -= (window_end - num_frames)\n window_end = num_frames\n if window_start < 0:\n window_start = 0\n if last_window_start == -1:\n input_part = waveform[:, window_start: window_end - window_start, :]\n cur_sum += torch.sum(input_part, 1)\n if norm_vars:\n cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :]\n else:\n if window_start > last_window_start:\n frame_to_remove = waveform[:, last_window_start, :]\n cur_sum -= frame_to_remove\n if norm_vars:\n cur_sumsq -= (frame_to_remove ** 2)\n if window_end > last_window_end:\n frame_to_add = waveform[:, last_window_end, :]\n cur_sum += frame_to_add\n if norm_vars:\n cur_sumsq += (frame_to_add ** 2)\n window_frames = window_end - window_start\n last_window_start = window_start\n last_window_end = window_end\n cmn_waveform[:, t, :] = waveform[:, t, :] - cur_sum / window_frames\n if norm_vars:\n if window_frames == 1:\n cmn_waveform[:, t, :] = torch.zeros(\n num_channels, num_feats, dtype=dtype, device=device)\n else:\n variance = cur_sumsq\n variance = variance / window_frames\n variance -= ((cur_sum ** 2) / (window_frames ** 2))\n variance = torch.pow(variance, -0.5)\n cmn_waveform[:, t, :] *= variance\n\n cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats))\n if len(input_shape) == 2:\n cmn_waveform = cmn_waveform.squeeze(0)\n return cmn_waveform\n\n\ndef spectral_centroid(\n waveform: Tensor,\n sample_rate: int,\n pad: int,\n window: Tensor,\n n_fft: int,\n hop_length: int,\n win_length: int,\n) -> Tensor:\n r\"\"\"\n Compute the spectral centroid for each channel along the time axis.\n\n The spectral centroid is defined as the weighted average of the\n frequency values, weighted by their magnitude.\n\n Args:\n waveform (Tensor): Tensor of audio of dimension (..., time)\n sample_rate (int): Sample rate of the audio waveform\n pad (int): Two sided padding of signal\n window (Tensor): Window tensor that is applied/multiplied to each frame/window\n n_fft (int): Size of FFT\n hop_length (int): Length of hop between STFT windows\n win_length (int): Window size\n\n Returns:\n Tensor: Dimension (..., time)\n \"\"\"\n specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length,\n win_length=win_length, power=1., normalized=False)\n freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2,\n device=specgram.device).reshape((-1, 1))\n freq_dim = -2\n return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)\n\n\ndef compute_kaldi_pitch(\n waveform: torch.Tensor,\n 
sample_rate: float,\n frame_length: float = 25.0,\n frame_shift: float = 10.0,\n min_f0: float = 50,\n max_f0: float = 400,\n soft_min_f0: float = 10.0,\n penalty_factor: float = 0.1,\n lowpass_cutoff: float = 1000,\n resample_frequency: float = 4000,\n delta_pitch: float = 0.005,\n nccf_ballast: float = 7000,\n lowpass_filter_width: int = 1,\n upsample_filter_width: int = 5,\n max_frames_latency: int = 0,\n frames_per_chunk: int = 0,\n simulate_first_pass_online: bool = False,\n recompute_frame: int = 500,\n snip_edges: bool = True,\n) -> torch.Tensor:\n \"\"\"Extract pitch based on method described in [1].\n\n This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi.\n\n Args:\n waveform (Tensor):\n The input waveform of shape `(..., time)`.\n sample_rate (float):\n Sample rate of `waveform`.\n frame_length (float, optional):\n Frame length in milliseconds. (default: 25.0)\n frame_shift (float, optional):\n Frame shift in milliseconds. (default: 10.0)\n min_f0 (float, optional):\n Minimum F0 to search for (Hz) (default: 50.0)\n max_f0 (float, optional):\n Maximum F0 to search for (Hz) (default: 400.0)\n soft_min_f0 (float, optional):\n Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0)\n penalty_factor (float, optional):\n Cost factor for F0 change. (default: 0.1)\n lowpass_cutoff (float, optional):\n Cutoff frequency for LowPass filter (Hz) (default: 1000)\n resample_frequency (float, optional):\n Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff.\n (default: 4000)\n delta_pitch (float, optional):\n Smallest relative change in pitch that our algorithm measures. (default: 0.005)\n nccf_ballast (float, optional):\n Increasing this factor reduces NCCF for quiet frames (default: 7000)\n lowpass_filter_width (int, optional):\n Integer that determines filter width of lowpass filter, more gives sharper filter.\n (default: 1)\n upsample_filter_width (int, optional):\n Integer that determines filter width when upsampling NCCF. (default: 5)\n max_frames_latency (int, optional):\n Maximum number of frames of latency that we allow pitch tracking to introduce into\n the feature processing (affects output only if ``frames_per_chunk > 0`` and\n ``simulate_first_pass_online=True``) (default: 0)\n frames_per_chunk (int, optional):\n The number of frames used for energy normalization. (default: 0)\n simulate_first_pass_online (bool, optional):\n If true, the function will output features that correspond to what an online decoder\n would see in the first pass of decoding -- not the final version of the features,\n which is the default. (default: False)\n Relevant if ``frames_per_chunk > 0``.\n recompute_frame (int, optional):\n Only relevant for compatibility with online pitch extraction.\n A non-critical parameter; the frame at which we recompute some of the forward pointers,\n after revising our estimate of the signal energy.\n Relevant if ``frames_per_chunk > 0``. (default: 500)\n snip_edges (bool, optional):\n If this is set to false, the incomplete frames near the ending edge won't be snipped,\n so that the number of frames is the file size divided by the frame-shift.\n This makes different types of features give the same number of frames. (default: True)\n\n Returns:\n Tensor: Pitch feature. Shape: ``(batch, frames, 2)`` where the last dimension\n corresponds to pitch and NCCF.\n\n Reference:\n - A pitch extraction algorithm tuned for automatic speech recognition\n\n P. Ghahremani, B. BabaAli, D. Povey, K. Riedhammer, J. 
Trmal and S. Khudanpur\n\n 2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),\n\n Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049.\n \"\"\"\n shape = waveform.shape\n waveform = waveform.reshape(-1, shape[-1])\n result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch(\n waveform, sample_rate, frame_length, frame_shift,\n min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff,\n resample_frequency, delta_pitch, nccf_ballast,\n lowpass_filter_width, upsample_filter_width, max_frames_latency,\n frames_per_chunk, simulate_first_pass_online, recompute_frame,\n snip_edges,\n )\n result = result.reshape(shape[:-1] + result.shape[-2:])\n return result\n"
] |
[
[
"torch.abs",
"torch.max",
"torch.zeros",
"torch.cat",
"torch.sin",
"torch.sign",
"torch.sum",
"torch.pow",
"torch.norm",
"torch.median",
"torch.round",
"torch.tensor",
"torch.rand",
"torch.arange",
"torch.nn.functional.pad",
"torch.cos",
"torch.linspace",
"torch.min",
"torch.nn.functional.conv1d",
"torch.log1p",
"torch.stack",
"torch.ops.torchaudio.kaldi_ComputeKaldiPitch",
"torch.atan2",
"torch.view_as_real",
"torch.istft",
"torch.clamp",
"torch.cumsum",
"torch.stft"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
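Note on the torchaudio-style row above: the helpers in its code field compose directly, and the `phase_vocoder` docstring already carries a worked example. Below is a minimal sketch of that composition, assuming `phase_vocoder` and `magphase` from the code above are in scope; the shapes are the docstring's made-up example values, not anything this dataset prescribes.

import math
import torch

freq, hop_length = 1025, 512
# Complex spectrogram in the (..., freq, time, complex=2) layout used throughout the row above.
complex_specgrams = torch.randn(2, freq, 300, 2)
# Expected phase advance per frequency bin, shape (freq, 1), as in the docstring example.
phase_advance = torch.linspace(0, math.pi * hop_length, freq)[..., None]
# Speed up by 1.3x without changing pitch, then split the result into magnitude and phase.
stretched = phase_vocoder(complex_specgrams, rate=1.3, phase_advance=phase_advance)
mag, phase = magphase(stretched)
print(stretched.shape)  # torch.Size([2, 1025, 231, 2]), since ceil(300 / 1.3) == 231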
abdullah-zaiter/Sign-Language-By-Glove
|
[
"48734b870cc797b817e2d8ac33d822d0c1cea000"
] |
[
"Bluetooth/ble_comm/Scripts/dataAnalysis.py"
] |
[
"\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef loadFromFile(nameFile):\n d = np.load('../data/'+nameFile+name+'array.npy')\n print('Reading from' + '../data/'+nameFile+name+'array.npy')\n return d\n\ndef diff(x, y, y2, name):\n newY = abs(y - y2)\n fig = plt.figure()\n ax = plt.subplot(111)\n\n ax.plot(x, newY, 'r.', label=name)\n plt.xlabel('Delay(us)')\n plt.ylabel('Diferença de Dados')\n plt.legend()\n\n plt.show()\n\n\nif __name__ == \"__main__\": \n name = 'Com_Sensores'\n delay = loadFromFile('delay')\n acerto = loadFromFile('acerto')\n tempDec = loadFromFile('TempoDec')\n\n name = 'Sem_Sensores'\n acertoSS = loadFromFile('acerto')\n tempDecSS = loadFromFile('TempoDec')\n\n diff(delay, acerto, acertoSS, 'Acerto')\n diff(delay, tempDec, tempDecSS, 'Tempo de Aquisição Python')"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlabel",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
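Note on the dataAnalysis.py row above: the script is a simple load-and-compare pattern that plots the absolute difference between two runs of the same metric. A minimal self-contained sketch of that pattern follows; the .npy file names here are placeholders (the real script builds paths from a global `name` prefix under ../data/).

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical inputs: one shared x-axis array and two runs of the same metric.
delay = np.load('delay_example.npy')
run_with_sensors = np.load('metric_with_sensors.npy')
run_without_sensors = np.load('metric_without_sensors.npy')

# Plot the absolute difference between the two runs, as diff() does in the row above.
plt.plot(delay, np.abs(run_with_sensors - run_without_sensors), 'r.', label='difference')
plt.xlabel('Delay (us)')
plt.ylabel('Absolute difference')
plt.legend()
plt.show()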
linklab/link_rl
|
[
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99",
"e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99"
] |
[
"codes/f_main/federation_main/federated_main/main_chief.py",
"codes/f_main/trade_main/upbit_trade_main.py",
"temp/cartpole_a2c.py",
"common/fast_rl/replay_buffer.py",
"codes/b_environments/or_gym/envs/classic_or/vmpacking.py",
"temp/ppo/minimal_ppo.py",
"codes/g_play/play_bfore_train.py",
"codes/c_models/continuous_action/continuous_sac_model.py",
"common/environments/gym/breakout.py",
"common/fast_rl/common/noise.py",
"z_externals/handson_second_edition/Chapter12/02_pong_a2c.py",
"codes/b_environments/quanser_rotary_inverted_pendulum/adjust_angle.py"
] |
[
"import sys, os\nfrom multiprocessing import Process\n\nimport torch\n\nfrom codes.a_config.parameters import PARAMETERS as params\nimport codes.f_main.federation_main.utils as utils\nfrom rl_main import rl_utils\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nif __name__ == \"__main__\":\n utils.make_output_folders()\n utils.ask_file_removal(device)\n\n env = rl_utils.get_environment(params=params)\n rl_model = rl_utils.get_rl_model(env, params=params)\n\n utils.print_configuration(env, rl_model, params)\n\n try:\n chief = Process(target=utils.run_chief, args=(params,))\n chief.start()\n chief.join()\n except KeyboardInterrupt as error:\n print(\"=== {0:>8} is aborted by keyboard interrupt\".format('Main-Chief'))\n",
"#!/usr/bin/env python3\n\nimport torch\nimport torch.optim as optim\nimport os, sys\nimport warnings\nimport numpy as np\n\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\nPROJECT_HOME = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, os.pardir, os.pardir))\nif PROJECT_HOME not in sys.path:\n sys.path.append(PROJECT_HOME)\n\nfrom common.fast_rl.common.utils import EarlyStopping\nfrom common.environments import get_data\nfrom codes.f_utils import common_utils\nfrom common.environments import TimeUnit, TradeEnvironmentType, Action\nfrom common.environments import UpbitEnvironment\nfrom common.environments import EpsilonGreedyTradeDQNActionSelector, \\\n ArgmaxTradeActionSelector, RandomTradeDQNActionSelector\n\nfrom common.fast_rl import rl_agent, value_based_model, actions, experience_single, replay_buffer\nfrom common.fast_rl.common import utils\nfrom common.fast_rl.common import statistics\nfrom rl_main.trade_main import visualizer\nfrom common.slack import PushSlack\n\npusher = PushSlack()\n\n##### NOTE #####\nfrom codes.a_config.parameters import PARAMETERS as params\n##### NOTE #####\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nMODEL_SAVE_DIR = os.path.join(PROJECT_HOME, \"out\", \"model_save_files\")\nif not os.path.exists(MODEL_SAVE_DIR):\n os.makedirs(MODEL_SAVE_DIR)\n\n\ndef evaluate(env, agent, verbose=True):\n experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(env, agent, gamma=params.GAMMA, n_step=params.N_STEP)\n\n done = False\n state = env.reset()\n agent_state = agent.initial_agent_state()\n\n episode_reward = 0.0\n num_buys = 0\n info = None\n step_idx = 0\n while not done:\n step_idx += 1\n states_input = []\n processed_state = experience_source.get_processed_state(state)\n states_input.append(processed_state)\n\n agent_states_input = []\n agent_states_input.append(agent_state)\n\n new_actions, new_agent_states = agent(states_input, agent_states_input)\n\n agent_state = new_agent_states[0]\n action = new_actions[0]\n\n if action == Action.MARKET_BUY.value:\n num_buys += 1\n if num_buys > 10:\n action_str = \"BUY({0})\".format(10)\n else:\n action_str = \"BUY({0})\".format(num_buys)\n else:\n action_str = env.get_action_meanings()[action]\n\n msg = \"[{0:2}|{1}] OHLCV: {2}, {3}, {4}, {5}, {6:<10.1f}, Action: {7:7} --> \".format(\n step_idx,\n env.data.iloc[env.transaction_state_idx]['datetime_krw'],\n env.data.iloc[env.transaction_state_idx]['open'],\n env.data.iloc[env.transaction_state_idx]['high'],\n env.data.iloc[env.transaction_state_idx]['low'],\n env.data.iloc[env.transaction_state_idx]['final'],\n env.data.iloc[env.transaction_state_idx]['volume'],\n action_str\n )\n\n next_state, reward, done, info = env.step(action)\n\n if action in [Action.HOLD.value]:\n msg += \"Reward: {0:.3f}, hold coin: {1:.1f}\".format(\n reward, info[\"hold_coin\"]\n )\n elif action == Action.MARKET_BUY.value:\n if num_buys <= 10:\n coin_krw_str = \"{0:.1f}\".format(info['coin_krw'])\n commission_fee_str = \"{0:.1f}\".format(info['commission_fee'])\n else:\n coin_krw_str = \"-\"\n commission_fee_str = \"-\"\n\n msg += \"Reward: {0:.3f}, slippage: {1:.1f}, coin_unit_price: {2:.1f}, \" \\\n \"coin_krw: {3}, commission: {4}, hold coin: {5:.1f}\".format(\n reward, info[\"slippage\"], info[\"coin_unit_price\"],\n coin_krw_str, 
commission_fee_str, info[\"hold_coin\"]\n )\n elif action == Action.MARKET_SELL.value:\n msg += \"Reward: {0:.3f}, slippage: {1:.1f}, coin_unit_price: {2:.1f}, \" \\\n \"coin_krw: {3:.1f}, commission: {4:.1f}, sold coin: {5:.1f}, profit: {6:.1f}\".format(\n reward, info[\"slippage\"], info[\"coin_unit_price\"],\n info['coin_krw'], info['commission_fee'], info[\"sold_coin\"], info[\"profit\"]\n )\n else:\n raise ValueError()\n if verbose:\n print(msg)\n\n episode_reward += reward\n state = next_state\n\n if verbose:\n print(\"SAMPLED TRANSACTION DONE! - START DATETIME: {0}, EPISODE REWARD: {1:>8.3f}, \"\n \"PROFIT: {2:>10.1f}, STEPS: {3}\".format(\n env.transaction_start_datetime, episode_reward, info[\"profit\"], step_idx\n ))\n\n return info[\"profit\"], step_idx\n\n\ndef train(coin_name, time_unit, train_env, evaluate_env):\n common_utils.print_fast_rl_params(params)\n\n params.BATCH_SIZE *= params.TRAIN_STEP_FREQ\n\n net = value_based_model.DuelingDQNSmallCNN(\n observation_shape=train_env.observation_space.shape,\n n_actions=train_env.action_space.n\n ).to(device)\n print(net)\n print(\"ACTION MEANING: {0}\".format(train_env.get_action_meanings()))\n\n tgt_net = value_based_model.DuelingDQNSmallCNN(\n observation_shape=train_env.observation_space.shape,\n n_actions=train_env.action_space.n\n ).to(device)\n\n action_selector = EpsilonGreedyTradeDQNActionSelector(epsilon=params.EPSILON_INIT, env=train_env)\n agent = rl_agent.DQNAgent(dqn_model=net, action_selector=action_selector, device=device)\n\n argmax_action_selector = ArgmaxTradeActionSelector(env=evaluate_env)\n evaluate_agent = rl_agent.DQNAgent(dqn_model=net, action_selector=argmax_action_selector, device=device)\n\n random_action_selector = RandomTradeDQNActionSelector(env=evaluate_env)\n random_agent = rl_agent.DQNAgent(dqn_model=None, action_selector=random_action_selector, device=device)\n\n experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(\n train_env, agent, gamma=params.GAMMA, n_step=params.N_STEP\n )\n buffer = replay_buffer.ExperienceReplayBuffer(experience_source, buffer_size=params.REPLAY_BUFFER_SIZE)\n optimizer = optim.Adam(net.parameters(), lr=params.LEARNING_RATE)\n\n step_idx = 0\n\n last_loss = 0.0\n\n evaluate_steps = []\n evaluate_dqn_total_profits = []\n evaluate_random_total_profits = []\n\n early_stopping = EarlyStopping(\n patience=params.STOP_PATIENCE_COUNT,\n evaluation_min_threshold=params.TRAIN_STOP_EPISODE_REWARD,\n verbose=True,\n delta=0.0,\n model_save_dir=MODEL_SAVE_DIR,\n model_save_file_prefix=params.ENVIRONMENT_ID.value + \"_\" + coin_name + \"_\" + time_unit.value,\n agent=agent\n )\n\n with utils.SpeedTracker(params=params, frame=False, early_stopping=None) as reward_tracker:\n while step_idx < params.MAX_GLOBAL_STEP:\n step_idx += params.TRAIN_STEP_FREQ\n last_entry = buffer.populate(params.TRAIN_STEP_FREQ)\n\n if epsilon_tracker:\n epsilon_tracker.udpate(step_idx)\n\n episode_rewards = experience_source.pop_episode_reward_lst()\n\n solved = False\n if episode_rewards:\n for episode_reward in episode_rewards:\n reward_tracker.set_episode_reward(\n episode_reward, step_idx, action_selector.epsilon, last_info=last_entry.info,\n last_loss=last_loss, model=net\n )\n\n if reward_tracker.done_episodes % params.TEST_PERIOD_EPISODE == 0:\n print(\"#\" * 200)\n print(\"[TEST START]\")\n evaluate(evaluate_env, evaluate_agent)\n\n evaluate_steps.append(step_idx)\n\n dqn_total_profit, _ = evaluate_random(\n \"DQN\", evaluate_env, evaluate_agent, num_episodes=100\n )\n 
evaluate_dqn_total_profits.append(dqn_total_profit)\n\n random_total_profit, _ = evaluate_random(\n \"RANDOM\", evaluate_env, random_agent, num_episodes=100\n )\n evaluate_random_total_profits.append(random_total_profit)\n\n solved = early_stopping(dqn_total_profit, step_idx=step_idx)\n\n visualizer.draw_performance(\n evaluate_steps,\n evaluate_dqn_total_profits,\n evaluate_random_total_profits\n )\n\n print(\"[TEST END]\")\n print(\"#\" * 200)\n\n if solved:\n break\n\n if solved:\n break\n\n optimizer.zero_grad()\n batch = buffer.sample(params.BATCH_SIZE)\n loss_v = value_based_model.calc_loss_double_dqn(batch, net, tgt_net, gamma=params.GAMMA, device=device)\n loss_v.backward()\n optimizer.step()\n\n draw_loss = min(1.0, loss_v.detach().item())\n last_loss = loss_v.detach().item()\n\n if step_idx % params.TARGET_NET_SYNC_STEP_PERIOD < params.TRAIN_STEP_FREQ:\n tgt_net.sync(net)\n\n return net\n\n\ndef evaluate_random(agent_type, env, agent, num_episodes, verbose=True):\n num_positive = 0\n num_negative = 0\n total_profit = 0.0\n total_steps = 0\n\n for _ in range(num_episodes):\n profit, step = evaluate(env, agent, verbose=False)\n if profit > 0:\n num_positive += 1\n else:\n num_negative += 1\n total_profit += profit\n total_steps += step\n\n avg_num_steps_per_episode = total_steps / num_episodes\n\n if verbose:\n print(\"###[{0:6}] POSITIVE: {1}/{3}, NEGATIVE: {2}/{3}, TOTAL PROFIT: {4:.1f}, AVG. STEP FOR EPISODE: {5:.1f}\".format(\n agent_type, num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode\n ))\n\n return total_profit, avg_num_steps_per_episode\n\n\ndef evaluate_sequential_all(agent_type, env, agent, data_size, verbose=True):\n num_positive = 0\n num_negative = 0\n total_profit = 0.0\n total_steps = 0\n\n num_episodes = 0\n env.transaction_state_idx = 0\n while True:\n num_episodes += 1\n profit, step = evaluate(env, agent, verbose=False)\n if profit > 0:\n num_positive += 1\n else:\n num_negative += 1\n total_profit += profit\n total_steps += step\n\n if env.transaction_state_idx >= data_size - 1:\n break\n\n avg_num_steps_per_episode = total_steps / num_episodes\n\n if verbose:\n print(\"###[{0:6}] POSITIVE: {1}/{3}, NEGATIVE: {2}/{3}, TOTAL PROFIT: {4:.1f}, AVG. 
STEP FOR EPISODE: {5:.1f}\".format(\n agent_type, num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode\n ))\n\n return num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode\n\n\ndef main():\n coin_name = \"OMG\"\n time_unit = TimeUnit.ONE_HOUR\n\n train_data_info, evaluate_data_info = get_data(coin_name=coin_name, time_unit=time_unit)\n\n print(train_data_info[\"first_datetime_krw\"], train_data_info[\"last_datetime_krw\"])\n print(evaluate_data_info[\"first_datetime_krw\"], evaluate_data_info[\"last_datetime_krw\"])\n\n train_env = UpbitEnvironment(\n coin_name=coin_name,\n time_unit=time_unit,\n data_info=train_data_info,\n environment_type=TradeEnvironmentType.TRAIN\n )\n\n evaluate_random_env = UpbitEnvironment(\n coin_name=coin_name,\n time_unit=time_unit,\n data_info=evaluate_data_info,\n environment_type=TradeEnvironmentType.TEST_RANDOM,\n )\n\n net = train(coin_name, time_unit, train_env, evaluate_random_env)\n\n print(\"#### TEST SEQUENTIALLY\")\n evaluate_sequential_env = UpbitEnvironment(\n coin_name=coin_name,\n time_unit=time_unit,\n data_info=evaluate_data_info,\n environment_type=TradeEnvironmentType.TEST_SEQUENTIAL,\n )\n\n argmax_action_selector = ArgmaxTradeActionSelector(env=evaluate_sequential_env)\n evaluate_agent = rl_agent.DQNAgent(dqn_model=net, action_selector=argmax_action_selector, device=device)\n sequential_dqn_num_positives = []\n sequential_dqn_num_negatives = []\n sequential_dqn_num_episodes = []\n sequential_dqn_num_steps_per_episode = []\n sequential_dqn_total_profits = []\n for _ in range(10):\n num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode = evaluate_sequential_all(\n \"DQN\", evaluate_sequential_env, evaluate_agent, data_size=len(evaluate_data_info[\"data\"]), verbose=False\n )\n sequential_dqn_num_positives.append(num_positive)\n sequential_dqn_num_negatives.append(num_negative)\n sequential_dqn_num_episodes.append(num_episodes)\n sequential_dqn_total_profits.append(total_profit)\n sequential_dqn_num_steps_per_episode.append(avg_num_steps_per_episode)\n\n dqn_msg = f\"SEQUENTIAL: DQN - {np.mean(sequential_dqn_num_episodes):.1f} EPISODES - \" \\\n f\"POSITIVE: {np.mean(sequential_dqn_num_positives):.1f}, \" \\\n f\"NEGATIVE: {np.mean(sequential_dqn_num_negatives):.1f}, \" \\\n f\"AVERAGE PROFIT {np.mean(sequential_dqn_total_profits):.1f}/STD {np.std(sequential_dqn_total_profits):.1f}, \" \\\n f\"AVERAGE STEP {np.mean(sequential_dqn_num_steps_per_episode):.1f}\"\n print(dqn_msg)\n\n random_action_selector = RandomTradeDQNActionSelector(env=evaluate_sequential_env)\n random_agent = rl_agent.DQNAgent(dqn_model=None, action_selector=random_action_selector, device=device)\n sequential_random_num_positives = []\n sequential_random_num_negatives = []\n sequential_random_num_episodes = []\n sequential_random_num_steps_per_episode = []\n sequential_random_total_profits = []\n for _ in range(10):\n num_positive, num_negative, num_episodes, total_profit, avg_num_steps_per_episode = evaluate_sequential_all(\n \"RANDOM\", evaluate_sequential_env, random_agent, data_size=len(evaluate_data_info[\"data\"]), verbose=False\n )\n sequential_random_num_positives.append(num_positive)\n sequential_random_num_negatives.append(num_negative)\n sequential_random_num_episodes.append(num_episodes)\n sequential_random_total_profits.append(total_profit)\n sequential_random_num_steps_per_episode.append(avg_num_steps_per_episode)\n\n random_msg = f\"SEQUENTIAL: RANDOM - 
{np.mean(sequential_random_num_episodes):.1f} EPISODES - \" \\\n f\"POSITIVE: {np.mean(sequential_random_num_positives):.1f}, \" \\\n f\"NEGATIVE: {np.mean(sequential_random_num_negatives):.1f}, \" \\\n f\"AVERAGE PROFIT {np.mean(sequential_random_total_profits):.1f}/STD {np.std(sequential_random_total_profits):.1f}, \" \\\n f\"AVERAGE STEP {np.mean(sequential_random_num_steps_per_episode):.1f}\"\n print(random_msg)\n\n pusher.send_message(\n \"me\", dqn_msg\n )\n\n pusher.send_message(\n \"me\", random_msg\n )\n\nif __name__ == \"__main__\":\n main()",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport random\nimport os\nimport gym\n\n# Hyper Parameters\nSTATE_DIM = 4\nACTION_DIM = 2\nSTEP = 2000\nSAMPLE_NUMS = 30\n\n\nclass ActorNetwork(nn.Module):\n\n def __init__(self,input_size,hidden_size,action_size):\n super(ActorNetwork, self).__init__()\n self.fc1 = nn.Linear(input_size,hidden_size)\n self.fc2 = nn.Linear(hidden_size,hidden_size)\n self.fc3 = nn.Linear(hidden_size,action_size)\n\n def forward(self,x):\n out = F.relu(self.fc1(x))\n out = F.relu(self.fc2(out))\n out = F.log_softmax(self.fc3(out))\n return out\n\nclass ValueNetwork(nn.Module):\n\n def __init__(self,input_size,hidden_size,output_size):\n super(ValueNetwork, self).__init__()\n self.fc1 = nn.Linear(input_size,hidden_size)\n self.fc2 = nn.Linear(hidden_size,hidden_size)\n self.fc3 = nn.Linear(hidden_size,output_size)\n\n def forward(self,x):\n out = F.relu(self.fc1(x))\n out = F.relu(self.fc2(out))\n out = self.fc3(out)\n return out\n\ndef roll_out(actor_network,task,sample_nums,value_network,init_state):\n #task.reset()\n states = []\n actions = []\n rewards = []\n is_done = False\n final_r = 0\n state = init_state\n\n for j in range(sample_nums):\n states.append(state)\n log_softmax_action = actor_network(Variable(torch.Tensor([state])))\n softmax_action = torch.exp(log_softmax_action)\n action = np.random.choice(ACTION_DIM,p=softmax_action.cpu().data.numpy()[0])\n one_hot_action = [int(k == action) for k in range(ACTION_DIM)]\n next_state,reward,done,_ = task.step(action)\n #fix_reward = -10 if done else 1\n actions.append(one_hot_action)\n rewards.append(reward)\n final_state = next_state\n state = next_state\n if done:\n is_done = True\n state = task.reset()\n break\n if not is_done:\n final_r = value_network(Variable(torch.Tensor([final_state]))).cpu().data.numpy()\n\n return states,actions,rewards,final_r,state\n\ndef discount_reward(r, gamma,final_r):\n discounted_r = np.zeros_like(r)\n running_add = final_r\n for t in reversed(range(0, len(r))):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\ndef main():\n # init a task generator for data fetching\n task = gym.make(\"CartPole-v1\")\n init_state = task.reset()\n\n # init value network\n value_network = ValueNetwork(input_size = STATE_DIM,hidden_size = 40,output_size = 1)\n value_network_optim = torch.optim.Adam(value_network.parameters(),lr=0.01)\n\n # init actor network\n actor_network = ActorNetwork(STATE_DIM,40,ACTION_DIM)\n actor_network_optim = torch.optim.Adam(actor_network.parameters(),lr = 0.01)\n\n steps =[]\n task_episodes =[]\n test_results =[]\n\n for step in range(STEP):\n states,actions,rewards,final_r,current_state = roll_out(actor_network,task,SAMPLE_NUMS,value_network,init_state)\n init_state = current_state\n actions_var = Variable(torch.Tensor(actions).view(-1,ACTION_DIM))\n states_var = Variable(torch.Tensor(states).view(-1,STATE_DIM))\n\n # train actor network\n actor_network_optim.zero_grad()\n log_softmax_actions = actor_network(states_var)\n vs = value_network(states_var).detach()\n # calculate qs\n qs = Variable(torch.Tensor(discount_reward(rewards,0.99,final_r)))\n\n advantages = qs - vs\n actor_network_loss = - torch.mean(torch.sum(log_softmax_actions*actions_var,1)* advantages)\n actor_network_loss.backward()\n torch.nn.utils.clip_grad_norm(actor_network.parameters(),0.5)\n actor_network_optim.step()\n\n 
# train value network\n value_network_optim.zero_grad()\n target_values = qs\n values = value_network(states_var)\n criterion = nn.MSELoss()\n value_network_loss = criterion(values,target_values)\n value_network_loss.backward()\n torch.nn.utils.clip_grad_norm(value_network.parameters(),0.5)\n value_network_optim.step()\n\n # Testing\n if (step + 1) % 50== 0:\n result = 0\n test_task = gym.make(\"CartPole-v0\")\n for test_epi in range(10):\n state = test_task.reset()\n for test_step in range(200):\n softmax_action = torch.exp(actor_network(Variable(torch.Tensor([state]))))\n #print(softmax_action.data)\n action = np.argmax(softmax_action.data.numpy()[0])\n next_state,reward,done,_ = test_task.step(action)\n result += reward\n state = next_state\n if done:\n break\n print(\"step:\",step+1,\"test result:\",result/10.0)\n steps.append(step+1)\n test_results.append(result/10)\n\nif __name__ == '__main__':\n main()",
"import random\n\nimport numpy as np\n\nfrom common.fast_rl.experience import ExperienceSource, ExperienceSourceFirstLast\nfrom common.fast_rl.experience_single import ExperienceSourceSingleEnvFirstLast\nfrom .common import utils\n\n\nclass ExperienceSourceBuffer:\n \"\"\"\n The same as ExperienceSource, but takes episodes from the buffer\n \"\"\"\n\n def __init__(self, buffer, steps_count=1):\n \"\"\"\n Create buffered experience source\n :param buffer: list of episodes, each is a list of Experience object\n :param steps_count: count of steps in every entry\n \"\"\"\n self.update_buffer(buffer)\n self.steps_count = steps_count\n\n def update_buffer(self, buffer):\n self.buffer = buffer\n self.lens = list(map(len, buffer))\n\n def __iter__(self):\n \"\"\"\n Infinitely sample episode from the buffer and then sample item offset\n \"\"\"\n while True:\n episode = random.randrange(len(self.buffer))\n ofs = random.randrange(self.lens[episode] - self.steps_count - 1)\n yield self.buffer[episode][ofs:ofs + self.steps_count]\n\n\nclass ExperienceReplayBuffer:\n def __init__(self, experience_source, buffer_size):\n assert isinstance(\n experience_source,\n (ExperienceSource, ExperienceSourceFirstLast, ExperienceSourceSingleEnvFirstLast, type(None))\n )\n assert isinstance(buffer_size, int)\n self.experience_source_iter = None if experience_source is None else iter(experience_source)\n self.buffer = []\n self.capacity = buffer_size\n self.pos = 0\n\n def __len__(self):\n return len(self.buffer)\n\n def __iter__(self):\n return iter(self.buffer)\n\n def sample(self, batch_size):\n \"\"\"\n Get one random batch from experience replay\n TODO: implement sampling order policy\n :param batch_size:\n :return:\n \"\"\"\n if len(self.buffer) <= batch_size:\n return self.buffer\n # Warning: replace=False makes random.choice O(n)\n keys = np.random.choice(len(self.buffer), batch_size, replace=True)\n return [self.buffer[key] for key in keys]\n\n def _add(self, sample):\n if len(self.buffer) < self.capacity:\n self.buffer.append(sample)\n else:\n self.buffer[self.pos] = sample\n self.pos = (self.pos + 1) % self.capacity\n\n def populate(self, num_samples):\n \"\"\"\n Populates samples into the buffer\n :param samples: how many samples to populate\n \"\"\"\n entry = None\n for _ in range(num_samples):\n entry = next(self.experience_source_iter)\n self._add(entry)\n\n return entry\n\n def populate_with_action_count(self, num_samples, action_count):\n \"\"\"\n Populates samples into the buffer\n :param samples: how many samples to populate\n \"\"\"\n for _ in range(num_samples):\n entry = next(self.experience_source_iter)\n action_count[entry.action] += 1\n self._add(entry)\n\n def populate_stacked_experience(self, num_samples, action_count=None):\n for _ in range(num_samples):\n exp = next(self.experience_source_iter)\n if action_count:\n action_count[exp.action] += 1\n # assert np.array_equal(exp.state.__array__()[1, :, :], exp.last_state.__array__()[0, :, :])\n # assert np.array_equal(exp.state.__array__()[2, :, :], exp.last_state.__array__()[1, :, :])\n # assert np.array_equal(exp.state.__array__()[3, :, :], exp.last_state.__array__()[2, :, :])\n\n extended_frames = np.zeros([5, 84, 84], dtype=np.uint8)\n extended_frames[0, :, :] = exp.state.__array__()[0, :, :]\n for i in range(1, 4):\n extended_frames[i, :, :] = exp.state.__array__()[i, :, :]\n\n if exp.last_state is not None:\n extended_frames[4, :, :] = exp.last_state.__array__()[3, :, :]\n\n self._add((extended_frames, exp.action, exp.reward, 
exp.last_state is None))\n\n def update_priorities(self, batch_indices, batch_priorities):\n raise NotImplementedError()\n\n def update_beta(self, idx):\n raise NotImplementedError()\n\n\nclass PrioReplayBufferNaive:\n def __init__(self, experience_source, buffer_size, prob_alpha=0.6):\n self.exp_source_iter = iter(experience_source)\n self.prob_alpha = prob_alpha\n self.capacity = buffer_size\n self.pos = 0\n self.buffer = []\n self.priorities = np.zeros((buffer_size,), dtype=np.float32)\n\n def __len__(self):\n return len(self.buffer)\n\n def populate(self, count):\n max_prio = self.priorities.max() if self.buffer else 1.0\n for _ in range(count):\n sample = next(self.exp_source_iter)\n if len(self.buffer) < self.capacity:\n self.buffer.append(sample)\n else:\n self.buffer[self.pos] = sample\n self.priorities[self.pos] = max_prio\n self.pos = (self.pos + 1) % self.capacity\n\n def sample(self, batch_size, beta=0.4):\n if len(self.buffer) == self.capacity:\n prios = self.priorities\n else:\n prios = self.priorities[:self.pos]\n probs = np.array(prios, dtype=np.float32) ** self.prob_alpha\n\n probs /= probs.sum()\n indices = np.random.choice(len(self.buffer), batch_size, p=probs, replace=True)\n samples = [self.buffer[idx] for idx in indices]\n total = len(self.buffer)\n weights = (total * probs[indices]) ** (-beta)\n weights /= weights.max()\n return samples, indices, np.array(weights, dtype=np.float32)\n\n def update_priorities(self, batch_indices, batch_priorities):\n for idx, prio in zip(batch_indices, batch_priorities):\n self.priorities[idx] = prio\n\n\n# version that uses a sum tree\nclass PrioritizedReplayBuffer(ExperienceReplayBuffer):\n def __init__(self, experience_source, buffer_size, alpha=0.6, n_step=1, beta_start=0.4, beta_frames=100000):\n super(PrioritizedReplayBuffer, self).__init__(experience_source, buffer_size)\n assert alpha > 0\n self.alpha = alpha\n self.beta = beta_start\n self.n_step = n_step\n self.beta_start = beta_start\n self.beta_frames = beta_frames\n\n it_capacity = 1\n while it_capacity < buffer_size:\n it_capacity *= 2\n\n self._it_sum = utils.SumSegmentTree(it_capacity)\n self._it_min = utils.MinSegmentTree(it_capacity)\n self._max_priority = 1.0\n\n def update_beta(self, idx):\n v = self.beta_start + idx * (1.0 - self.beta_start) / self.beta_frames\n self.beta = min(1.0, v)\n return self.beta\n\n def _add(self, *args, **kwargs):\n idx = self.pos\n super()._add(*args, **kwargs)\n self._it_sum[idx] = self._max_priority ** self.alpha\n self._it_min[idx] = self._max_priority ** self.alpha\n\n def _sample_proportional(self, batch_size):\n assert len(self) > self.n_step\n res = []\n for _ in range(batch_size):\n while True:\n mass = random.random() * self._it_sum.sum(0, len(self) - 1)\n idx = self._it_sum.find_prefixsum_idx(mass)\n\n upper = self.pos\n lower = (self.pos - self.n_step)\n if lower < 0:\n lower = self.capacity + lower\n if lower < upper:\n if not lower <= idx < upper:\n res.append(idx)\n break\n else:\n if upper <= idx < lower:\n res.append(idx)\n break\n return res\n\n def sample(self, batch_size):\n assert self.beta > 0\n\n idxes = self._sample_proportional(batch_size)\n # print(\"#################\")\n # print(idxes)\n # print(self.pos)\n # print(\"#################\")\n\n weights = []\n p_min = self._it_min.min() / self._it_sum.sum()\n max_weight = (p_min * len(self)) ** (-self.beta)\n\n for idx in idxes:\n p_sample = self._it_sum[idx] / self._it_sum.sum()\n weight = (p_sample * len(self)) ** (-self.beta)\n weights.append(weight / max_weight)\n\n weights = 
np.array(weights, dtype=np.float32)\n samples = [self.buffer[idx] for idx in idxes]\n return samples, idxes, weights\n\n def update_priorities(self, idxes, priorities):\n # with torch.no_grad():\n assert len(idxes) == len(priorities)\n for idx, priority in zip(idxes, priorities):\n assert priority > 0.0, priority\n assert 0 <= idx < len(self), idx\n self._it_sum[idx] = priority ** self.alpha\n self._it_min[idx] = priority ** self.alpha\n\n self._max_priority = max(self._max_priority, priority)\n\n\n# version that does not use a sum tree\nclass PrioReplayBuffer:\n def __init__(self, experience_source, buffer_size, prob_alpha=0.6, n_step=1, beta_start=0.4, beta_frames=100000):\n assert isinstance(\n experience_source,\n (ExperienceSource, ExperienceSourceFirstLast, ExperienceSourceSingleEnvFirstLast, type(None))\n )\n assert isinstance(buffer_size, int)\n self.exp_source_iter = None if experience_source is None else iter(experience_source)\n self.prob_alpha = prob_alpha\n self.capacity = buffer_size\n self.pos = 0\n self.buffer = []\n self.priorities = np.zeros((buffer_size,), dtype=np.float32)\n self.beta = beta_start\n self.n_step = n_step\n self.beta_start = beta_start\n self.beta_frames = beta_frames\n\n def update_beta(self, idx):\n v = self.beta_start + idx * (1.0 - self.beta_start) / self.beta_frames\n self.beta = min(1.0, v)\n return self.beta\n\n def __len__(self):\n return len(self.buffer)\n\n def _add(self, sample):\n max_prio = self.priorities.max() if self.buffer else 1.0\n if len(self.buffer) < self.capacity:\n self.buffer.append(sample)\n else:\n self.buffer[self.pos] = sample\n self.priorities[self.pos] = max_prio\n self.pos = (self.pos + 1) % self.capacity\n\n def populate(self, count):\n max_prio = self.priorities.max() if self.buffer else 1.0\n for _ in range(count):\n sample = next(self.exp_source_iter)\n if len(self.buffer) < self.capacity:\n self.buffer.append(sample)\n else:\n self.buffer[self.pos] = sample\n self.priorities[self.pos] = max_prio\n self.pos = (self.pos + 1) % self.capacity\n\n def sample(self, batch_size):\n if len(self.buffer) == self.capacity:\n prios = self.priorities\n else:\n prios = self.priorities[:self.pos]\n probs = prios ** self.prob_alpha\n probs = probs[:-self.n_step]\n probs /= probs.sum()\n indices = np.random.choice(len(self.buffer) - self.n_step, batch_size, p=probs)\n samples = [self.buffer[idx] for idx in indices]\n total = len(self.buffer)\n weights = (total * probs[indices]) ** (-self.beta)\n weights /= weights.max()\n return samples, indices, np.array(weights, dtype=np.float32)\n\n def update_priorities(self, batch_indices, batch_priorities):\n for idx, prio in zip(batch_indices, batch_priorities):\n self.priorities[idx] = prio",
"import numpy as np\nimport gym\nfrom gym import spaces\n\nfrom common.environments.or_gym.utils import assign_env_config\n\n\nclass VMPackingEnv(gym.Env):\n '''\n Online VM Packing Problem\n\n The VM Packing Problem (VMPP) is a combinatorial optimization problem which\n requires the user to select from a series of physical machines (PM's) to\n send a virtual machine process to. Each VM process is characterized by\n two values, the memory and compute of the process. These are normalized\n by the PM capacities to range between 0-1. \n\n Observation:\n Type: Tuple, Discrete\n [0][:, 0]: Binary indicator for open PM's\n [0][:, 1]: CPU load of PM's\n [0][:, 2]: Memory load of PM's\n [1][0]: Current CPU demand\n [1][1]: Current memory demand\n\n Actions:\n Type: Discrete\n Integer of PM number to send VM to that PM\n\n Reward:\n Negative of the waste, which is the difference between the current\n size and excess space on the PM.\n\n Starting State:\n No open PM's and random starting item\n \n Episode Termination:\n When invalid action is selected, attempt to overload VM, or step\n limit is reached.\n '''\n def __init__(self, *args, **kwargs):\n self.cpu_capacity = 1\n self.mem_capacity = 1\n self.t_interval = 20\n self.tol = 1e-5\n self.step_limit = int(60 * 24 / self.t_interval)\n self.n_pms = 50\n self.load_idx = np.array([1, 2])\n self.seed = 0\n self.mask = True\n assign_env_config(self, kwargs)\n self.action_space = spaces.Discrete(self.n_pms)\n\n if self.mask:\n self.observation_space = spaces.Dict({\n \"action_mask\": spaces.Box(0, 1, shape=(self.n_pms,)),\n \"avail_actions\": spaces.Box(0, 1, shape=(self.n_pms,)),\n \"state\": spaces.Box(0, 1, shape=(self.n_pms+1, 3))\n })\n else:\n self.observation_space = spaces.Box(0, 1, shape=(self.n_pms+1, 3))\n self.reset()\n \n def reset(self):\n self.demand = self.generate_demand()\n self.current_step = 0\n self.state = {\n \"action_mask\": np.ones(self.n_pms),\n \"avail_actions\": np.ones(self.n_pms),\n \"state\": np.vstack([\n np.zeros((self.n_pms, 3)),\n self.demand[self.current_step]])\n }\n self.assignment = {}\n return self.state\n \n def step(self, action):\n done = False\n pm_state = self.state[\"state\"][:-1]\n demand = self.state[\"state\"][-1, 1:]\n \n if action < 0 or action >= self.n_pms:\n raise ValueError(\"Invalid action: {}\".format(action))\n \n elif any(pm_state[action, 1:] + demand > 1 + self.tol):\n # Demand doesn't fit into PM\n reward = -1000\n done = True\n else:\n if pm_state[action, 0] == 0:\n # Open PM if closed\n pm_state[action, 0] = 1\n pm_state[action, self.load_idx] += demand\n reward = np.sum(pm_state[:, 0] * (pm_state[:,1:].sum(axis=1) - 2))\n self.assignment[self.current_step] = action\n \n self.current_step += 1\n if self.current_step >= self.step_limit:\n done = True\n self.update_state(pm_state)\n return self.state, reward, done, {}\n \n def update_state(self, pm_state):\n # Make action selection impossible if the PM would exceed capacity\n step = self.current_step if self.current_step < self.step_limit else self.step_limit-1\n data_center = np.vstack([pm_state, self.demand[step]])\n data_center = np.where(data_center>1,1,data_center) # Fix rounding errors\n self.state[\"state\"] = data_center\n self.state[\"action_mask\"] = np.ones(self.n_pms)\n self.state[\"avail_actions\"] = np.ones(self.n_pms)\n if self.mask:\n action_mask = (pm_state[:, 1:] + self.demand[step, 1:]) <= 1\n self.state[\"action_mask\"] = (action_mask.sum(axis=1)==2).astype(int)\n\n def sample_action(self):\n return 
self.action_space.sample()\n\n def generate_demand(self):\n n = self.step_limit\n # From Azure data\n mem_probs = np.array([0.12 , 0.165, 0.328, 0.287, 0.064, 0.036])\n mem_bins = np.array([0.02857143, 0.05714286, 0.11428571, 0.45714286, 0.91428571,\n 1.]) # Normalized bin sizes\n mu_cpu = 16.08\n sigma_cpu = 1.26\n cpu_demand = np.random.normal(loc=mu_cpu, scale=sigma_cpu, size=n)\n cpu_demand = np.where(cpu_demand<=0, mu_cpu, cpu_demand) # Ensure demand isn't negative\n mem_demand = np.random.choice(mem_bins, p=mem_probs, size=n)\n return np.vstack([np.arange(n)/n, cpu_demand/100, mem_demand]).T\n\nclass TempVMPackingEnv(VMPackingEnv):\n '''\n Online Temporary VM Packing Problem\n\n The VM Packing Problem (VMPP) is a combinatorial optimization problem which\n requires the user to select from a series of physical machines (PM's) to\n send a virtual machine process to. Each VM process is characterized by\n two values, the memory and compute of the process. These are normalized\n by the PM capacities to range between 0-1. \n\n Observation:\n Type: Tuple, Discrete\n [0][:, 0]: Binary indicator for open PM's\n [0][:, 1]: CPU load of PM's\n [0][:, 2]: Memory load of PM's\n [1][0]: Current CPU demand\n [1][1]: Current memory demand\n\n Actions:\n Type: Discrete\n Integer of PM number to send VM to that PM\n\n Reward:\n Negative of the waste, which is the difference between the current\n size and excess space on the PM.\n\n Starting State:\n No open PM's and random starting item\n \n Episode Termination:\n When invalid action is selected, attempt to overload VM, or step\n limit is reached.\n '''\n def __init__(self, *args, **kwargs):\n super().__init__() \n self.state = self.reset()\n\n def step(self, action):\n done = False\n pm_state = self.state[\"state\"][:-1]\n demand = self.state[\"state\"][-1, 1:]\n \n if action < 0 or action >= self.n_pms:\n raise ValueError(\"Invalid action: {}\".format(action))\n \n elif any(pm_state[action, 1:] + demand > 1 + self.tol):\n # Demand doesn't fit into PM\n reward = -1000\n done = True\n else:\n if pm_state[action, 0] == 0:\n # Open PM if closed\n pm_state[action, 0] = 1\n pm_state[action, self.load_idx] += demand\n reward = np.sum(pm_state[:, 0] * (pm_state[:,1:].sum(axis=1) - 2))\n self.assignment[self.current_step] = action\n\n # Remove processes\n if self.current_step in self.durations.values():\n for process in self.durations.keys():\n # Remove process from PM\n if self.durations[process] == self.current_step:\n pm = self.assignment[process] # Find PM where process was assigned\n pm_state[pm, self.load_idx] -= self.demand[process]\n # Shut down PM's if state is 0\n if pm_state[pm, self.load_idx].sum() == 0:\n pm_state[pm, 0] = 0\n \n self.current_step += 1\n if self.current_step >= self.step_limit:\n done = True\n self.update_state(pm_state)\n return self.state, reward, done, {}\n \n def update_state(self, pm_state):\n # Make action selection impossible if the PM would exceed capacity\n step = self.current_step if self.current_step < self.step_limit else self.step_limit-1\n data_center = np.vstack([pm_state, self.demand[step]])\n data_center = np.where(data_center>1,1,data_center) # Fix rounding errors\n self.state[\"state\"] = data_center\n self.state[\"action_mask\"] = np.ones(self.n_pms)\n self.state[\"avail_actions\"] = np.ones(self.n_pms)\n if self.mask:\n action_mask = (pm_state[:, 1:] + self.demand[step, 1:]) <= 1\n self.state[\"action_mask\"] = (action_mask.sum(axis=1)==2).astype(int)\n \n def reset(self):\n self.current_step = 0\n 
self.assignment = {}\n self.demand = self.generate_demand()\n self.durations = generate_durations(self.demand)\n self.state = (np.zeros((self.n_pms, 3)), self.demand[0])\n return self.state\n\ndef generate_durations(demand):\n # duration_params = np.array([ 6.53563303e-02, 5.16222242e+01, 4.05028032e+06, -4.04960880e+06])\n return {i: np.random.randint(low=i+1, high=len(demand)+1)\n for i, j in enumerate(demand)}\n\ndef gaussian_model(params, x):\n return params[2] * np.exp(-0.5*((x - params[0]) / params[1]) ** 2) + params[3]",
"import os\n\nimport gym\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.distributions import Normal\nimport numpy as np\n\n#Hyperparameters\nlearning_rate = 0.0003\ngamma = 0.9\nlmbda = 0.9\neps_clip = 0.2\nK_epoch = 10\nrollout_len = 3\nbuffer_size = 30\nminibatch_size = 32\n\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\n\nclass PPO(nn.Module):\n def __init__(self):\n super(PPO, self).__init__()\n self.data = []\n\n self.fc1 = nn.Linear(3,128)\n self.fc_mu = nn.Linear(128,1)\n self.fc_std = nn.Linear(128,1)\n self.fc_v = nn.Linear(128,1)\n self.optimizer = optim.Adam(self.parameters(), lr=learning_rate)\n self.optimization_step = 0\n\n def pi(self, x, softmax_dim = 0):\n x = F.relu(self.fc1(x))\n mu = 2.0*torch.tanh(self.fc_mu(x))\n std = F.softplus(self.fc_std(x))\n return mu, std\n\n def v(self, x):\n x = F.relu(self.fc1(x))\n v = self.fc_v(x)\n return v\n\n def put_data(self, transition):\n self.data.append(transition)\n\n def make_batch(self):\n s_batch, a_batch, r_batch, s_prime_batch, prob_a_batch, done_batch = [], [], [], [], [], []\n data = []\n\n for j in range(buffer_size):\n for i in range(minibatch_size):\n rollout = self.data.pop()\n s_lst, a_lst, r_lst, s_prime_lst, prob_a_lst, done_lst = [], [], [], [], [], []\n\n for transition in rollout:\n s, a, r, s_prime, prob_a, done = transition\n\n s_lst.append(s)\n a_lst.append([a])\n r_lst.append([r])\n s_prime_lst.append(s_prime)\n prob_a_lst.append([prob_a])\n done_mask = 0 if done else 1\n done_lst.append([done_mask])\n\n s_batch.append(s_lst)\n a_batch.append(a_lst)\n r_batch.append(r_lst)\n s_prime_batch.append(s_prime_lst)\n prob_a_batch.append(prob_a_lst)\n done_batch.append(done_lst)\n\n mini_batch = torch.tensor(s_batch, dtype=torch.float), torch.tensor(a_batch, dtype=torch.float), \\\n torch.tensor(r_batch, dtype=torch.float), torch.tensor(s_prime_batch, dtype=torch.float), \\\n torch.tensor(done_batch, dtype=torch.float), torch.tensor(prob_a_batch, dtype=torch.float)\n data.append(mini_batch)\n\n return data\n\n def calc_advantage(self, data):\n data_with_adv = []\n for mini_batch in data:\n s, a, r, s_prime, done_mask, old_log_prob = mini_batch\n with torch.no_grad():\n td_target = r + gamma * self.v(s_prime) * done_mask\n delta = td_target - self.v(s)\n delta = delta.numpy()\n\n advantage_lst = []\n advantage = 0.0\n for delta_t in delta[::-1]:\n advantage = gamma * lmbda * advantage + delta_t[0]\n advantage_lst.append([advantage])\n advantage_lst.reverse()\n advantage = torch.tensor(advantage_lst, dtype=torch.float)\n data_with_adv.append((s, a, r, s_prime, done_mask, old_log_prob, td_target, advantage))\n\n return data_with_adv\n\n\n def train_net(self):\n if len(self.data) == minibatch_size * buffer_size:\n data = self.make_batch()\n data = self.calc_advantage(data)\n\n for i in range(K_epoch):\n for mini_batch in data:\n s, a, r, s_prime, done_mask, old_log_prob, td_target, advantage = mini_batch\n\n mu, std = self.pi(s, softmax_dim=1)\n dist = Normal(mu, std)\n log_prob = dist.log_prob(a)\n ratio = torch.exp(log_prob - old_log_prob) # a/b == exp(log(a)-log(b))\n\n surr1 = ratio * advantage\n surr2 = torch.clamp(ratio, 1-eps_clip, 1+eps_clip) * advantage\n loss = -torch.min(surr1, surr2) + F.smooth_l1_loss(self.v(s) , td_target)\n\n self.optimizer.zero_grad()\n loss.mean().backward()\n nn.utils.clip_grad_norm_(self.parameters(), 1.0)\n self.optimizer.step()\n self.optimization_step += 1\n\ndef main():\n env = gym.make('Pendulum-v0')\n model = 
PPO()\n score = 0.0\n print_interval = 20\n rollout = []\n\n for n_epi in range(10000):\n s = env.reset()\n done = False\n while not done:\n for t in range(rollout_len):\n mu, std = model.pi(torch.from_numpy(s).float())\n dist = Normal(mu, std)\n a = dist.rsample()\n log_prob = dist.log_prob(a)\n s_prime, r, done, info = env.step([a.item()])\n\n rollout.append((s, a, r/10.0, s_prime, log_prob.item(), done))\n if len(rollout) == rollout_len:\n model.put_data(rollout)\n rollout = []\n\n s = s_prime\n score += r\n if done:\n break\n\n model.train_net()\n\n if n_epi%print_interval==0 and n_epi!=0:\n print(\"# of episode :{}, avg score : {:.1f}, opt step: {}\".format(n_epi, score/print_interval, model.optimization_step))\n score = 0.0\n\n env.close()\n\nif __name__ == '__main__':\n main()",
"# https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py\n# https://mspries.github.io/jimmy_pendulum.html\n#!/usr/bin/env python3\nimport time\nimport torch\nimport os, sys\nimport numpy as np\n\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\nPROJECT_HOME = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))\nif PROJECT_HOME not in sys.path:\n sys.path.append(PROJECT_HOME)\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))))\n\nprint(\"PyTorch Version\", torch.__version__)\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nfrom codes.c_models.continuous_action.continuous_action_model import ContinuousActionModel\nfrom codes.e_utils.reward_changer import RewardChanger\nfrom codes.e_utils import rl_utils\nfrom codes.e_utils.common_utils import load_model, map_range\nfrom codes.e_utils.logger import get_logger\nfrom codes.e_utils.names import EnvironmentName, AgentMode\nfrom codes.e_utils.rl_utils import get_environment_input_output_info, MODEL_ZOO_SAVE_DIR, MODEL_SAVE_FILE_PREFIX\n\nmy_logger = get_logger(\"openai_pendulum_ddpg\")\n\n\ndef play_main(params, env, mode=None):\n observation_shape, action_shape, num_outputs, action_n, action_min, action_max = get_environment_input_output_info(\n env, mode='play_before_train'\n )\n agent = rl_utils.get_rl_agent(\n observation_shape, action_shape, num_outputs, action_n, action_min, action_max, worker_id=-1, params=params, device=device\n )\n load_model(MODEL_ZOO_SAVE_DIR, MODEL_SAVE_FILE_PREFIX, agent, inquery=False)\n agent.agent_mode = AgentMode.PLAY\n agent.model.eval()\n agent.test_model.load_state_dict(agent.model.state_dict())\n agent.test_model.eval()\n\n num_step = 0\n num_episode = 0\n\n done = False\n episode_reward = 0\n\n if mode == 'test':\n env.play_before_train_mode = True\n env.play_before_train = True\n state = env.reset()\n else:\n env.envs[0].play_before_train = True\n env.envs[0].play_before_train_mode = True\n state = env.envs[0].reset()\n\n\n num_episode += 1\n num_episode_step = 0\n\n agent_state = rl_utils.initial_agent_state()\n while not done:\n\n num_step += 1\n num_episode_step += 1\n\n state = np.expand_dims(state, axis=0)\n\n action, agent_state, = agent(state, agent_state)\n\n if isinstance(agent.model, ContinuousActionModel):\n action = map_range(\n np.asarray(action),\n np.ones_like(agent.action_min) * -1.0, np.ones_like(agent.action_max),\n agent.action_min, agent.action_max\n )\n\n if action.ndim == 2:\n action = action[0]\n\n if mode == 'test':\n next_state, reward, done, info = env.step(action)\n else:\n next_state, reward, done, info = env.envs[0].step(action)\n\n if isinstance(env, RewardChanger):\n reward = env.envs[0].reverse_reward(reward)\n\n episode_reward += reward\n\n state = next_state\n if mode == 'test':\n env.play_before_train = False\n env.play_before_train_mode = False\n else:\n env.envs[0].play_before_train = False\n env.envs[0].play_before_train_mode = False\n\nif __name__ == \"__main__\":\n from codes.a_config.parameters import PARAMETERS as parameters\n params = parameters\n\n env = rl_utils.get_single_environment(params=params, mode=AgentMode.PLAY)\n print(\"env:\", params.ENVIRONMENT_ID)\n print(\"observation_space:\", env.observation_space)\n print(\"action_space:\", env.action_space)\n\n play_main(params, env)\n",
"# https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import Normal, TanhTransform, TransformedDistribution\n\nfrom codes.c_models.continuous_action.continuous_action_model import ContinuousActionModel\nfrom codes.e_utils.common_utils import weights_init_, slack\n\n\nclass ContinuousSACModel(ContinuousActionModel):\n def __init__(self, worker_id, observation_shape, num_outputs, params, device):\n super(ContinuousSACModel, self).__init__(worker_id, params, device)\n self.__name__ = \"ContinuousSACModel\"\n\n num_inputs = observation_shape[0]\n\n self.base = SoftActorCriticMLPBase(\n num_inputs=num_inputs, num_outputs=num_outputs, params=params\n )\n\n def forward(self, inputs, agent_state):\n mu_v, logstd_v, _ = self.base.forward_actor(inputs, agent_state)\n return mu_v, logstd_v\n\n def re_parameterization_trick_sample_old(self, state):\n mu_v, logstd_v, _ = self.base.forward_actor(state)\n dist = Normal(loc=mu_v, scale=torch.exp(logstd_v))\n x_t = dist.rsample() # for reparameterization trick (mean + std * N(0,1))\n action_v = torch.tanh(x_t)\n\n log_probs = dist.log_prob(x_t) - torch.log(1.0 - action_v.pow(2) + 1.0e-6)\n log_probs = log_probs.sum(dim=-1, keepdim=True)\n\n # action_v.shape: [128, 1]\n # log_prob.shape: [128, 1]\n return action_v, log_probs\n\n def re_parameterization_trick_sample(self, state):\n mu_v, logstd_v, _ = self.base.forward_actor(state)\n dist = Normal(loc=mu_v, scale=torch.exp(logstd_v))\n transforms = [TanhTransform(cache_size=1)]\n dist = TransformedDistribution(dist, transforms)\n action_v = dist.rsample() # for reparameterization trick (mean + std * N(0,1))\n\n log_probs = dist.log_prob(action_v).sum(dim=-1, keepdim=True)\n\n # action_v.shape: [128, 1]\n # log_prob.shape: [128, 1]\n return action_v, log_probs\n\n\nclass SoftActorCriticMLPBase(nn.Module):\n def __init__(self, num_inputs, num_outputs, params):\n super(SoftActorCriticMLPBase, self).__init__()\n self.__name__ = \"SoftActorCriticMLPBase\"\n\n self.hidden_1_size = params.HIDDEN_1_SIZE\n self.hidden_2_size = params.HIDDEN_2_SIZE\n self.hidden_3_size = params.HIDDEN_3_SIZE\n\n self.actor = GaussianActorMLPBase(num_inputs, num_outputs, params)\n\n self.twinq = TwinQMLPBase(num_inputs, num_outputs, params)\n\n self.layers_info = {'actor': self.actor, 'twinq': self.twinq}\n\n self.actor_params = list(self.actor.parameters())\n self.twinq_params = list(self.twinq.parameters())\n\n self.train()\n\n def forward_actor(self, inputs, agent_state=None):\n # if not (type(inputs) is torch.Tensor):\n # inputs = torch.tensor([inputs], dtype=torch.float).to(self.device)\n\n mu_v, logstd_v = self.actor.forward(inputs)\n\n return mu_v, logstd_v, agent_state\n\n def forward_critic(self, inputs, action, agent_state=None):\n q1_v, q2_v = self.twinq.forward(inputs, action)\n\n return q1_v, q2_v, agent_state\n\n\nclass GaussianActorMLPBase(nn.Module):\n def __init__(self, num_inputs, num_outputs, params):\n super(GaussianActorMLPBase, self).__init__()\n\n self.hidden_1_size = params.HIDDEN_1_SIZE\n self.hidden_2_size = params.HIDDEN_2_SIZE\n self.hidden_3_size = params.HIDDEN_3_SIZE\n\n self.common = nn.Sequential(\n nn.Linear(num_inputs, self.hidden_1_size),\n nn.LayerNorm(self.hidden_1_size),\n nn.GELU(),\n nn.Linear(self.hidden_1_size, self.hidden_2_size),\n nn.LayerNorm(self.hidden_2_size),\n nn.GELU(),\n nn.Linear(self.hidden_2_size, self.hidden_3_size),\n nn.LayerNorm(self.hidden_3_size),\n nn.GELU(),\n )\n\n self.mu = nn.Sequential(\n 
nn.Linear(self.hidden_3_size, num_outputs),\n nn.Tanh()\n )\n\n self.logstd = nn.Sequential(\n nn.Linear(self.hidden_3_size, num_outputs),\n nn.Softplus()\n )\n\n #self.apply(weights_init_)\n\n def forward(self, inputs):\n mu_v = self.mu(self.common(inputs))\n logstd_v = self.logstd(self.common(inputs))\n\n if torch.any(torch.isnan(mu_v)):\n slack.send_message(message=\"mu_v contains 'Nan': inputs {0}\".format(inputs))\n slack.send_message(message=\"{0}\".format(self.common.parameters()))\n print(\"inputs:\", inputs, \"!!! - 1\")\n print(\"self.common(inputs)\", self.common(inputs), \"!!! - 2\")\n print(\"mu_v:\", mu_v, \"!!! - 3\")\n print(\"logstd_v:\", logstd_v, \"!!! - 4\")\n exit(-1)\n\n return mu_v, logstd_v\n\n\nclass TwinQMLPBase(nn.Module):\n def __init__(self, num_inputs, num_outputs, params):\n super(TwinQMLPBase, self).__init__()\n\n self.hidden_1_size = params.HIDDEN_1_SIZE\n self.hidden_2_size = params.HIDDEN_2_SIZE\n self.hidden_3_size = params.HIDDEN_3_SIZE\n\n self.q1 = nn.Sequential(\n nn.Linear(num_inputs + num_outputs, self.hidden_1_size),\n nn.LayerNorm(self.hidden_1_size),\n nn.GELU(),\n nn.Linear(self.hidden_1_size, self.hidden_2_size),\n nn.LayerNorm(self.hidden_2_size),\n nn.GELU(),\n nn.Linear(self.hidden_2_size, self.hidden_3_size),\n nn.LayerNorm(self.hidden_3_size),\n nn.GELU(),\n nn.Linear(self.hidden_3_size, 1),\n )\n\n self.q2 = nn.Sequential(\n nn.Linear(num_inputs + num_outputs, self.hidden_1_size),\n nn.LayerNorm(self.hidden_1_size),\n nn.GELU(),\n nn.Linear(self.hidden_1_size, self.hidden_2_size),\n nn.LayerNorm(self.hidden_2_size),\n nn.GELU(),\n nn.Linear(self.hidden_2_size, self.hidden_3_size),\n nn.LayerNorm(self.hidden_3_size),\n nn.GELU(),\n nn.Linear(self.hidden_3_size, 1),\n )\n\n self.apply(weights_init_)\n\n def forward(self, obs, act):\n x = torch.cat([obs, act], dim=-1)\n return self.q1(x), self.q2(x)\n",
"# https://github.com/openai/gym/blob/master/gym/envs/__init__.py#L449\nimport gym\nimport numpy as np\n\nfrom config.names import EnvironmentName, DeepLearningModelName\nfrom common.environments import Environment\n\nclass BreakoutDeterministic_v4(Environment):\n def __init__(self, params):\n self.env = gym.make(EnvironmentName.BREAKOUT_DETERMINISTIC_V4.value)\n super(BreakoutDeterministic_v4, self).__init__()\n self.action_shape = self.get_action_shape()\n self.state_shape = self.get_state_shape()\n\n self.cnn_input_height = self.state_shape[0]\n self.cnn_input_width = self.state_shape[1]\n self.cnn_input_channels = self.state_shape[2]\n self.continuous = False\n\n self.last_ball_lives = -1\n\n self.skipping_state_fq = 3\n self.skipping_state_index = 0\n self.params = params\n\n @staticmethod\n def to_grayscale(img):\n r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\n\n @staticmethod\n def downsample(img):\n return img[::2, ::2]\n\n @staticmethod\n def transform_reward(reward):\n return np.sign(reward)\n\n def preprocess(self, img):\n gray_frame = self.to_grayscale(self.downsample(img))\n\n if self.params.DEEP_LEARNING_MODEL == DeepLearningModelName.ACTOR_CRITIC_CNN:\n state = np.expand_dims(gray_frame, axis=0)\n elif self.params.DEEP_LEARNING_MODEL == DeepLearningModelName.ACTOR_CRITIC_MLP:\n state = gray_frame.flatten()\n else:\n state = None\n\n return state\n\n def get_n_states(self):\n if self.params.DEEP_LEARNING_MODEL == DeepLearningModelName.ACTOR_CRITIC_CNN:\n return 1, 105, 80 # input_channels, input_height, input_width\n elif self.params.DEEP_LEARNING_MODEL == DeepLearningModelName.ACTOR_CRITIC_MLP:\n return 8400\n else:\n return None\n\n def get_n_actions(self):\n return self.env.action_space.n - 1\n\n @property\n def action_meanings(self):\n action_meanings = self.env.get_action_meanings()\n action_meanings.remove('FIRE')\n return action_meanings\n\n def get_state_shape(self):\n state_shape = (int(self.env.observation_space.shape[0]/2), int(self.env.observation_space.shape[1]/2), 1)\n return state_shape\n\n def get_action_shape(self):\n action_shape = self.env.action_space.n - 1\n return action_shape,\n\n def get_action_space(self):\n return self.env.action_space\n\n def reset(self):\n self.env.reset()\n next_state, reward, done, info = self.env.step(1)\n self.last_ball_lives = info['ale.lives']\n info[\"dead\"] = False #if a ball fall down, dead is true\n\n return self.preprocess(next_state)\n\n def step(self, action):\n if action == 1:\n env_action = 2\n elif action == 2:\n env_action = 3\n else:\n env_action = 0\n\n next_state, reward, done, info = self.env.step(env_action)\n\n if self.last_ball_lives != info['ale.lives']:\n env_action = 1\n self.last_ball_lives = info['ale.lives']\n next_state, reward, done, info = self.env.step(env_action)\n info[\"dead\"] = True\n reward = -5.0\n\n # info[\"skipping\"] = True\n # if self.skipping_state_index == self.skipping_state_fq:\n # self.skipping_state_index = 0\n # info[\"skipping\"] = False\n\n adjusted_reward = self.transform_reward(reward)\n\n # self.skipping_state_index += 1\n\n return self.preprocess(next_state), reward, adjusted_reward, done, info\n\n def render(self):\n self.env.render()\n\n def close(self):\n self.env.close()\n",
"import numpy as np\nfrom math import sqrt\n\n\n# from https://github.com/songrotek/DDPG/blob/master/ou_noise.py\n# and adapted to be synchronous with https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py\nclass OUNoise:\n def __init__(self, action_dimension, dt=0.01, mu=0, theta=0.15, sigma=0.2):\n self.action_dimension = action_dimension\n self.dt = dt\n self.mu = mu\n self.theta = theta\n self.sigma = sigma\n self.reset()\n\n def reset(self):\n self.state = np.ones(self.action_dimension) * self.mu\n\n def noise(self):\n x = self.state\n dx = self.theta * (self.mu - x) * self.dt + self.sigma * np.random.randn(len(x)) * np.sqrt(self.dt)\n self.state = x + dx\n return self.state\n\n\n# From OpenAI Baselines:\n# https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py\nclass OrnsteinUhlenbeckActionNoise:\n def __init__(self, mu, sigma=0.2, theta=0.15, dt=1.0, x0=None):\n self.theta = theta\n self.mu = mu\n self.sigma = sigma\n self.dt = dt # default: 1e-2\n self.x0 = x0\n self.reset()\n\n def noise(self):\n x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)\n\n def __repr__(self):\n return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)\n\n\n# From OpenAI Baselines:\n# https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py\nclass AdaptiveParamNoiseSpec(object):\n def __init__(self, initial_stddev=0.1, desired_action_stddev=0.2, adaptation_coefficient=1.01):\n \"\"\"\n Note that initial_stddev and current_stddev refer to std of parameter noise,\n but desired_action_stddev refers to (as name notes) desired std in action space\n \"\"\"\n self.initial_stddev = initial_stddev\n self.desired_action_stddev = desired_action_stddev\n self.adaptation_coefficient = adaptation_coefficient\n\n self.current_stddev = initial_stddev\n\n def adapt(self, distance):\n if distance > self.desired_action_stddev:\n # Decrease stddev.\n self.current_stddev /= self.adaptation_coefficient\n else:\n # Increase stddev.\n self.current_stddev *= self.adaptation_coefficient\n\n def get_stats(self):\n stats = {\n 'param_noise_stddev': self.current_stddev,\n }\n return stats\n\n def __repr__(self):\n fmt = 'AdaptiveParamNoiseSpec(initial_stddev={}, desired_action_stddev={}, adaptation_coefficient={})'\n return fmt.format(self.initial_stddev, self.desired_action_stddev, self.adaptation_coefficient)\n\n\ndef ddpg_distance_metric(actions1, actions2):\n \"\"\"\n Compute \"distance\" between actions taken by two policies at the same states\n Expects numpy arrays\n \"\"\"\n diff = actions1-actions2\n mean_diff = np.mean(np.square(diff), axis=0)\n dist = sqrt(np.mean(mean_diff))\n return dist",
"#!/usr/bin/env python3\nimport gym\nimport numpy as np\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils as nn_utils\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\n\nfrom codes.e_utils.experience import ExperienceSourceFirstLast\nfrom common.fast_rl.common.utils import TBMeanTracker\nfrom common.fast_rl.common.wrappers import wrap_dqn\nfrom common.fast_rl.rl_agent import PolicyAgent\nfrom z_externals.handson_second_edition.Chapter11.lib.common import RewardTracker\n\nGAMMA = 0.99\nLEARNING_RATE = 0.001\nENTROPY_BETA = 0.01\nBATCH_SIZE = 128\nNUM_ENVS = 50\n\nREWARD_STEPS = 4\nCLIP_GRAD = 0.1\n\n\nclass AtariA2C(nn.Module):\n def __init__(self, input_shape, n_actions):\n super(AtariA2C, self).__init__()\n\n self.conv = nn.Sequential(\n nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, kernel_size=3, stride=1),\n nn.ReLU()\n )\n\n conv_out_size = self._get_conv_out(input_shape)\n self.policy = nn.Sequential(\n nn.Linear(conv_out_size, 512),\n nn.ReLU(),\n nn.Linear(512, n_actions)\n )\n\n self.value = nn.Sequential(\n nn.Linear(conv_out_size, 512),\n nn.ReLU(),\n nn.Linear(512, 1)\n )\n\n def _get_conv_out(self, shape):\n o = self.conv(torch.zeros(1, *shape))\n return int(np.prod(o.size()))\n\n def forward(self, x):\n fx = x.float() / 256\n conv_out = self.conv(fx).view(fx.size()[0], -1)\n return self.policy(conv_out), self.value(conv_out)\n\n\ndef unpack_batch(batch, net, device='cpu'):\n \"\"\"\n Convert batch into training tensors\n :param batch:\n :param net:\n :return: states variable, actions tensor, reference values variable\n \"\"\"\n states = []\n actions = []\n rewards = []\n not_done_idx = []\n last_states = []\n for idx, exp in enumerate(batch):\n states.append(np.array(exp.state, copy=False))\n actions.append(int(exp.action))\n rewards.append(exp.reward)\n if exp.last_state is not None:\n not_done_idx.append(idx)\n last_states.append(np.array(exp.last_state, copy=False))\n\n states_v = torch.FloatTensor(np.array(states, copy=False)).to(device)\n actions_t = torch.LongTensor(actions).to(device)\n\n # handle rewards\n rewards_np = np.array(rewards, dtype=np.float32)\n if not_done_idx:\n last_states_v = torch.FloatTensor(np.array(last_states, copy=False)).to(device)\n last_vals_v = net(last_states_v)[1]\n last_vals_np = last_vals_v.data.cpu().numpy()[:, 0]\n last_vals_np *= GAMMA ** REWARD_STEPS\n rewards_np[not_done_idx] += last_vals_np\n\n ref_vals_v = torch.FloatTensor(rewards_np).to(device)\n\n return states_v, actions_t, ref_vals_v\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--cuda\", default=False, action=\"store_true\", help=\"Enable cuda\")\n args = parser.parse_args()\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n writer = SummaryWriter(comment=\"-pong-a2c-rollouts\")\n\n env = wrap_dqn(gym.make(\"PongNoFrameskip-v4\"))\n net = AtariA2C(env.observation_space.shape, env.action_space.n).to(device)\n print(net)\n\n agent = PolicyAgent(lambda x: net(x)[0], apply_softmax=True, device=device)\n experience_source = ExperienceSourceFirstLast(env, agent, gamma=GAMMA, n_step=REWARD_STEPS)\n\n optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE, eps=1e-3)\n\n batch = []\n\n with RewardTracker(writer, stop_reward=18) as tracker:\n with TBMeanTracker(writer, batch_size=10) as tb_tracker:\n for step_idx, exp in 
enumerate(experience_source):\n batch.append(exp)\n\n # handle new rewards\n new_rewards = experience_source.pop_episode_reward_lst()\n if new_rewards:\n if tracker.reward(new_rewards[0], step_idx):\n break\n\n if len(batch) < BATCH_SIZE:\n continue\n\n states_v, actions_t, vals_ref_v = unpack_batch(batch, net)\n batch.clear()\n\n optimizer.zero_grad()\n logits_v, value_v = net(states_v)\n loss_value_v = F.mse_loss(value_v.squeeze(-1), vals_ref_v)\n\n log_prob_v = F.log_softmax(logits_v, dim=1)\n adv_v = vals_ref_v - value_v.detach()\n log_prob_actions_v = adv_v * log_prob_v[range(BATCH_SIZE), actions_t]\n loss_policy_v = -log_prob_actions_v.mean()\n\n prob_v = F.softmax(logits_v, dim=1)\n entropy_loss_v = ENTROPY_BETA * (prob_v * log_prob_v).sum(dim=1).mean()\n\n # calculate policy gradients only\n loss_policy_v.backward(retain_graph=True)\n grads = np.concatenate([p.grad.data.cpu().numpy().flatten()\n for p in net.parameters()\n if p.grad is not None])\n\n # apply entropy and value gradients\n loss_v = entropy_loss_v + loss_value_v\n loss_v.backward()\n nn_utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)\n optimizer.step()\n # get full loss\n loss_v += loss_policy_v\n",
"import time\nimport math\n\nimport gym\nfrom gym import spaces\nimport numpy as np\nimport grpc\n\n# MQTT Topic for RIP\nfrom codes.b_environments.quanser_rotary_inverted_pendulum import quanser_service_pb2_grpc\nfrom codes.e_utils.names import RLAlgorithmName, AgentMode\nfrom common.environments.environment import Environment\nfrom codes.a_config.parameters import PARAMETERS as params\nfrom codes.b_environments.quanser_rotary_inverted_pendulum.quanser_service_pb2 import QuanserRequest\n\nRIP_SERVER = '10.0.0.4'\nGOAL_ANGLE = params.GOAL_ANGLE\n\ndef get_quanser_rip_observation_space():\n low = np.array([0, 0, 0, 0, 0, 0], dtype=np.float32)\n high = np.array([1., 1., 500., 1., 1., 500,], dtype=np.float32)\n\n observation_space = gym.spaces.Box(\n low=low, high=high, dtype=np.float32\n )\n n_states = observation_space.shape[0]\n return observation_space, n_states\n\n\ndef get_quanser_rip_action_info(params):\n if params.RL_ALGORITHM in [RLAlgorithmName.DQN_V0]:\n action_index_to_voltage = list(np.array([\n -250, -100, 0, 100, 250\n ]))\n action_space = gym.spaces.Discrete(len(action_index_to_voltage))\n n_actions = action_space.n\n else:\n action_index_to_voltage = None\n action_space = gym.spaces.Box(\n low=-1.0, high=1.0, shape=(1,),\n dtype=np.float32\n )\n n_actions = action_space.shape[0]\n\n return action_space, n_actions, action_index_to_voltage\n\n\nclass ADJUSTAngleEnv(gym.Env):\n def __init__(self, mode=AgentMode.TRAIN):\n super(ADJUSTAngleEnv, self).__init__()\n self.params = params\n\n self.previous_time = 0\n\n self.episode = 0\n\n self.env_reset = False\n\n self.reward = 0\n\n self.step_idx = 0\n self.state = []\n self.episode_steps = 0\n\n self.count_continuous_uprights = 0\n self.is_upright = False\n\n self.motor_radian = 0\n self.motor_velocity = 0\n self.pendulum_radian = 0\n self.pendulum_velocity = 0\n\n self.is_motor_limit = False\n\n self.unit_time = self.params.UNIT_TIME\n self.over_unit_time = 0\n\n if mode == AgentMode.PLAY:\n self.max_episode_step = self.params.MAX_EPISODE_STEP_AT_PLAY\n else:\n self.max_episode_step = self.params.MAX_EPISODE_STEP\n\n self.initial_motor_radian = 0.0\n #==================observation==========================================================\n self.observation_space, self.n_states = get_quanser_rip_observation_space()\n #=======================================================================================\n\n #==================action===============================================================\n self.action_space, self.n_actions, self.action_index_to_voltage = get_quanser_rip_action_info(self.params)\n #=======================================================================================\n\n channel = grpc.insecure_channel('{0}:50051'.format(RIP_SERVER))\n self.server_obj = quanser_service_pb2_grpc.QuanserRIPStub(channel)\n\n def reset(self):\n self.episode_steps = 0\n self.reward = 0\n self.is_motor_limit = False\n\n quanser_response = self.server_obj.reset_sync(QuanserRequest(value=0.0))\n\n if quanser_response.message != \"RESET_SYNC\":\n raise ValueError()\n\n self.motor_radian = quanser_response.motor_radian\n self.motor_velocity = quanser_response.motor_velocity\n self.pendulum_radian = quanser_response.pendulum_radian\n self.pendulum_velocity = quanser_response.pendulum_velocity\n\n if self.episode % 5 == 0:\n print(\"*RESET PENDULUM RADIAN : {0:1.3f}\".format(self.pendulum_radian))\n\n self.state = [\n math.cos(self.pendulum_radian),\n math.sin(self.pendulum_radian),\n self.pendulum_velocity / 
params.VELOCITY_STATE_DENOMINATOR,\n # math.cos(self.initial_motor_radian - self.motor_radian),\n # math.sin(self.initial_motor_radian - self.motor_radian),\n math.cos(quanser_response.motor_radian),\n math.sin(quanser_response.motor_radian),\n self.motor_velocity / params.VELOCITY_STATE_DENOMINATOR\n ]\n\n # wait_time = 1 if self.episode == 0 else 15 # if self.episode % 10 == 0 else 3\n\n previousTime = time.perf_counter()\n time_done = False\n while not time_done:\n currentTime = time.perf_counter()\n if currentTime - previousTime >= self.unit_time:\n time_done = True\n time.sleep(0.0001)\n\n self.episode += 1\n\n return self.state\n\n def step(self, action):\n # current_time = time.perf_counter()\n # print(\"current_time - self.previous_time\", current_time - self.previous_time)\n while True:\n current_time = time.perf_counter()\n if current_time - self.previous_time >= self.unit_time:\n break\n time.sleep(0.0001)\n\n current_time = time.perf_counter()\n step_time = current_time - self.previous_time\n\n if step_time > self.unit_time:\n self.over_unit_time += 1\n\n # print(self.step_idx, action, step_time)\n self.previous_time = time.perf_counter()\n\n if self.step_idx % 100000 == 0:\n print(\"*OVER UNIT TIME STEP NUMBER :\", self.over_unit_time)\n #######################################################\n\n if type(action) is np.ndarray:\n action = action[0]\n\n if self.params.RL_ALGORITHM in [RLAlgorithmName.DQN_V0]:\n action = self.action_index_to_voltage[int(action)]\n\n #motor_power = float(action)\n # if self.step_idx % 100 == 0:\n # print(action)\n\n #==================== Grpc and use sample time========================================\n # previous_time = time.perf_counter()\n # print(action)\n\n # if self.step_idx % 50 == 0:\n # self.action_ = -self.action_\n\n quanser_response = self.server_obj.step_sync(QuanserRequest(value=action))\n\n self.motor_radian = quanser_response.motor_radian\n self.motor_velocity = quanser_response.motor_velocity\n self.pendulum_radian = quanser_response.pendulum_radian\n self.pendulum_velocity = quanser_response.pendulum_velocity\n self.step_syncronize = quanser_response.is_motor_limit\n\n self.state = [\n math.cos(self.pendulum_radian),\n math.sin(self.pendulum_radian),\n self.pendulum_velocity / params.VELOCITY_STATE_DENOMINATOR,\n # math.cos(self.initial_motor_radian - self.motor_radian),\n # math.sin(self.initial_motor_radian - self.motor_radian),\n math.cos(quanser_response.motor_radian),\n math.sin(quanser_response.motor_radian),\n self.motor_velocity / params.VELOCITY_STATE_DENOMINATOR\n ]\n\n next_state = self.state\n self.update_current_state()\n\n #=======================reward================================================================\n self.reward = self.get_reward()\n #=============================================================================================\n self.step_idx += 1\n self.episode_steps += 1\n\n done, info = self.__isDone()\n\n # print(\"pendulum radian : {0}, motor radian: {1}, reward: {2}, pendulum_velocity : {3} \\n\\n\".format(\n # self.pendulum_radian, self.motor_radian, self.is_motor_limit, self.pendulum_velocity\n # ))\n\n return next_state, self.reward, done, info\n\n def update_current_state(self):\n if abs(self.pendulum_radian) < math.radians(12):\n self.count_continuous_uprights += 1\n else:\n self.count_continuous_uprights = 0\n\n if self.count_continuous_uprights >= 1:\n self.is_upright = True\n else:\n self.is_upright = False\n\n def __isDone(self):\n info = {}\n\n def insert_to_info(s):\n 
info[\"result\"] = s\n\n if self.episode_steps >= self.max_episode_step: # 5000 * 25ms (0.025sec.) = 125 sec.\n insert_to_info(\"*** Success ***\")\n return True, info\n # elif self.is_motor_limit:\n # print(self.motor_radian, \" !!!!!!!\")\n # insert_to_info(\"*** Limit position ***\")\n # return True, info\n elif self.is_motor_limit:#abs(self.motor_radian) > math.radians(90) or self.is_motor_limit:\n insert_to_info(\"***motor_radian exceed 90***\")\n return True, info\n elif not self.step_syncronize:\n insert_to_info(\"**not_sync_step**\")\n return True, info\n else:\n insert_to_info(\"\")\n return False, info\n\n def get_reward(self):\n position_reward = math.pi - abs(self.pendulum_radian) # math.pi - math.radians(12) ~ math.pi\n energy_penalty = -1.0 * (abs(self.pendulum_velocity) + abs(self.motor_velocity)) / 100\n\n # TODO\n inverted_reward = (position_reward - energy_penalty) / math.pi #TODO\n angle_reward = 1 - abs(GOAL_ANGLE - math.degrees(self.motor_radian))/(abs(GOAL_ANGLE)+90.0)\n\n if self.is_upright:\n reward = (inverted_reward + angle_reward)/2.0\n else:\n reward = angle_reward\n\n # print(inverted_reward, angle_reward, reward)\n\n return reward\n\n def render(self):\n pass"
] |
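
The PPO script above computes advantages by walking the TD residuals backwards (`advantage = gamma * lmbda * advantage + delta_t`), which is the generalized advantage estimation (GAE) recurrence. A minimal NumPy sketch of just that recurrence, with the residual values invented purely for illustration:

import numpy as np

def gae(deltas, gamma=0.9, lmbda=0.9):
    # A_t = delta_t + gamma * lmbda * A_{t+1}, accumulated from the end of the rollout
    advantage = 0.0
    out = np.empty_like(deltas)
    for t in reversed(range(len(deltas))):
        advantage = deltas[t] + gamma * lmbda * advantage
        out[t] = advantage
    return out

# hypothetical TD residuals delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
print(gae(np.array([0.5, -0.2, 0.1, 0.3])))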
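
`ContinuousSACModel` above keeps two samplers: `re_parameterization_trick_sample_old` corrects the Gaussian log-probability by hand with `log(1 - tanh(x)^2 + 1e-6)`, while the newer method delegates the same tanh change of variables to `TransformedDistribution`. A small sketch, with an arbitrary zero-mean, unit-std policy, suggesting the two corrections agree up to the manual epsilon:

import torch
from torch.distributions import Normal, TanhTransform, TransformedDistribution

base = Normal(torch.zeros(1), torch.ones(1))

x = base.rsample()                  # reparameterized pre-squash sample
a = torch.tanh(x)                   # squashed action in (-1, 1)
logp_manual = base.log_prob(x) - torch.log(1.0 - a.pow(2) + 1.0e-6)

squashed = TransformedDistribution(base, [TanhTransform(cache_size=1)])
logp_auto = squashed.log_prob(a)    # same tanh Jacobian correction, applied internally

print(logp_manual.item(), logp_auto.item())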
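
Both noise classes above discretize the Ornstein-Uhlenbeck process dx = theta * (mu - x) * dt + sigma * sqrt(dt) * dW, whose mean-reverting state makes successive exploration samples temporally correlated rather than i.i.d. The update rule in isolation, using the same default coefficients as `OUNoise` (the five-step horizon is arbitrary):

import numpy as np

theta, mu, sigma, dt = 0.15, 0.0, 0.2, 0.01
x = np.zeros(1)  # one noise state per action dimension

for _ in range(5):
    # Euler-Maruyama step: the drift pulls x back toward mu, the diffusion adds Gaussian noise
    x = x + theta * (mu - x) * dt + sigma * np.sqrt(dt) * np.random.randn(1)
    print(x)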
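
`unpack_batch` in the A2C script bootstraps only non-terminal transitions: each reward is assumed to already hold the discounted n-step sum (what `ExperienceSourceFirstLast` emits), and gamma ** REWARD_STEPS * V(s_{t+n}) is added where the episode did not end. A sketch of just that indexing step, with made-up rewards and values:

import numpy as np

GAMMA, REWARD_STEPS = 0.99, 4

def n_step_targets(rewards, last_values, not_done_idx):
    # add the discounted bootstrap value only where a last_state existed
    targets = np.asarray(rewards, dtype=np.float32)
    targets[not_done_idx] += (GAMMA ** REWARD_STEPS) * np.asarray(last_values, dtype=np.float32)
    return targets

# transition 1 ended its episode, so only indices 0 and 2 receive a bootstrap term
print(n_step_targets([1.0, 0.5, -0.2], last_values=[2.0, 1.5], not_done_idx=[0, 2]))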
[
[
"torch.cuda.is_available"
],
[
"numpy.std",
"numpy.mean",
"torch.cuda.is_available"
],
[
"torch.Tensor",
"torch.sum",
"torch.exp",
"torch.nn.Linear",
"numpy.zeros_like",
"torch.nn.MSELoss"
],
[
"numpy.array",
"numpy.zeros"
],
[
"numpy.random.choice",
"numpy.arange",
"numpy.ones",
"numpy.random.normal",
"numpy.zeros",
"numpy.exp",
"numpy.array",
"numpy.where",
"numpy.vstack"
],
[
"torch.min",
"torch.from_numpy",
"torch.tensor",
"torch.exp",
"torch.nn.Linear",
"torch.no_grad",
"torch.distributions.Normal",
"torch.clamp"
],
[
"numpy.asarray",
"numpy.expand_dims",
"numpy.ones_like",
"torch.cuda.is_available"
],
[
"torch.nn.GELU",
"torch.distributions.TransformedDistribution",
"torch.nn.Softplus",
"torch.isnan",
"torch.cat",
"torch.nn.LayerNorm",
"torch.tanh",
"torch.nn.Linear",
"torch.nn.Tanh",
"torch.exp",
"torch.distributions.TanhTransform"
],
[
"numpy.sign",
"numpy.expand_dims"
],
[
"numpy.square",
"numpy.sqrt",
"numpy.ones",
"numpy.random.normal",
"numpy.mean",
"numpy.zeros_like"
],
[
"torch.LongTensor",
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.device",
"torch.nn.ReLU",
"numpy.array"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jsong0327/mmdetection
|
[
"bd65e7706a4c5f61554d24549ab2d1cdca5dec0d"
] |
[
"mmdet/models/backbones/ssd_vgg.py"
] |
[
"import logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import (VGG, xavier_init, constant_init, kaiming_init,\n normal_init)\nfrom mmcv.runner import load_checkpoint\n\nfrom ..registry import BACKBONES\n\n\[email protected]_module\nclass SSDVGG(VGG):\n extra_setting = {\n 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),\n 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),\n }\n\n def __init__(self,\n input_size,\n depth,\n with_last_pool=False,\n ceil_mode=True,\n out_indices=(3, 4),\n out_feature_indices=(22, 34),\n l2_norm_scale=20.):\n super(SSDVGG, self).__init__(\n depth,\n with_last_pool=with_last_pool,\n ceil_mode=ceil_mode,\n out_indices=out_indices)\n assert input_size in (300, 512)\n self.input_size = input_size\n\n self.features.add_module(\n str(len(self.features)),\n nn.MaxPool2d(kernel_size=3, stride=1, padding=1))\n self.features.add_module(\n str(len(self.features)),\n nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))\n self.features.add_module(\n str(len(self.features)), nn.ReLU(inplace=True))\n self.features.add_module(\n str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))\n self.features.add_module(\n str(len(self.features)), nn.ReLU(inplace=True))\n self.out_feature_indices = out_feature_indices\n\n self.inplanes = 1024\n self.extra = self._make_extra_layers(self.extra_setting[input_size])\n self.l2_norm = L2Norm(\n self.features[out_feature_indices[0] - 1].out_channels,\n l2_norm_scale)\n\n def init_weights(self, pretrained=None):\n if isinstance(pretrained, str):\n logger = logging.getLogger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n elif pretrained is None:\n for m in self.features.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n elif isinstance(m, nn.BatchNorm2d):\n constant_init(m, 1)\n elif isinstance(m, nn.Linear):\n normal_init(m, std=0.01)\n else:\n raise TypeError('pretrained must be a str or None')\n\n for m in self.extra.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n constant_init(self.l2_norm, self.l2_norm.scale)\n\n def forward(self, x):\n outs = []\n for i, layer in enumerate(self.features):\n x = layer(x)\n if i in self.out_feature_indices:\n outs.append(x)\n for i, layer in enumerate(self.extra):\n x = F.relu(layer(x), inplace=True)\n if i % 2 == 1:\n outs.append(x)\n outs[0] = self.l2_norm(outs[0])\n if len(outs) == 1:\n return outs[0]\n else:\n return tuple(outs)\n\n def _make_extra_layers(self, outplanes):\n layers = []\n kernel_sizes = (1, 3)\n num_layers = 0\n outplane = None\n for i in range(len(outplanes)):\n if self.inplanes == 'S':\n self.inplanes = outplane\n continue\n k = kernel_sizes[num_layers % 2]\n if outplanes[i] == 'S':\n outplane = outplanes[i + 1]\n conv = nn.Conv2d(\n self.inplanes, outplane, k, stride=2, padding=1)\n else:\n outplane = outplanes[i]\n conv = nn.Conv2d(\n self.inplanes, outplane, k, stride=1, padding=0)\n layers.append(conv)\n self.inplanes = outplanes[i]\n num_layers += 1\n if self.input_size == 512:\n layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))\n\n return nn.Sequential(*layers)\n\n\nclass L2Norm(nn.Module):\n\n def __init__(self, n_dims, scale=20., eps=1e-10):\n super(L2Norm, self).__init__()\n self.n_dims = n_dims\n self.weight = nn.Parameter(torch.Tensor(self.n_dims))\n self.eps = eps\n self.scale = scale\n\n def forward(self, x):\n # normalization layer convert to FP32 in FP16 training\n x_float = x.float()\n norm = 
x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps\n return (self.weight[None, :, None, None].float().expand_as(x_float) *\n x_float / norm).type_as(x)\n"
] |
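
The `L2Norm` module above rescales each spatial position of the first output feature map (conv4_3 in the standard SSD configuration) to a learnable per-channel norm, which stabilizes training. A quick numeric check of the same forward pass, FP16 casting omitted and shapes picked to roughly match the 300-input setting:

import torch

def l2norm(x, weight, eps=1e-10):
    # channel-wise L2 normalization with a learnable per-channel scale
    norm = x.pow(2).sum(1, keepdim=True).sqrt() + eps
    return weight[None, :, None, None] * x / norm

x = torch.randn(2, 512, 38, 38)       # conv4_3-sized feature map for a 300x300 input
w = torch.full((512,), 20.0)          # scale initialised to l2_norm_scale
y = l2norm(x, w)
print(y.pow(2).sum(1).sqrt().mean())  # ~20.0: every spatial position now has norm == scale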
[
[
"torch.nn.Sequential",
"torch.Tensor",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nateGeorge/scrape_stocks
|
[
"7dd2498ec7a860dfa2c9d655c0b45c9712be1a1f"
] |
[
"short_squeeze_plotting.py"
] |
[
"\"\"\"\nThis is intended to be plug-and-play short squeeze analysis. Generates top picks and plots.\n\nNeed to add in earnings dates into the mix. Stocks with earnings coming up should be looked into more.\n\"\"\"\nimport sys\n\nimport talib\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import ParameterGrid\nimport matplotlib.pyplot as plt\n\n# custom\nsys.path.append('../stock_prediction/code')\nimport data_processing as dp\n\n\ndef load_stocks_calculate_short_corr(only_latest=False):\n \"\"\"\n only_latest: boolean; if True, will keep only the latest 50 days to speed up procesing\n \"\"\"\n dfs, sh_int, fin_sh = dp.load_stocks(stocks=None)\n if only_latest:\n earliest = sh_int.index[-50]\n sh_int = sh_int[sh_int.index >= earliest]\n\n latest_stocks = []\n all_sh_stocks = []\n all_sh_stocks_full = []\n latest_date = sh_int['SPY'].index[-1]\n # TODO: make parallel\n for s in tqdm(sh_int.keys()):\n if sh_int[s].shape[0] == 0:\n print(s, 'is empty')\n continue\n\n if latest_date != sh_int[s].index[-1]:\n print(s, 'is old')\n continue\n\n if verbose:\n print(s)\n\n df = sh_int[s].copy()\n # create future price changes\n for i in [5, 10, 20, 40]:\n df['{}d_future_price_change'.format(i)] = df['Adj_Close'].pct_change(i).shift(-i)\n\n df['ticker'] = s\n\n # create short-close correlations -- TODO: deal with -1s\n # if short % is all -1 or 0, won't work. if less than 20 samples, rolling corr with 20 period window won't work\n # also broke on DF with 22 samples\n if df['Short_%_of_Float'].mean() in [-1, 0] or df.shape[0] < 30:\n df['Short_%_of_Float_10d_EMA'] = -np.inf\n df['Adj_Close_10d_EMA'] = talib.EMA(df['Adj_Close'].values, timeperiod=10)\n df['short_close_corr_10d_EMA'] = -np.inf\n df['short_close_corr_rocr_20d'] = -np.inf\n df['short_%_rocr_20d'] = -np.inf\n else:\n df['Short_%_of_Float_10d_EMA'] = talib.EMA(df['Short_%_of_Float'].values, timeperiod=10)\n df['Adj_Close_10d_EMA'] = talib.EMA(df['Adj_Close'].values, timeperiod=10)\n\n # essentially, we want to take an arbitrary number of points, calculate correlation, and find where the correlations are largest\n # take 10 points at a time and get correlations first, then take parts that have largest correlations, and keep expanding by 5 points at a time until correlation decreases\n corr = df[['Short_%_of_Float_10d_EMA', 'Adj_Close_10d_EMA']].rolling(window=20).corr()\n df['short_close_corr_10d_EMA'] = corr.unstack(1)['Short_%_of_Float_10d_EMA']['Adj_Close_10d_EMA']\n df['short_close_corr_10d_EMA'].replace(np.inf, 1, inplace=True)\n df['short_close_corr_10d_EMA'].replace(-np.inf, -1, inplace=True)\n df['short_close_corr_10d_EMA'].clip(lower=-1, upper=1, inplace=True)\n\n # WARNING: things with small (< 1%) Short % of float will result in huge rocr...maybe do something about this\n df['short_close_corr_rocr_20d'] = talib.ROCR100(df['short_close_corr_10d_EMA'].values, timeperiod=20)\n df['short_%_rocr_20d'] = talib.ROCR100(df['Short_%_of_Float_10d_EMA'].values, timeperiod=20)\n\n # auto-detect long stretches of negative and positive correlation\n thresh = 0.7\n rolling = df['short_close_corr_10d_EMA'].rolling(window=20).min()\n df['Short_%_positive_corr_detection'] = rolling > thresh\n df['Short_%_positive_corr_detection'] = df['Short_%_positive_corr_detection'].astype('int16')\n # sh_int[ticker]['Short_%_positive_corr_detection'].plot()\n # plt.show()\n\n df['Short_%_negative_corr_detection'] = rolling < -thresh\n 
df['Short_%_negative_corr_detection'] = df['Short_%_negative_corr_detection'].astype('int16')\n\n\n latest_stocks.append(df.iloc[-1])\n all_sh_stocks_full.append(df)\n all_sh_stocks.append(df.dropna())\n\n latest_stocks_df = pd.concat(latest_stocks, axis=1).T\n latest_stocks_df.set_index('ticker', inplace=True)\n\n all_sh_stocks_df = pd.concat(all_sh_stocks)\n all_sh_stocks_df['market_cap'] = all_sh_stocks_df['Shares_Outstanding'] * all_sh_stocks_df['Adj_Close']\n all_sh_stocks_full_df = pd.concat(all_sh_stocks_full)\n\n return all_sh_stocks_df, all_sh_stocks_full_df, latest_stocks_df, sh_int\n\n\ndef make_larger_shorts(all_sh_stocks_df):\n \"\"\"\n gets dataframe of stocks with larger amounts of shares short overall (not only currently)\n \"\"\"\n # plot short close rocr 20d to see if trend from TDOC holds overall -- under 80% ROCR100 means high gains\n # no overall trend, but for individual stocks there tends to be a trend\n shorts = all_sh_stocks_df[all_sh_stocks_df['short_%_rocr_20d'] != -np.inf]\n shorts_nona = shorts.dropna()\n\n ticker_groups = shorts_nona[['ticker', 'Short_%_of_Float', 'Days_to_Cover', 'rocp_cl', 'short_%_rocr_20d', 'short_close_corr_rocr_20d']].groupby('ticker').mean()\n larger_shorts = ticker_groups[ticker_groups['Short_%_of_Float'] > 10]\n short_stocks = larger_shorts.index\n\n return ticker_groups, larger_shorts, short_stocks\n\n\ndef short_squeeze_analysis(all_sh_stocks_df, ticker='TDOC'):\n # also look at EMA\n df = all_sh_stocks_df[all_sh_stocks_df['ticker'] == ticker]\n\n # I think all this is already calculated in load_stocks_calculate_short_corr\n # df['Short_%_of_Float_10d_EMA'] = talib.EMA(sh_int[ticker]['Short_%_of_Float'].values, timeperiod=10)\n # df['Adj_Close_10d_EMA'] = talib.EMA(df['Adj_Close'].values, timeperiod=10)\n df[['Short_%_of_Float', 'Short_%_of_Float_10d_EMA']].plot()\n\n # look for sections where short % mva and price are (ideally, inversely) correlated\n # make gradient of color where oldest -> newest goes from dark to light\n cm = plt.get_cmap('copper')\n num_colors = df.shape[0]\n colors = []\n for i in range(num_colors):\n colors.append(cm(i / num_colors)) # color will now be an RGBA tuple\n\n df.plot.scatter(x='Short_%_of_Float_10d_EMA', y='Adj_Close', color=colors)\n plt.show()\n\n # essentially, we want to take an arbitrary number of points, calculate correlation, and find where the correlations are largest\n # take 10 points at a time and get correlations first, then take parts that have largest correlations, and keep expanding by 5 points at a time until correlation decreases\n # already being done in preprocessing now\n # corr = df[['Short_%_of_Float_10d_EMA', 'Adj_Close_10d_EMA']].rolling(window=20).corr()\n # df['short_close_corr_10d_EMA'] = corr.unstack(1)['Short_%_of_Float_10d_EMA']['Adj_Close_10d_EMA']\n # df['short_close_corr_10d_EMA'].replace(np.inf, 1, inplace=True)\n # df['short_close_corr_10d_EMA'].replace(-np.inf, -1, inplace=True)\n # df['short_close_corr_10d_EMA'].clip(lower=-1, upper=1, inplace=True)\n\n # rows, columns\n # f, ax = plt.subplots(nrows=2, ncols=1, sharex=True)\n # sh_int[ticker]['short_close_corr_10d_EMA'].plot(ax=ax[0])\n # sh_int[ticker][['Adj_Close_10d_EMA', 'Short_%_of_Float_10d_EMA']].plot(ax=ax[1])\n # plt.show()\n\n # auto-detect long stretches of negative and positive correlation\n # now calculated in load_stocks_calculate_short_corr function\n # thresh = 0.8\n # rolling = df['short_close_corr_10d_EMA'].rolling(window=20).min()\n # df['Short_%_positive_corr_detection'] = rolling > 
thresh\n # df['Short_%_positive_corr_detection'] = df['Short_%_positive_corr_detection'].astype('int16')\n # # sh_int[ticker]['Short_%_positive_corr_detection'].plot()\n # # plt.show()\n #\n # df['Short_%_negative_corr_detection'] = rolling < -thresh\n # df['Short_%_negative_corr_detection'] = df['Short_%_negative_corr_detection'].astype('int16')\n df[['short_close_corr_10d_EMA', 'Short_%_negative_corr_detection', 'Adj_Close', 'Short_%_of_Float_10d_EMA', 'Days_to_Cover', 'short_close_corr_rocr_20d']].plot(subplots=True)\n plt.show()\n\n\nall_sh_stocks_df, all_sh_stocks_full_df, latest_stocks_df, sh_int = load_stocks_calculate_short_corr()\nticker_groups, larger_shorts, short_stocks = make_larger_shorts(all_sh_stocks_df)\n\nshort_squeeze_analysis(all_sh_stocks_full_df, ticker='TLRY')\n"
] |
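
The analysis above hinges on a 20-day rolling correlation between the short-interest EMA and the price EMA, with sustained stretches beyond a ±0.7 threshold flagged as squeeze candidates. A minimal pandas sketch of that pipeline on synthetic data; note the rolling-max test here is a slightly stricter variant of the script's rolling-min test, requiring every bar in the window to clear the threshold:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
n = 200
price = pd.Series(np.cumsum(rng.normal(size=n)) + 100.0)
short_pct = -0.5 * price + rng.normal(scale=2.0, size=n)  # roughly inverse to price by construction

df = pd.DataFrame({'Adj_Close_10d_EMA': price.ewm(span=10).mean(),
                   'Short_%_of_Float_10d_EMA': short_pct.ewm(span=10).mean()})

# pairwise 20-period rolling correlation, unstacked the same way as in the script
corr = df.rolling(window=20).corr()
pair = corr.unstack(1)['Short_%_of_Float_10d_EMA']['Adj_Close_10d_EMA']

# flag bars where the whole trailing 20-bar window stayed below -0.7
negative_corr = (pair.rolling(window=20).max() < -0.7).astype('int16')
print(negative_corr.sum(), 'bars inside a sustained negative-correlation stretch')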
[
[
"pandas.concat",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
GeniusGaryant/PReMVOS
|
[
"53c5fac37fb57429d0046b77a491009d7b81607f"
] |
[
"code/ReID_net/Trainer.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport Constants as Constants\nfrom Log import log\nfrom Util import average_gradients, clip_gradients\n\n\nclass Trainer(object):\n\n def __init__(self, config, train_network, test_network, global_step, session):\n self.profile = config.bool(\"profile\", False)\n self.add_grad_checks = config.bool(\"add_grad_checks\", False)\n self.add_numerical_checks = config.bool(\"add_numerical_checks\", False)\n self.measures = config.unicode_list(\"measures\", [])\n self.opt_str = config.str(\"optimizer\", \"adam\").lower()\n self.train_network = train_network\n self.test_network = test_network\n self.session = session\n self.global_step = global_step\n self.validation_step_number = 0\n self.gradient_clipping = config.float(\"gradient_clipping\", -1.0)\n self.optimizer_exclude_prefix = config.str(\n \"optimizer_exclude_prefix\", \"\")\n self.learning_rates = config.int_key_dict(\"learning_rates\")\n self.recursive_training = config.bool(\n Constants.RECURSIVE_TRAINING, False)\n assert 1 in self.learning_rates, \"no initial learning rate specified\"\n self.curr_learning_rate = self.learning_rates[1]\n self.lr_var = tf.placeholder(\n config.dtype, shape=[], name=\"learning_rate\")\n self.loss_scale_var = tf.placeholder_with_default(\n 1.0, shape=[], name=\"loss_scale\")\n self.opt, self.reset_opt_op = self.create_optimizer(config)\n grad_norm = None\n if train_network is not None:\n if train_network.use_partialflow:\n self.prepare_partialflow()\n self.step_op = tf.no_op(\"step\")\n else:\n self.step_op, grad_norm = self.create_step_op()\n if len(self.train_network.update_ops) == 0:\n self.update_ops = []\n else:\n self.update_ops = self.train_network.update_ops\n if self.add_numerical_checks:\n self.update_ops.append(tf.add_check_numerics_ops())\n self.train_targets = self.train_network.raw_labels\n self.train_inputs = self.train_network.inputs\n self.train_network_ys = self.train_network.y_softmax\n if self.train_network_ys is not None and self.train_targets is not None:\n self.train_network_ys = self._adjust_results_to_targets(\n self.train_network_ys, self.train_targets)\n else:\n self.step_op = None\n self.update_ops = None\n\n self.summary_writer, self.summary_op, self.summary_op_test = self.init_summaries(\n config, grad_norm)\n\n if test_network is not None:\n self.test_targets = self.test_network.raw_labels\n self.test_inputs = self.test_network.inputs\n self.test_network_ys = self.test_network.y_softmax\n if self.test_network_ys is not None and self.test_targets is not None:\n self.test_network_ys = self._adjust_results_to_targets(\n self.test_network_ys, self.test_targets)\n\n def create_optimizer(self, config):\n momentum = config.float(\"momentum\", 0.9)\n if self.opt_str == \"sgd_nesterov\":\n return tf.train.MomentumOptimizer(self.lr_var, momentum, use_nesterov=True), None\n elif self.opt_str == \"sgd_momentum\":\n return tf.train.MomentumOptimizer(self.lr_var, momentum), None\n elif self.opt_str == \"sgd\":\n return tf.train.GradientDescentOptimizer(self.lr_var), None\n elif self.opt_str == \"adam\":\n opt = tf.train.AdamOptimizer(self.lr_var)\n all_vars = tf.global_variables()\n opt_vars = [v for v in all_vars if \"Adam\" in v.name]\n reset_opt_op = tf.variables_initializer(\n opt_vars, \"reset_optimizer\")\n return opt, reset_opt_op\n elif self.opt_str == \"yellowfin\":\n from external.yellowfin import YFOptimizer\n return YFOptimizer(sparsity_debias=False), None\n elif self.opt_str == \"none\":\n return None, None\n else:\n assert False, 
(\"unknown optimizer\", self.opt_str)\n\n def reset_optimizer(self):\n assert self.opt_str == \"adam\", \"reset not implemented for other optimizers yet\"\n assert self.reset_opt_op is not None\n self.session.run(self.reset_opt_op)\n\n def prepare_partialflow(self):\n sm = self.train_network.graph_section_manager\n losses = self.train_network.losses\n regularizers = self.train_network.regularizers\n assert len(losses) == 1\n assert len(regularizers) == 1\n loss = losses[0] + tf.add_n(regularizers[0])\n loss *= self.loss_scale_var\n sm.add_training_ops(self.opt, loss, verbose=False,\n global_step=self.global_step)\n sm.prepare_training()\n # for sec in self.train_network.graph_sections:\n # print sec.get_tensors_to_feed()\n # for sec in self.train_network.graph_sections:\n # print sec.get_tensors_to_cache()\n\n def create_step_op(self):\n if self.opt is None:\n return tf.no_op(\"dummy_step_op\"), None\n\n losses, regularizers, setups = self.train_network.losses, self.train_network.regularizers, \\\n self.train_network.tower_setups\n assert len(losses) == len(regularizers)\n assert all(len(regularizers[0]) == len(x) for x in regularizers)\n regularizers = [tf.add_n(tower_regularizers) if len(tower_regularizers) > 0 else tf.constant(0, tf.float32) for\n tower_regularizers in regularizers]\n losses_with_regularizers = [\n l + r for l, r in zip(losses, regularizers)]\n tower_grads = []\n for l, s in zip(losses_with_regularizers, setups):\n gpu_str = \"/gpu:\" + str(s.gpu)\n with tf.device(gpu_str), tf.name_scope(\"tower_gpu_\" + str(s.gpu) + \"_opt\"):\n var_list = (\n tf.trainable_variables() +\n tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))\n if self.optimizer_exclude_prefix != \"\":\n to_remove = [x.name for x in var_list if x.name.startswith(\n self.optimizer_exclude_prefix)]\n if len(to_remove) > 0:\n print(\"excluding\", to_remove, \"from optimization, since they start with prefix\",\n self.optimizer_exclude_prefix, file=log.v1)\n var_list = [x for x in var_list if not x.name.startswith(\n self.optimizer_exclude_prefix)]\n else:\n print(\"warning, optimizer_exclude_prefix=\", self.optimizer_exclude_prefix, \"is specified,\"\n \" but no variable with this prefix is present in the model\", file=log.v1)\n grads_raw = self.opt.compute_gradients(l, var_list=var_list)\n # filter out gradients w.r.t. 
disconnected variables\n grads_filtered = [g for g in grads_raw if g[0] is not None]\n tower_grads.append(grads_filtered)\n\n with tf.device(setups[0].variable_device):\n if len(losses) == 1:\n grads = tower_grads[0]\n else:\n # average the gradients over the towers\n grads = average_gradients(tower_grads)\n\n if self.add_grad_checks:\n grads = [(tf.check_numerics(x[0], x[1].name), x[1])\n for x in grads]\n\n # grad clipping\n if self.gradient_clipping != -1:\n grads, norm = clip_gradients(grads, self.gradient_clipping)\n else:\n norm = None\n\n step_op = self.opt.apply_gradients(\n grads, global_step=self.global_step)\n return step_op, norm\n\n def init_summaries(self, config, grad_norm=None):\n summdir = config.dir(\"summary_dir\", \"summaries\")\n model = config.str(\"model\")\n summdir += model + \"/\"\n tf.gfile.MakeDirs(summdir)\n summary_writer = tf.summary.FileWriter(summdir, self.session.graph)\n summary_op = None\n summary_op_test = None\n if config.bool(\"write_summaries\", True):\n if self.train_network is not None and len(self.train_network.summaries) > 0:\n # better do not merge ALL summaries, since otherwise we get summaries from different networks\n # and might execute (parts of) the test network while training\n # self.summary_op = tf.merge_all_summaries()\n # atm we only collect summaries from the train network\n if grad_norm is None:\n summary_op = tf.summary.merge(self.train_network.summaries)\n else:\n #grad_norm = tf.Print(grad_norm, [grad_norm], \"grad_norm\")\n grad_norm_summary = tf.summary.scalar(\n \"grad_norm\", grad_norm)\n summary_op = tf.summary.merge(\n self.train_network.summaries + [grad_norm_summary])\n if self.test_network is not None and len(self.test_network.summaries) > 0:\n summary_op_test = tf.summary.merge(self.test_network.summaries)\n return summary_writer, summary_op, summary_op_test\n\n def get_options(self):\n if self.profile:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n return run_options, run_metadata\n else:\n return None, None\n\n # for profiling\n def handle_run_metadata(self, metadata, step):\n if metadata is None:\n return\n if not self.profile:\n return\n self.summary_writer.add_run_metadata(\n metadata, \"profile%d\" % step, step)\n # leave a few steps for warmup and then write out at the 10th step\n if step == 10:\n from tensorflow.python.client import timeline\n tl = timeline.Timeline(metadata.step_stats)\n ctf = tl.generate_chrome_trace_format()\n with open('timeline.json', 'w') as f:\n f.write(ctf)\n\n def validation_step(self, _):\n ops = [self.test_network.loss_summed,\n self.test_network.measures_accumulated, self.test_network.n_imgs]\n if 'clicks' in self.measures:\n ops.append(self.test_network.tags)\n\n if self.recursive_training:\n ops.append(self.test_network.tags)\n ops.append(self.test_network_ys)\n ops.append(self.test_targets)\n\n if self.summary_op_test is not None:\n ops.append(self.summary_op_test)\n\n res = self.session.run(ops)\n if self.summary_op_test is not None:\n summary_str = res[-1]\n res = res[:-1]\n self.summary_writer.add_summary(\n summary_str, global_step=self.validation_step_number)\n self.validation_step_number += 1\n\n if len(res) > 4:\n loss_summed, measures_accumulated, n_imgs, tags, ys_val, targets = res\n ys_argmax_val = np.argmax(ys_val, axis=-1)\n return loss_summed, measures_accumulated, n_imgs, tags, ys_argmax_val, targets\n elif len(res) > 3:\n loss_summed, measures_accumulated, n_imgs, tags = res\n 
measures_accumulated[Constants.CLICKS] = tags\n else:\n loss_summed, measures_accumulated, n_imgs = res\n\n return loss_summed, measures_accumulated, n_imgs\n\n def adjust_learning_rate(self, epoch, learning_rate=None):\n if learning_rate is None:\n key = max(\n [k for k in list(self.learning_rates.keys()) if k <= epoch + 1])\n new_lr = self.learning_rates[key]\n else:\n new_lr = learning_rate\n if self.curr_learning_rate != new_lr:\n print(\"changing learning rate to\", new_lr, file=log.v1)\n self.curr_learning_rate = new_lr\n\n def train_step(self, epoch, feed_dict=None, loss_scale=1.0, learning_rate=None):\n self.adjust_learning_rate(epoch, learning_rate)\n if feed_dict is None:\n feed_dict = {}\n else:\n feed_dict = feed_dict.copy()\n feed_dict[self.lr_var] = self.curr_learning_rate\n feed_dict[self.loss_scale_var] = loss_scale\n\n ops = self.update_ops + [self.global_step, self.step_op, self.train_network.loss_summed,\n self.train_network.measures_accumulated, self.train_network.n_imgs]\n\n if self.recursive_training:\n ops.append(self.train_network.tags)\n ops.append(self.train_network_ys)\n ops.append(self.train_targets)\n elif Constants.CLICKS in self.measures:\n ops.append(self.train_network.tags)\n\n if self.summary_op is not None:\n ops.append(self.summary_op)\n\n if self.train_network.use_partialflow:\n res = self.train_network.graph_section_manager.run_full_cycle(\n self.session, fetches=ops, basic_feed=feed_dict)\n run_metadata = None\n else:\n run_options, run_metadata = self.get_options()\n res = self.session.run(\n ops, feed_dict=feed_dict, options=run_options, run_metadata=run_metadata)\n\n # remove update outputs\n res = res[len(self.update_ops):]\n step = res[0]\n\n if self.summary_op is not None:\n summary_str = res[-1]\n res = res[:-1]\n self.summary_writer.add_summary(summary_str, step)\n\n self.handle_run_metadata(run_metadata, step)\n\n if len(res) > 6:\n _, _, loss_summed, measures_accumulated, n_imgs, tags, ys_val, targets = res\n ys_argmax_val = np.argmax(ys_val, axis=-1)\n return loss_summed, measures_accumulated, n_imgs, tags, ys_argmax_val, targets\n elif len(res) == 6:\n _, _, loss_summed, measures_accumulated, n_imgs, tags = res\n measures_accumulated[Constants.CLICKS] = tags\n else:\n _, _, loss_summed, measures_accumulated, n_imgs = res\n\n return loss_summed, measures_accumulated, n_imgs\n\n def _adjust_results_to_targets(self, y_softmax, targets):\n # scale it up!\n return tf.image.resize_images(y_softmax, tf.shape(targets)[1:3])\n"
] |
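
`Trainer.adjust_learning_rate` above implements a piecewise-constant schedule: `learning_rates` maps 1-based start epochs to rates, and the active rate is the one with the largest key not exceeding `epoch + 1`. The lookup in isolation, with a made-up schedule:

def lr_for_epoch(learning_rates, epoch):
    # largest start epoch that has already been reached (start epochs are 1-based)
    key = max(k for k in learning_rates if k <= epoch + 1)
    return learning_rates[key]

schedule = {1: 1e-3, 10: 1e-4, 20: 1e-5}  # start epoch -> learning rate
for epoch in (0, 8, 9, 19, 25):
    print(epoch, lr_for_epoch(schedule, epoch))
# epochs 0-8 stay at 1e-3; 1e-4 kicks in at epoch 9 (epoch + 1 == 10) and 1e-5 at epoch 19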
[
[
"tensorflow.device",
"tensorflow.RunMetadata",
"tensorflow.global_variables",
"tensorflow.variables_initializer",
"tensorflow.gfile.MakeDirs",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.add_n",
"tensorflow.python.client.timeline.Timeline",
"tensorflow.get_collection",
"tensorflow.placeholder_with_default",
"tensorflow.check_numerics",
"numpy.argmax",
"tensorflow.train.MomentumOptimizer",
"tensorflow.trainable_variables",
"tensorflow.shape",
"tensorflow.RunOptions",
"tensorflow.placeholder",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.no_op",
"tensorflow.add_check_numerics_ops",
"tensorflow.summary.merge",
"tensorflow.summary.FileWriter",
"tensorflow.constant"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
BhanuPrasadKoppineedi/Heart_Disease_Prediction
|
[
"0e4abb3ef42d135344b39456e0d7fa4afafb53e6"
] |
[
"heart_disease_prediction (1).py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"HEART_DISEASE_PREDICTION.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1yc00bQ4VnOoHkCp3NaMbUnL2qCZmz_Xk\n\n***HEART DISESE PREDICTION PROJECT***\n\n**IMPORTING LIBRARIES AND UPLOADING DATA SET**\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom google.colab import files\nuploaded = files.upload()\n\ndf_data = pd.read_csv('heart.csv')\n\ndf_data\n\n\"\"\"**EDA**\"\"\"\n\ndf_data.shape\n\ndf_data.columns\n\ndf_data.head()\n\ndf_data.tail()\n\ndf_data.info()\n\ndf_data.dtypes\n\ndf_data.isnull().sum()\n\ndf_data.iloc[:,-1]\n\ndf_heart=df_data.drop(1)\n\ndf_heart\n\ndf_heart.count()\n\ndf_heart.std()\n\ndf_heart.duplicated()\n\ndf_heart.describe()\n\ndf_heart.corr()\n\n\"\"\"**DATA VISUALISATION**\"\"\"\n\nimport seaborn as sns\ncorr=df_heart.corr()\nsns.heatmap(corr,annot=True)\n\n\"\"\"**BUILDING A MACHINE LEARNING MODEL USING LOGISTIC REGRESSION.**\"\"\"\n\nimport pandas as pd\n\nfrom warnings import simplefilter\n\nsimplefilter(action='ignore', category = FutureWarning)\n\ndf_heart.isnull().sum()\n\ndf_heart['target'] = df_heart.target.map({0: 0, 1: 1, 2: 1, 3: 1, 4: 1})\ndf_heart['sex'] = df_heart.sex.map({0: 'female', 1: 'male'})\ndf_heart['thalach'] = df_heart.thalach.fillna(df_heart.thalach.mean())\ndf_heart['ca'] = df_heart.ca.fillna(df_heart.ca.mean())\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_context(\"paper\", font_scale = 2, rc = {\"font.size\": 20,\"axes.titlesize\": 25,\"axes.labelsize\": 20}) \nsns.catplot(kind = 'count', data = df_heart, x = 'age', hue = 'target', order = df_heart['age'].sort_values().unique())\nplt.title('Variation of Age for each target class')\nplt.show()\n\nsns.catplot(kind = 'bar', data = df_heart, y = 'age', x = 'sex', hue = 'target')\nplt.title('Distribution of age vs sex with the target class')\nplt.show()\n\ndf_heart['sex'] = df_heart.sex.map({'female': 0, 'male': 1})\n\n\"\"\"**Data Preprocessing**\"\"\"\n\nX = df_heart.iloc[:, :-1].values\ny = df_heart.iloc[:, -1].values\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\nfrom sklearn.preprocessing import StandardScaler as ss\nsc = ss()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n\"\"\"**Logistic Reegression**\"\"\"\n\nX = df_heart.iloc[:, :-1].values\ny = df_heart.iloc[:, -1].values\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\nfrom sklearn.linear_model import LogisticRegression\nclassifier = LogisticRegression()\nclassifier.fit(X_train, y_train)\n\ny_pred = classifier.predict(X_test)\n\nfrom sklearn.metrics import classification_report,confusion_matrix\n\nprint(confusion_matrix(y_test,y_pred))\nprint(classification_report(y_test,y_pred))\n\ny_pred_train = classifier.predict(X_train)\ncm_train = confusion_matrix(y_pred_train, y_train)\n\ncm_train\n\nprint('Accuracy for training set for svm = {}'.format((cm_train[0][0] + cm_train[1][1])/len(y_train)))\nprint('Accuracy for test set for svm = {}'.format((cm_test[0][0] + cm_test[1][1])/len(y_test)))\n\n\"\"\"**TESTING**\"\"\"\n\ninput_data = X[0:1]\ninput_data_to_arry = np.asarray(input_data)\n\ninput_data_to_arry_reshape = input_data_to_arry.reshape(1, -1)\n\nprint(input_data_to_arry.shape)\n\ninput_data_to_arry_reshape.shape\n\n\"\"\"WE AARE PASSING TEST CASE WWHOSE TARGET IS 
1, THAAT IS, THE PATIENT HAVE HEART DISEASE\"\"\"\n\nprediction = classifier.predict(input_data_to_arry_reshape)\nif (prediction[0] == 1):\n print(\"TEST CASE - PASSED\")\nelse:\n print(\"TEST CASE - FAILED\")\n\nprint(prediction)\n\nprint(y[0])\n\n\"\"\"**Created By - K BHANU PRASAD**\n\n**THE END**\n\"\"\""
] |
[
[
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.title",
"numpy.asarray",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.confusion_matrix",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"sklearn.metrics.classification_report"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
randompast/cusignal
|
[
"30f2fa9884b6dac39c20e1d06e100a7ee5c41e38"
] |
[
"python/cusignal/test/utils.py"
] |
[
"# Copyright (c) 2019-2020, NVIDIA CORPORATION.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest_benchmark\n\n\ndef array_equal(a, b, rtol=1e-7, atol=1e-5):\n if a.dtype == np.float32 or a.dtype == np.complex64:\n rtol = 1e-3\n atol = 1e-3\n\n np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)\n\n\ndef _check_rapids_pytest_benchmark():\n try:\n from rapids_pytest_benchmark import setFixtureParamNames\n except ImportError:\n print(\n \"\\n\\nWARNING: rapids_pytest_benchmark is not installed, \"\n \"falling back to pytest_benchmark fixtures.\\n\"\n )\n\n # if rapids_pytest_benchmark is not available, just perfrom time-only\n # benchmarking and replace the util functions with nops\n gpubenchmark = pytest_benchmark.plugin.benchmark\n\n def setFixtureParamNames(*args, **kwargs):\n pass\n\n return gpubenchmark\n"
] |
[
[
"numpy.testing.assert_allclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ljmartin/molrec
|
[
"cff25d831710e589a4892b32545afd1ef1097442"
] |
[
"3_time_split/unused/mean_median_figure.py"
] |
[
"import matplotlib.pyplot as plt\nimport pymc3 as pm\nfrom scipy.special import logit, expit\nfrom scipy.stats import gaussian_kde, laplace, norm\nimport numpy as np \nplt.style.use('seaborn')\n\n\nfilenames = ['label_correlation', 'hpo_implicit_als', 'hpo_implicit_bpr',\n 'hpo_lightfm_warp', 'hpo_lightfm_warp_fp', 'hpo_lightfm_bpr', 'nearest_neighbor']\n\nyear = 2015\n\ndef plot_fig_label(ax, lab):\n ax.text(0, 1.15, lab, transform=ax.transAxes,\n fontsize=24, va='top', ha='left')\n\ndef simple_ci(data):\n d_sorted = np.sort(data)\n low = int(d_sorted.shape[0] // (1/0.025))\n high = int(d_sorted.shape[0] // (1/0.975))\n return (d_sorted[low], d_sorted[high])\n\ndef plot_meanmed(nn=False):\n def simple_bootstrap(data, dist=norm, n=1000, take = 350):\n samples = np.random.choice(data, size=(n,take))\n estimates = [dist.fit(i)[0] for i in samples]\n return np.array(estimates)\n \n fig, ax = plt.subplots(nrows=2, ncols=1)\n fig.set_figheight(10)\n fig.set_figwidth(7)\n if nn:\n nnranks = np.load('./processed_data/2015_nearest_neighbor.npy')\n mask = nnranks>3\n \n for count, name in enumerate(filenames):\n #load\n print(name)\n ranks = np.load('./processed_data/'+str(year)+'_'+name+'.npy')\n if nn:\n ranks = ranks[mask]\n \n\n for a, fun in zip([0,1], [norm, laplace]):\n #analyse\n #logit transform ranks:\n logit_ranks = logit(ranks / 344)\n\n bstrap_ = simple_bootstrap(logit_ranks, fun, take=len(ranks))\n bstrap = expit(bstrap_)*344\n \n ci = simple_ci(bstrap)\n\n print(name, expit(np.mean(bstrap_))*344, ci)\n\n sjitter = np.abs(np.random.randn(len(bstrap))) / 10\n ljitter = np.random.randn(len(bstrap))/20\n ax[a].scatter(count+sjitter+0.15, bstrap+ljitter, alpha=0.05)\n \n \n ax[a].plot([count,count], [ci[0], ci[1]], lw=5.5, c='C'+str(count),zorder=10)\n ax[a].scatter([count], [np.mean(ci)], \n facecolor='white', \n edgecolor='C'+str(count),\n lw=3.5,\n zorder=20)\n \n fsize = 14\n \n \n for a in ax:\n a.plot([0,0], [1e6, 1e6+1], lw=5.5, c='k',zorder=10, label='95% CI')\n a.scatter([0], [1e6], c='k', alpha=0.6, label='Bootstrap estimates')\n \n ax[1].set_ylim(0,26)\n ax[1].legend()\n ax[1].set_ylabel('Median rank', fontsize=fsize)\n \n ax[1].set_xticks([0,1,2,3,4,5, 6])\n ax[1].set_xticklabels([i.replace(\"hpo_\", '') for i in filenames], rotation=35, ha='center',fontsize=fsize)\n \n ax[0].set_xticks([0,1,2,3,4,5, 6])\n ax[0].set_xticklabels(['' for _ in filenames], rotation=65, ha='center', fontsize=fsize)\n\n\n plot_fig_label(ax[0], 'A.')\n plot_fig_label(ax[1], 'B.')\n\n ax[0].legend()\n ax[0].set_ylim(0,25)\n ax[0].set_xlabel('', fontsize=fsize)\n ax[0].set_ylabel('Mean rank', fontsize=fsize)\n \n \n plt.tight_layout()\n\n return fig, ax\n\n\n\nfig, ax = plot_meanmed()\nfig.savefig('./figures/mean_median.png')\nfig.savefig('./figures/mean_median.tif')\nfig.savefig('./figures/mean_median.svg')\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"scipy.special.expit",
"numpy.random.choice",
"matplotlib.pyplot.subplots",
"numpy.sort",
"scipy.special.logit",
"numpy.mean",
"numpy.load",
"numpy.array",
"matplotlib.pyplot.style.use"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
arch1baald/dlcourse_ai
|
[
"cb55a4de241c58dffa2331ba17326e4ef244694c"
] |
[
"assignments/assignment3/layers.py"
] |
[
"import numpy as np\n\n\ndef l2_regularization(W, reg_strength):\n '''\n Computes L2 regularization loss on weights and its gradient\n\n Arguments:\n W, np array - weights\n reg_strength - float value\n\n Returns:\n loss, single value - l2 regularization loss\n gradient, np.array same shape as W - gradient of weight by l2 loss\n '''\n W2 = W * W\n loss = reg_strength * np.sum(W2)\n grad = 2 * reg_strength * W\n return loss, grad\n\n\ndef softmax_with_cross_entropy(logits, target_index):\n \"\"\"\n Computes softmax and cross-entropy loss for model predictions,\n including the gradient\n\n Arguments:\n logits, np array, shape is either (N) or (batch_size, N) -\n classifier output\n target_index: np array of int, shape is (1) or (batch_size) -\n index of the true class for given sample(s)\n\n Returns:\n loss, single value - cross-entropy loss\n grad, np array same shape as predictions - gradient of predictions by loss value\n \"\"\"\n if len(logits.shape) == 1:\n logits = logits.copy().reshape(1, -1)\n \n probs = softmax(logits) \n loss = cross_entropy_loss(probs, target_index)\n b = probs.shape[0]\n k = probs.shape[1]\n target = np.zeros((b, k), dtype=np.float)\n if isinstance(target_index, int):\n target[np.arange(b), target_index] = 1.\n grad = probs - target\n grad = grad.reshape(k, )\n else:\n target[np.arange(b), target_index.flatten()] = 1.\n grad = (probs - target) / b\n return loss, grad\n\n\nclass Param:\n '''\n Trainable parameter of the model\n Captures both parameter value and the gradient\n '''\n def __init__(self, value):\n self.value = value\n self.grad = np.zeros_like(value)\n\n \nclass ReLULayer:\n def __init__(self):\n self.name = 'ReLU'\n self.x = None\n\n def forward(self, X):\n self.x = X\n result = X.copy()\n result[result < 0] = 0\n return result\n\n def backward(self, d_out):\n \"\"\"\n Backward pass\n\n Arguments:\n d_out, np array (batch_size, num_features) - gradient\n of loss function with respect to output\n\n Returns:\n d_result: np array (batch_size, num_features) - gradient\n with respect to input\n \"\"\"\n def heaviside(x):\n res = x.copy()\n res[res > 0] = 1\n res[res <= 0] = 0\n return res\n \n dx = d_out * heaviside(self.x) # dL / dx\n return dx\n\n def params(self):\n return {}\n \n def clear_grads(self):\n pass\n\n\nclass FullyConnectedLayer:\n def __init__(self, n_input, n_output):\n self.name = 'FC'\n self.W = Param(0.001 * np.random.randn(n_input, n_output))\n self.B = Param(0.001 * np.random.randn(1, n_output))\n self.X = None\n\n def forward(self, X):\n self.X = X\n result = np.dot(X, self.W.value) + self.B.value\n return result\n\n def backward(self, d_out):\n \"\"\"\n Backward pass\n Computes gradient with respect to input and\n accumulates gradients within self.W and self.B\n\n Arguments:\n d_out, np array (batch_size, n_output) - gradient\n of loss function with respect to output\n\n Returns:\n d_result: np array (batch_size, n_input) - gradient\n with respect to input\n \"\"\"\n d_input = np.dot(d_out, self.W.value.T)\n dW = np.dot(self.X.T, d_out)\n batch_size = d_out.shape[0]\n dB = np.dot(np.ones((1, batch_size)), d_out)\n self.W.grad += dW\n self.B.grad += dB\n return d_input\n\n def params(self):\n return {'W': self.W, 'B': self.B}\n \n def clear_grads(self):\n self.W.grad = np.zeros(self.W.value.shape)\n self.B.grad = np.zeros(self.B.value.shape)\n\n\nclass ConvolutionalLayer:\n def __init__(self, in_channels, n_filters,\n filter_size, padding, stride=1):\n '''\n Initializes the layer\n \n Arguments:\n in_channels, int - number of input 
channels\n        n_filters, int - number of output channels\n        filter_size, int - size of the conv filter\n        padding, int - number of 'pixels' to pad on each side\n        '''\n        self.X = None\n        self.filter_size = filter_size\n        self.in_channels = in_channels\n        self.n_filters = n_filters\n        self.W = Param(np.random.randn(\n            # Note: n_filters goes last here, not first!\n            filter_size, filter_size, in_channels, n_filters\n        ))\n        self.B = Param(np.zeros(n_filters))\n        self.padding = padding\n        self.stride = stride\n\n    def forward(self, X):\n        self.X = X.copy()\n        batch_size, input_height, input_width, in_channels = X.shape\n        out_height, out_width = self.get_output_shape(input_height, input_width)\n\n        # z ends up with a different shape in the end, see the bottom of this function!\n        z = np.zeros((batch_size, self.n_filters, out_height, out_width))\n        for y in range(out_height):\n            for x in range(out_width):\n                x_start = x * self.stride\n                x_end = x_start + self.filter_size\n                y_start = y * self.stride\n                y_end = y_start + self.filter_size\n                # Don't forget the reshape!\n                w = self.W.value.reshape(self.n_filters, self.filter_size, self.filter_size, in_channels)\n                filtered = w * X[:, np.newaxis, y_start:y_end, x_start:x_end, :]\n                z[:, :, y, x] = np.sum(filtered, axis=(2, 3, 4))\n\n        # n_filters goes last, not right after batch_size, so that the summation with self.B works\n        z = z.reshape(batch_size, out_height, out_width, self.n_filters)\n        z += self.B.value\n        return z\n\n    def backward(self, d_out):\n        # Hint: Forward pass was reduced to matrix multiply\n        # You already know how to backprop through that\n        # when you implemented FullyConnectedLayer\n        # Just do it the same number of times and accumulate gradients\n        if self.X is None:\n            raise Exception('Backward before forward')\n\n        batch_size, height, width, channels = self.X.shape\n        _, out_height, out_width, n_filters = d_out.shape\n\n        # TODO: Implement backward pass\n        # Same as forward, setup variables of the right shape that\n        # aggregate input gradient and fill them for every location\n        # of the output\n\n        # Try to avoid having any other loops here too\n\n        d_input = np.zeros_like(self.X).astype(np.float)\n        # Keep in mind that forward reshapes W!\n        for y in range(out_height):\n            for x in range(out_width):\n                # TODO: Implement backward pass for specific location\n                # Aggregate gradients for both the input and\n                # the parameters (W and B)\n                x_start = x * self.stride\n                x_end = x_start + self.filter_size\n                y_start = y * self.stride\n                y_end = y_start + self.filter_size\n# https://github.com/SkalskiP/ILearnDeepLearning.py/blob/master/01_mysteries_of_neural_networks/06_numpy_convolutional_neural_net/src/layers/convolutional.py#L103\n# It may be worth dropping the reduction to FC and doing it as a plain Conv instead ^\n                X_local = self.X[:, y_start:y_end, x_start:x_end, :]\n                original_local_shape = X_local.shape\n                hwc = self.filter_size * self.filter_size * self.in_channels\n                X_local = X_local.reshape(batch_size, hwc)\n                w = self.W.value.reshape(hwc, self.n_filters)\n                b = self.B.value.reshape(1, self.n_filters)\n                fc = FullyConnectedLayer(*w.shape)\n                fc.X = X_local\n                fc.W.value = w\n                fc.B.value = b\n                d_local_out = d_out[:, y, x, :]\n\n                d_local_input = fc.backward(d_local_out)\n                # This is where it goes wrong\n                d_local_input = d_local_input.reshape(*original_local_shape)\n                d_input[:, y_start:y_end, x_start:x_end, :] += d_local_input\n# # The index order is wrong somewhere for 2x2; 3x3 does not work at all\n                w_grad = fc.W.grad.reshape(*self.W.grad.shape)\n# w_grad = fc.W.grad.reshape(self.n_filters, self.filter_size, self.filter_size, self.in_channels)\n# w_grad = fc.W.grad.reshape(self.filter_size, self.filter_size, self.in_channels, self.n_filters)\n                self.W.grad += w_grad\n                # This part is fine\n                b_grad = fc.B.grad.reshape(*self.B.grad.shape)\n                self.B.grad += b_grad\n        return d_input\n\n    def params(self):\n        return {'W': self.W, 'B': self.B}\n\n    def get_output_shape(self, input_height, input_width):\n        out_height = 1 + (input_height - self.filter_size + 2 * self.padding) // self.stride\n        out_width = 1 + (input_width - self.filter_size + 2 * self.padding) // self.stride\n        return out_height, out_width\n\n\nclass MaxPoolingLayer:\n    def __init__(self, pool_size, stride):\n        '''\n        Initializes the max pool\n\n        Arguments:\n        pool_size, int - area to pool\n        stride, int - step size between pooling windows\n        '''\n        self.pool_size = pool_size\n        self.stride = stride\n        self.X = None\n\n    def forward(self, X):\n        batch_size, height, width, channels = X.shape\n        # TODO: Implement maxpool forward pass\n        # Hint: Similarly to Conv layer, loop on\n        # output x/y dimension\n        raise Exception(\"Not implemented!\")\n\n    def backward(self, d_out):\n        # TODO: Implement maxpool backward pass\n        batch_size, height, width, channels = self.X.shape\n        raise Exception(\"Not implemented!\")\n\n    def params(self):\n        return {}\n\n\nclass Flattener:\n    def __init__(self):\n        self.X_shape = None\n\n    def forward(self, X):\n        batch_size, height, width, channels = X.shape\n\n        # TODO: Implement forward pass\n        # Layer should return array with dimensions\n        # [batch_size, height*width*channels]\n        raise Exception(\"Not implemented!\")\n\n    def backward(self, d_out):\n        # TODO: Implement backward pass\n        raise Exception(\"Not implemented!\")\n\n    def params(self):\n        # No params!\n        return {}\n"
] |
[
[
"numpy.dot",
"numpy.arange",
"numpy.ones",
"numpy.zeros_like",
"numpy.random.randn",
"numpy.zeros",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
windstamp/PaddleSeg
|
[
"828808ea306adf2e8b94c291b77e7b7cf558bc2a",
"828808ea306adf2e8b94c291b77e7b7cf558bc2a",
"828808ea306adf2e8b94c291b77e7b7cf558bc2a"
] |
[
"pdseg/metrics.py",
"contrib/HumanSeg/bg_replace.py",
"slim/prune/eval_prune.py"
] |
[
"# coding: utf8\n# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\n\nclass ConfusionMatrix(object):\n \"\"\"\n Confusion Matrix for segmentation evaluation\n \"\"\"\n\n def __init__(self, num_classes=2, streaming=False):\n self.confusion_matrix = np.zeros([num_classes, num_classes],\n dtype='int64')\n self.num_classes = num_classes\n self.streaming = streaming\n\n def calculate(self, pred, label, ignore=None):\n # If not in streaming mode, clear matrix everytime when call `calculate`\n if not self.streaming:\n self.zero_matrix()\n\n label = np.transpose(label, (0, 2, 3, 1))\n ignore = np.transpose(ignore, (0, 2, 3, 1))\n mask = np.array(ignore) == 1\n\n label = np.asarray(label)[mask]\n pred = np.asarray(pred)[mask]\n one = np.ones_like(pred)\n # Accumuate ([row=label, col=pred], 1) into sparse matrix\n spm = csr_matrix((one, (label, pred)),\n shape=(self.num_classes, self.num_classes))\n spm = spm.todense()\n self.confusion_matrix += spm\n\n def zero_matrix(self):\n \"\"\" Clear confusion matrix \"\"\"\n self.confusion_matrix = np.zeros([self.num_classes, self.num_classes],\n dtype='int64')\n\n def mean_iou(self):\n iou_list = []\n avg_iou = 0\n # TODO: use numpy sum axis api to simpliy\n vji = np.zeros(self.num_classes, dtype=int)\n vij = np.zeros(self.num_classes, dtype=int)\n for j in range(self.num_classes):\n v_j = 0\n for i in range(self.num_classes):\n v_j += self.confusion_matrix[j][i]\n vji[j] = v_j\n\n for i in range(self.num_classes):\n v_i = 0\n for j in range(self.num_classes):\n v_i += self.confusion_matrix[j][i]\n vij[i] = v_i\n\n for c in range(self.num_classes):\n total = vji[c] + vij[c] - self.confusion_matrix[c][c]\n if total == 0:\n iou = 0\n else:\n iou = float(self.confusion_matrix[c][c]) / total\n avg_iou += iou\n iou_list.append(iou)\n avg_iou = float(avg_iou) / float(self.num_classes)\n return np.array(iou_list), avg_iou\n\n def accuracy(self):\n total = self.confusion_matrix.sum()\n total_right = 0\n for c in range(self.num_classes):\n total_right += self.confusion_matrix[c][c]\n if total == 0:\n avg_acc = 0\n else:\n avg_acc = float(total_right) / total\n\n vij = np.zeros(self.num_classes, dtype=int)\n for i in range(self.num_classes):\n v_i = 0\n for j in range(self.num_classes):\n v_i += self.confusion_matrix[j][i]\n vij[i] = v_i\n\n acc_list = []\n for c in range(self.num_classes):\n if vij[c] == 0:\n acc = 0\n else:\n acc = self.confusion_matrix[c][c] / float(vij[c])\n acc_list.append(acc)\n return np.array(acc_list), avg_acc\n\n def kappa(self):\n vji = np.zeros(self.num_classes)\n vij = np.zeros(self.num_classes)\n for j in range(self.num_classes):\n v_j = 0\n for i in range(self.num_classes):\n v_j += self.confusion_matrix[j][i]\n vji[j] = v_j\n\n for i in range(self.num_classes):\n v_i = 0\n for j in range(self.num_classes):\n v_i += self.confusion_matrix[j][i]\n vij[i] = v_i\n\n total = 
self.confusion_matrix.sum()\n\n # avoid spillovers\n # TODO: is it reasonable to hard code 10000.0?\n total = float(total) / 10000.0\n vji = vji / 10000.0\n vij = vij / 10000.0\n\n tp = 0\n tc = 0\n for c in range(self.num_classes):\n tp += vji[c] * vij[c]\n tc += self.confusion_matrix[c][c]\n\n tc = tc / 10000.0\n pe = tp / (total * total)\n po = tc / total\n\n kappa = (po - pe) / (1 - pe)\n return kappa\n",
"# coding: utf8\n# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport os.path as osp\nimport cv2\nimport numpy as np\n\nfrom utils.humanseg_postprocess import postprocess, threshold_mask\nimport models\nimport transforms\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='HumanSeg inference for video')\n parser.add_argument(\n '--model_dir',\n dest='model_dir',\n help='Model path for inference',\n type=str)\n parser.add_argument(\n '--image_path',\n dest='image_path',\n help='Image including human',\n type=str,\n default=None)\n parser.add_argument(\n '--background_image_path',\n dest='background_image_path',\n help='Background image for replacing',\n type=str,\n default=None)\n parser.add_argument(\n '--video_path',\n dest='video_path',\n help='Video path for inference',\n type=str,\n default=None)\n parser.add_argument(\n '--background_video_path',\n dest='background_video_path',\n help='Background video path for replacing',\n type=str,\n default=None)\n parser.add_argument(\n '--save_dir',\n dest='save_dir',\n help='The directory for saving the inference results',\n type=str,\n default='./output')\n parser.add_argument(\n \"--image_shape\",\n dest=\"image_shape\",\n help=\"The image shape for net inputs.\",\n nargs=2,\n default=[192, 192],\n type=int)\n\n return parser.parse_args()\n\n\ndef predict(img, model, test_transforms):\n model.arrange_transform(transforms=test_transforms, mode='test')\n img, im_info = test_transforms(img)\n img = np.expand_dims(img, axis=0)\n result = model.exe.run(\n model.test_prog,\n feed={'image': img},\n fetch_list=list(model.test_outputs.values()))\n score_map = result[1]\n score_map = np.squeeze(score_map, axis=0)\n score_map = np.transpose(score_map, (1, 2, 0))\n return score_map, im_info\n\n\ndef recover(img, im_info):\n keys = list(im_info.keys())\n for k in keys[::-1]:\n if k == 'shape_before_resize':\n h, w = im_info[k][0], im_info[k][1]\n img = cv2.resize(img, (w, h), cv2.INTER_LINEAR)\n elif k == 'shape_before_padding':\n h, w = im_info[k][0], im_info[k][1]\n img = img[0:h, 0:w]\n return img\n\n\ndef bg_replace(score_map, img, bg):\n h, w, _ = img.shape\n bg = cv2.resize(bg, (w, h))\n score_map = np.repeat(score_map[:, :, np.newaxis], 3, axis=2)\n comb = (score_map * img + (1 - score_map) * bg).astype(np.uint8)\n return comb\n\n\ndef infer(args):\n resize_h = args.image_shape[1]\n resize_w = args.image_shape[0]\n\n test_transforms = transforms.Compose(\n [transforms.Resize((resize_w, resize_h)),\n transforms.Normalize()])\n model = models.load_model(args.model_dir)\n\n if not osp.exists(args.save_dir):\n os.makedirs(args.save_dir)\n\n # 图像背景替换\n if args.image_path is not None:\n if not osp.exists(args.image_path):\n raise Exception('The --image_path is not existed: {}'.format(\n args.image_path))\n if args.background_image_path is None:\n raise Exception('The --background_image_path is not set. 
Please set it')\n else:\n if not osp.exists(args.background_image_path):\n raise Exception('The --background_image_path is not existed: {}'.format(\n args.background_image_path))\n img = cv2.imread(args.image_path)\n score_map, im_info = predict(img, model, test_transforms)\n score_map = score_map[:, :, 1]\n score_map = recover(score_map, im_info)\n bg = cv2.imread(args.background_image_path)\n save_name = osp.basename(args.image_path)\n save_path = osp.join(args.save_dir, save_name)\n result = bg_replace(score_map, img, bg)\n cv2.imwrite(save_path, result)\n\n # 视频背景替换,如果提供背景视频则以背景视频作为背景,否则采用提供的背景图片\n else:\n is_video_bg = False\n if args.background_video_path is not None:\n if not osp.exists(args.background_video_path):\n raise Exception('The --background_video_path is not existed: {}'.format(\n args.background_video_path))\n is_video_bg = True\n elif args.background_image_path is not None:\n if not osp.exists(args.background_image_path):\n raise Exception('The --background_image_path is not existed: {}'.format(\n args.background_image_path))\n else:\n raise Exception(\n 'Please offer backgound image or video. You should set --backbground_iamge_paht or --background_video_path'\n )\n\n disflow = cv2.DISOpticalFlow_create(\n cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)\n prev_gray = np.zeros((resize_h, resize_w), np.uint8)\n prev_cfd = np.zeros((resize_h, resize_w), np.float32)\n is_init = True\n if args.video_path is not None:\n print('Please wait. It is computing......')\n if not osp.exists(args.video_path):\n raise Exception('The --video_path is not existed: {}'.format(\n args.video_path))\n\n cap_video = cv2.VideoCapture(args.video_path)\n fps = cap_video.get(cv2.CAP_PROP_FPS)\n width = int(cap_video.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap_video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n save_name = osp.basename(args.video_path)\n save_name = save_name.split('.')[0]\n save_path = osp.join(args.save_dir, save_name + '.avi')\n\n cap_out = cv2.VideoWriter(\n save_path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps,\n (width, height))\n\n if is_video_bg:\n cap_bg = cv2.VideoCapture(args.background_video_path)\n frames_bg = cap_bg.get(cv2.CAP_PROP_FRAME_COUNT)\n current_frame_bg = 1\n else:\n img_bg = cv2.imread(args.background_image_path)\n while cap_video.isOpened():\n ret, frame = cap_video.read()\n if ret:\n score_map, im_info = predict(frame, model, test_transforms)\n cur_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cur_gray = cv2.resize(cur_gray, (resize_w, resize_h))\n score_map = 255 * score_map[:, :, 1]\n optflow_map = postprocess(cur_gray, score_map, prev_gray, prev_cfd, \\\n disflow, is_init)\n prev_gray = cur_gray.copy()\n prev_cfd = optflow_map.copy()\n is_init = False\n optflow_map = cv2.GaussianBlur(optflow_map, (3, 3), 0)\n optflow_map = threshold_mask(\n optflow_map, thresh_bg=0.2, thresh_fg=0.8)\n score_map = recover(optflow_map, im_info)\n\n #循环读取背景帧\n if is_video_bg:\n ret_bg, frame_bg = cap_bg.read()\n if ret_bg:\n if current_frame_bg == frames_bg:\n current_frame_bg = 1\n cap_bg.set(cv2.CAP_PROP_POS_FRAMES, 0)\n else:\n break\n current_frame_bg += 1\n comb = bg_replace(score_map, frame, frame_bg)\n else:\n comb = bg_replace(score_map, frame, img_bg)\n\n cap_out.write(comb)\n else:\n break\n\n if is_video_bg:\n cap_bg.release()\n cap_video.release()\n cap_out.release()\n\n # 当没有输入预测图像和视频的时候,则打开摄像头\n else:\n cap_video = cv2.VideoCapture(0)\n if not cap_video.isOpened():\n raise IOError(\"Error opening video stream or file, \"\n \"--video_path whether existing: {}\"\n \" 
or camera whether working\".format(\n args.video_path))\n return\n\n if is_video_bg:\n cap_bg = cv2.VideoCapture(args.background_video_path)\n frames_bg = cap_bg.get(cv2.CAP_PROP_FRAME_COUNT)\n current_frame_bg = 1\n else:\n img_bg = cv2.imread(args.background_image_path)\n while cap_video.isOpened():\n ret, frame = cap_video.read()\n if ret:\n score_map, im_info = predict(frame, model, test_transforms)\n cur_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cur_gray = cv2.resize(cur_gray, (resize_w, resize_h))\n score_map = 255 * score_map[:, :, 1]\n optflow_map = postprocess(cur_gray, score_map, prev_gray, prev_cfd, \\\n disflow, is_init)\n prev_gray = cur_gray.copy()\n prev_cfd = optflow_map.copy()\n is_init = False\n optflow_map = cv2.GaussianBlur(optflow_map, (3, 3), 0)\n optflow_map = threshold_mask(\n optflow_map, thresh_bg=0.2, thresh_fg=0.8)\n score_map = recover(optflow_map, im_info)\n\n #循环读取背景帧\n if is_video_bg:\n ret_bg, frame_bg = cap_bg.read()\n if ret_bg:\n if current_frame_bg == frames_bg:\n current_frame_bg = 1\n cap_bg.set(cv2.CAP_PROP_POS_FRAMES, 0)\n else:\n break\n current_frame_bg += 1\n comb = bg_replace(score_map, frame, frame_bg)\n else:\n comb = bg_replace(score_map, frame, img_bg)\n cv2.imshow('HumanSegmentation', comb)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n if is_video_bg:\n cap_bg.release()\n cap_video.release()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n infer(args)\n",
"# coding: utf8\n# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n# GPU memory garbage collection optimization flags\nos.environ['FLAGS_eager_delete_tensor_gb'] = \"0.0\"\n\nimport sys\n\nLOCAL_PATH = os.path.dirname(os.path.abspath(__file__))\nSEG_PATH = os.path.join(LOCAL_PATH, \"../../\", \"pdseg\")\nsys.path.append(SEG_PATH)\n\nimport time\nimport argparse\nimport functools\nimport pprint\nimport cv2\nimport numpy as np\nimport paddle\nimport paddle.fluid as fluid\n\nfrom utils.config import cfg\nfrom utils.timer import Timer, calculate_eta\nfrom models.model_builder import build_model\nfrom models.model_builder import ModelPhase\nfrom reader import SegDataset\nfrom metrics import ConfusionMatrix\n\nfrom paddleslim.prune import load_model\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='PaddleSeg model evalution')\n parser.add_argument(\n '--cfg',\n dest='cfg_file',\n help='Config file for training (and optionally testing)',\n default=None,\n type=str)\n parser.add_argument(\n '--use_gpu',\n dest='use_gpu',\n help='Use gpu or cpu',\n action='store_true',\n default=False)\n parser.add_argument(\n '--use_mpio',\n dest='use_mpio',\n help='Use multiprocess IO or not',\n action='store_true',\n default=False)\n parser.add_argument(\n 'opts',\n help='See utils/config.py for all options',\n default=None,\n nargs=argparse.REMAINDER)\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()\n\n\ndef evaluate(cfg, ckpt_dir=None, use_gpu=False, use_mpio=False, **kwargs):\n np.set_printoptions(precision=5, suppress=True)\n\n startup_prog = fluid.Program()\n test_prog = fluid.Program()\n dataset = SegDataset(\n file_list=cfg.DATASET.VAL_FILE_LIST,\n mode=ModelPhase.EVAL,\n data_dir=cfg.DATASET.DATA_DIR)\n\n def data_generator():\n #TODO: check is batch reader compatitable with Windows\n if use_mpio:\n data_gen = dataset.multiprocess_generator(\n num_processes=cfg.DATALOADER.NUM_WORKERS,\n max_queue_size=cfg.DATALOADER.BUF_SIZE)\n else:\n data_gen = dataset.generator()\n\n for b in data_gen:\n yield b[0], b[1], b[2]\n\n data_loader, avg_loss, pred, grts, masks = build_model(\n test_prog, startup_prog, phase=ModelPhase.EVAL)\n\n data_loader.set_sample_generator(\n data_generator, drop_last=False, batch_size=cfg.BATCH_SIZE)\n\n # Get device environment\n places = fluid.cuda_places() if use_gpu else fluid.cpu_places()\n place = places[0]\n dev_count = len(places)\n print(\"#Device count: {}\".format(dev_count))\n\n exe = fluid.Executor(place)\n exe.run(startup_prog)\n\n test_prog = test_prog.clone(for_test=True)\n\n ckpt_dir = cfg.TEST.TEST_MODEL if not ckpt_dir else ckpt_dir\n\n if not os.path.exists(ckpt_dir):\n raise ValueError('The TEST.TEST_MODEL {} is not found'.format(ckpt_dir))\n\n if ckpt_dir is not None:\n print('load test model:', ckpt_dir)\n 
load_model(exe, test_prog, ckpt_dir)\n\n # Use streaming confusion matrix to calculate mean_iou\n np.set_printoptions(\n precision=4, suppress=True, linewidth=160, floatmode=\"fixed\")\n conf_mat = ConfusionMatrix(cfg.DATASET.NUM_CLASSES, streaming=True)\n fetch_list = [avg_loss.name, pred.name, grts.name, masks.name]\n num_images = 0\n step = 0\n all_step = cfg.DATASET.TEST_TOTAL_IMAGES // cfg.BATCH_SIZE + 1\n timer = Timer()\n timer.start()\n data_loader.start()\n while True:\n try:\n step += 1\n loss, pred, grts, masks = exe.run(\n test_prog, fetch_list=fetch_list, return_numpy=True)\n\n loss = np.mean(np.array(loss))\n\n num_images += pred.shape[0]\n conf_mat.calculate(pred, grts, masks)\n _, iou = conf_mat.mean_iou()\n _, acc = conf_mat.accuracy()\n\n speed = 1.0 / timer.elapsed_time()\n\n print(\n \"[EVAL]step={} loss={:.5f} acc={:.4f} IoU={:.4f} step/sec={:.2f} | ETA {}\"\n .format(step, loss, acc, iou, speed,\n calculate_eta(all_step - step, speed)))\n timer.restart()\n sys.stdout.flush()\n except fluid.core.EOFException:\n break\n\n category_iou, avg_iou = conf_mat.mean_iou()\n category_acc, avg_acc = conf_mat.accuracy()\n print(\"[EVAL]#image={} acc={:.4f} IoU={:.4f}\".format(\n num_images, avg_acc, avg_iou))\n print(\"[EVAL]Category IoU:\", category_iou)\n print(\"[EVAL]Category Acc:\", category_acc)\n print(\"[EVAL]Kappa:{:.4f}\".format(conf_mat.kappa()))\n\n return category_iou, avg_iou, category_acc, avg_acc\n\n\ndef main():\n args = parse_args()\n if args.cfg_file is not None:\n cfg.update_from_file(args.cfg_file)\n if args.opts:\n cfg.update_from_list(args.opts)\n cfg.check_and_infer()\n print(pprint.pformat(cfg))\n evaluate(cfg, **args.__dict__)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.ones_like",
"numpy.asarray",
"scipy.sparse.csr_matrix",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
],
[
"numpy.expand_dims",
"numpy.squeeze",
"numpy.transpose",
"numpy.repeat",
"numpy.zeros"
],
[
"numpy.set_printoptions",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ikabanen/movement_recognition_and_record
|
[
"b2f31c40b8708509c15d3fed98f355835b887fa4"
] |
[
"movement_recogn_and_record.py"
] |
[
"import cv2\r\nimport time\r\nimport pandas\r\nfrom datetime import datetime\r\nimport numpy as np\r\n\r\nfirst_frame = None\r\nstatus_list = [None, None]\r\ntimes = []\r\ndf = pandas.DataFrame(columns=[\"Start\", \"End\"])\r\nvideo = cv2.VideoCapture(0)\r\n\r\n# Define the codec and create VideoWriter object\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\r\nout = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))\r\nframe_number = 0\r\n\r\nwhile True:\r\n ret, frame = video.read()\r\n frame_number += 1\r\n status = 0\r\n if not ret:\r\n print(\"Can't receive video\")\r\n break\r\n\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\r\n\r\n if first_frame is None:\r\n first_frame = gray\r\n continue\r\n\r\n delta_frame = cv2.absdiff(first_frame, gray)\r\n thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]\r\n thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)\r\n\r\n (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n for contour in cnts:\r\n if cv2.contourArea(contour) < 1000:\r\n continue\r\n status = 1\r\n\r\n (x, y, w, h) = cv2.boundingRect(contour)\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)\r\n status_list.append(status)\r\n if status_list[-1] == 1 and status_list[-2] == 0:\r\n times.append(datetime.now().strftime(\"%I-%M-%S_%p\"))\r\n\r\n if status_list[-1] == 0 and status_list[-2] == 1:\r\n times.append(datetime.now().strftime(\"%I-%M-%S_%p\"))\r\n\r\n cv2.imshow(\"Color Frame\", frame)\r\n out.write(frame)\r\n\r\n key = cv2.waitKey(1)\r\n\r\n if key == ord('q'):\r\n if status == 1:\r\n times.append(datetime.now().strftime(\"%I-%M-%S_%p\"))\r\n break\r\nprint(status_list)\r\nprint(times)\r\n\r\nfor i in range(0, len(times), 2):\r\n df = df.append({\"Start\": times[i], \"End\": times[i + 1]}, ignore_index=True)\r\nfile_name = datetime.now().strftime(\"%d-%m-%Y_%I-%M-%S_%p\")\r\ndf.to_csv(str(file_name) + \".csv\")\r\n\r\nvideo.release()\r\nout.release()\r\ncv2.destroyAllWindows()\r\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Gavin-G219/tensorflow
|
[
"9e0fa9578638f9147c0b180e6ea89d67d5c0bae3",
"5eb3d92fc5d7a0641ad5d1ad2b54870b6e5b5e58"
] |
[
"tensorflow/python/kernel_tests/control_flow_ops_py_test.py",
"tensorflow/python/autograph/impl/conversion.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OiR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=g-long-lambda\n\"\"\"Tests for tensorflow.ops.control_flow_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport time\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import device_lib\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import function as eager_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import cond_v2 # pylint: disable=unused-import\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_control_flow_ops\nfrom tensorflow.python.ops import gen_data_flow_ops\nfrom tensorflow.python.ops import gen_logging_ops\nfrom tensorflow.python.ops import gen_state_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import linalg_ops\nfrom tensorflow.python.ops import logging_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops import while_v2 # pylint: disable=unused-import\n# pylint: disable=unused-import\nimport tensorflow.python.ops.tensor_array_grad\n# pylint: enable=unused-import\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import adam\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.util import nest\n\n\ndef check_consumers(graph):\n \"\"\"Sanity check on the consumer list of the tensors.\"\"\"\n\n consumer_count = {}\n for op in graph.get_operations():\n for v in op.inputs:\n cnt = consumer_count.get(v, 0)\n consumer_count[v] = cnt + 1\n for k, v in consumer_count.items():\n if 
len(k.consumers()) != v:\n return False\n return True\n\n\ndef all_fetchables():\n tensor_names = []\n graph = ops.get_default_graph()\n for op in graph.get_operations():\n for t in op.outputs:\n if graph.is_fetchable(t):\n tensor_names.append(t.name)\n return tensor_names\n\n\ndef all_feedables():\n feedable_tensors = []\n graph = ops.get_default_graph()\n for op in graph.get_operations():\n for t in op.inputs:\n if graph.is_feedable(t):\n feedable_tensors.append(t)\n return feedable_tensors\n\n\ndef opt_cfg():\n return config_pb2.ConfigProto(\n allow_soft_placement=True,\n graph_options=config_pb2.GraphOptions(\n optimizer_options=config_pb2.OptimizerOptions(\n opt_level=config_pb2.OptimizerOptions.L1,\n do_function_inlining=True,\n do_constant_folding=True)))\n\n\ndef isum(s, maximum_iterations=None):\n i = constant_op.constant(0, name=\"i\")\n c = lambda i, s: math_ops.less(i, 10)\n b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]\n _, r_s = control_flow_ops.while_loop(\n c, b, [i, s], maximum_iterations=maximum_iterations)\n return r_s\n\n\n@test_util.with_control_flow_v2\nclass ControlFlowTest(test.TestCase):\n\n def testRefIdentity(self):\n with self.cached_session():\n v = variables.VariableV1(7)\n\n v = control_flow_ops._Identity(v)\n op = state_ops.assign(v, 9)\n v2 = control_flow_ops.with_dependencies([op], v)\n\n self.assertTrue(isinstance(v2, ops.Tensor))\n variables.global_variables_initializer().run()\n self.assertEqual(9, v2.eval())\n\n def testRefEnter(self):\n with self.cached_session():\n v = variables.VariableV1(7)\n\n enter_v = control_flow_ops._Enter(v, \"foo_1\", is_constant=True)\n nine = constant_op.constant(9)\n enter_nine = gen_control_flow_ops.enter(nine, \"foo_1\")\n op = state_ops.assign(enter_v, enter_nine)\n v2 = control_flow_ops.with_dependencies([op], enter_v)\n v3 = control_flow_ops.exit(v2)\n variables.global_variables_initializer().run()\n self.assertEqual(9, v3.eval())\n\n def testRefSwitch(self):\n with self.cached_session():\n v = variables.VariableV1(7)\n\n p = constant_op.constant(True)\n v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access\n v2 = state_ops.assign(v1[1], 9)\n variables.global_variables_initializer().run()\n self.assertEqual(9, v2.eval())\n\n def testEnterMulExit(self):\n with self.cached_session():\n data = constant_op.constant([1, 2, 3, 4, 5, 6], name=\"data\")\n enter_data = gen_control_flow_ops.enter(data, \"foo_1\", False)\n five = constant_op.constant(5)\n enter_five = gen_control_flow_ops.enter(five, \"foo_1\", False)\n mul_op = math_ops.multiply(enter_data, enter_five)\n exit_op = control_flow_ops.exit(mul_op)\n\n result = exit_op.eval()\n self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)\n\n def testEnterShapePropagation(self):\n with self.cached_session():\n v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)\n\n # If is_constant=True, the shape information should be propagated.\n enter_v_constant = gen_control_flow_ops.enter(\n v, \"frame1\", is_constant=True)\n self.assertEqual(enter_v_constant.shape, [2])\n\n # Otherwise, the shape should be unknown.\n enter_v_non_constant = gen_control_flow_ops.enter(\n v, \"frame2\", is_constant=False)\n self.assertEqual(enter_v_non_constant.shape, None)\n\n def testSwitchMergeIndexedSlices(self):\n with self.cached_session():\n values = constant_op.constant([1, 2, 3, 4, 5, 6])\n indices = constant_op.constant([0, 2, 4, 6, 8, 10])\n data = ops.IndexedSlices(values, indices)\n pred = 
ops.convert_to_tensor(True)\n switch_op = control_flow_ops.switch(data, pred)\n merge_op = control_flow_ops.merge(switch_op)[0]\n\n val = merge_op.values.eval()\n ind = merge_op.indices.eval()\n self.assertAllEqual(np.arange(1, 7), val)\n self.assertAllEqual(np.arange(0, 12, 2), ind)\n\n def testSwitchDeadBranch(self):\n with self.cached_session():\n data = constant_op.constant([1, 2, 3, 4, 5, 6], name=\"data\")\n ports = ops.convert_to_tensor(True, name=\"ports\")\n switch_op = control_flow_ops.switch(data, ports)\n dead_branch = array_ops.identity(switch_op[0])\n\n with self.assertRaisesWithPredicateMatch(\n errors_impl.InvalidArgumentError,\n lambda e: \"Retval[0] does not have value\" in str(e)):\n dead_branch.eval()\n\n def testSwitchMergeLess(self):\n with self.cached_session():\n data = constant_op.constant([1, 2, 3, 4, 5, 6], name=\"data\")\n zero = ops.convert_to_tensor(0)\n one = ops.convert_to_tensor(1)\n less_op = math_ops.less(zero, one)\n switch_op = control_flow_ops.switch(data, less_op)\n merge_op = control_flow_ops.merge(switch_op)[0]\n\n result = merge_op.eval()\n self.assertAllEqual(np.arange(1, 7), result)\n\n def testSwitchMergeAddIdentity(self):\n with self.cached_session():\n data = constant_op.constant([1, 2, 3, 4, 5, 6], name=\"data\")\n ports = ops.convert_to_tensor(False, name=\"ports\")\n switch_op = control_flow_ops.switch(data, ports)\n one = constant_op.constant(1)\n add_op = math_ops.add(switch_op[0], one)\n id_op = array_ops.identity(switch_op[1])\n merge_op = control_flow_ops.merge([add_op, id_op])[0]\n\n result = merge_op.eval()\n self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)\n\n def testSwitchMergeAddMul(self):\n with self.cached_session():\n data = constant_op.constant([1, 2, 3, 4, 5, 6], name=\"data\")\n ports = ops.convert_to_tensor(True, name=\"ports\")\n switch_op = control_flow_ops.switch(data, ports)\n one = constant_op.constant(1)\n add_op = math_ops.add(switch_op[0], one)\n five = constant_op.constant(5)\n mul_op = math_ops.multiply(switch_op[1], five)\n merge_op = control_flow_ops.merge([add_op, mul_op])[0]\n\n result = merge_op.eval()\n self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)\n\n def testLoop_false(self):\n with self.cached_session():\n false = ops.convert_to_tensor(False)\n n = constant_op.constant(10)\n\n enter_false = gen_control_flow_ops.enter(false, \"foo_1\", False)\n enter_n = gen_control_flow_ops.enter(n, \"foo_1\", False)\n\n merge_n = control_flow_ops.merge([enter_n, enter_n], name=\"merge_n\")[0]\n switch_n = control_flow_ops.switch(merge_n, enter_false)\n exit_n = control_flow_ops.exit(switch_n[0])\n next_n = control_flow_ops.next_iteration(switch_n[0])\n merge_n.op._update_input(1, next_n)\n\n result = exit_n.eval()\n self.assertAllEqual(10, result)\n\n def testLoop_1(self):\n with self.cached_session():\n zero = constant_op.constant(0)\n one = constant_op.constant(1)\n n = constant_op.constant(10)\n\n enter_i = gen_control_flow_ops.enter(zero, \"foo\", False)\n enter_one = gen_control_flow_ops.enter(one, \"foo\", True)\n enter_n = gen_control_flow_ops.enter(n, \"foo\", True)\n\n with ops.device(test.gpu_device_name()):\n merge_i = control_flow_ops.merge([enter_i, enter_i])[0]\n\n less_op = math_ops.less(merge_i, enter_n)\n cond_op = control_flow_ops.loop_cond(less_op)\n switch_i = control_flow_ops.switch(merge_i, cond_op)\n\n add_i = math_ops.add(switch_i[1], enter_one)\n\n next_i = control_flow_ops.next_iteration(add_i)\n merge_i.op._update_input(1, next_i)\n\n 
exit_i = control_flow_ops.exit(switch_i[0])\n result = exit_i.eval()\n self.assertAllEqual(10, result)\n\n def testLoop_2(self):\n with self.cached_session():\n zero = constant_op.constant(0)\n one = constant_op.constant(1)\n n = constant_op.constant(10)\n\n enter_i = gen_control_flow_ops.enter(zero, \"foo\", False)\n enter_one = gen_control_flow_ops.enter(one, \"foo\", True)\n enter_n = gen_control_flow_ops.enter(n, \"foo\", True)\n\n merge_i = control_flow_ops.merge([enter_i, enter_i])[0]\n\n less_op = math_ops.less(merge_i, enter_n)\n cond_op = control_flow_ops.loop_cond(less_op)\n switch_i = control_flow_ops.switch(merge_i, cond_op)\n\n add_i = math_ops.add(switch_i[1], enter_one)\n\n with ops.device(test.gpu_device_name()):\n next_i = control_flow_ops.next_iteration(add_i)\n merge_i.op._update_input(1, next_i)\n\n exit_i = control_flow_ops.exit(switch_i[0])\n result = exit_i.eval()\n self.assertAllEqual(10, result)\n\n def testDifferentFrame(self):\n with self.cached_session():\n data = array_ops.placeholder(dtypes.float32, shape=[])\n enter_1 = gen_control_flow_ops.enter(data, \"foo_1\", False)\n enter_2 = gen_control_flow_ops.enter(data, \"foo_2\", False)\n res = math_ops.add(enter_1, enter_2)\n with self.assertRaisesOpError(\"has inputs from different frames\"):\n res.eval(feed_dict={data: 1.0})\n\n def testCondBool(self):\n values = constant_op.constant(10)\n fn1 = lambda: math_ops.add(values, 1)\n fn2 = lambda: math_ops.subtract(values, 1)\n with self.assertRaisesRegexp(TypeError, \"must not be a Python bool\"):\n _ = control_flow_ops.cond(False, fn1, fn2)\n\n def testCondInt(self):\n p = array_ops.placeholder(dtypes.bool, shape=[])\n v = constant_op.constant(10)\n fn1 = lambda: math_ops.add(v, 1)\n fn2 = lambda: math_ops.subtract(v, 1)\n y = control_flow_ops.cond(p, fn1, fn2)\n grad = gradients_impl.gradients(y, [v])\n self.assertAllEqual([None], grad)\n\n def testCondOutputShape(self):\n x = constant_op.constant(1.0)\n b = control_flow_ops.cond(\n constant_op.constant(True), lambda: math_ops.square(x),\n lambda: math_ops.subtract(x, 1.))\n self.assertEqual(b.shape, tensor_shape.scalar())\n\n def testFetchable(self):\n with self.cached_session() as sess:\n x = array_ops.placeholder(dtypes.float32)\n control_flow_ops.cond(\n constant_op.constant(True), lambda: x + 2, lambda: x + 0)\n graph = ops.get_default_graph()\n for op in graph.get_operations():\n for t in op.inputs:\n if graph.is_fetchable(t.op):\n sess.run(t, feed_dict={x: 3})\n else:\n with self.assertRaisesRegexp(ValueError,\n \"has been marked as not fetchable\"):\n sess.run(t, feed_dict={x: 3})\n\n @test_util.disable_control_flow_v2(\"Not relevant\")\n def testFeedable(self):\n with self.cached_session() as sess:\n c = constant_op.constant(2)\n i0 = constant_op.constant(0)\n r = control_flow_ops.while_loop(lambda i: i < 1000,\n lambda i: math_ops.square(c) + i, [i0])\n self.assertEqual(1000, r.eval(feed_dict={i0: 0}))\n feedable_tensors = all_feedables()\n for t in feedable_tensors:\n sess.run(r, feed_dict={t: 3})\n graph = ops.get_default_graph()\n for op in graph.get_operations():\n for t in op.inputs:\n if t not in feedable_tensors and t.dtype is dtypes.int32:\n with self.assertRaisesRegexp(ValueError, \"may not be fed\"):\n sess.run(r, feed_dict={t: 3})\n\n @test_util.disable_control_flow_v2(\"b/113296180 (IndexedSlices)\")\n def testCondIndexedSlices(self):\n with self.cached_session():\n values = constant_op.constant(10)\n indices = constant_op.constant(0)\n x = ops.IndexedSlices(values, indices)\n pred = 
math_ops.less(1, 2)\n fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)\n fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)\n r = control_flow_ops.cond(pred, fn1, fn2)\n\n val = r.values.eval()\n ind = r.indices.eval()\n self.assertAllEqual(11, val)\n self.assertAllEqual(0, ind)\n\n @test_util.disable_control_flow_v2(\"b/113296161 (SparseTensors)\")\n def testCondSparseTensor(self):\n with self.cached_session():\n values = constant_op.constant([2.0, 4.0], name=\"values\")\n indices = constant_op.constant(\n [[0], [3]], dtype=dtypes.int64, name=\"indices\")\n shape = constant_op.constant([10], dtype=dtypes.int64, name=\"dense_shape\")\n x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)\n pred = math_ops.less(1, 2)\n fn1 = lambda: sparse_tensor.SparseTensor(\n indices + 1, x.values + 1, dense_shape=shape)\n fn2 = lambda: sparse_tensor.SparseTensor(\n indices, x.values - 1, dense_shape=shape)\n r = control_flow_ops.cond(pred, fn1, fn2)\n self.assertAllEqual([3.0, 5.0], r.values.eval())\n self.assertAllEqual([[1], [4]], r.indices.eval())\n self.assertAllEqual(r.values.get_shape(), (2,))\n\n def testCondResource(self):\n\n with self.cached_session():\n rv = resource_variable_ops.ResourceVariable(True)\n variables.global_variables_initializer().run()\n t = ops.convert_to_tensor(1.0)\n\n def case():\n assign = resource_variable_ops.assign_variable_op(rv.handle, False)\n with ops.control_dependencies([assign]):\n return array_ops.identity(t)\n\n self.assertEqual(1.0, control_flow_ops.cond(rv, case, lambda: t).eval())\n\n @test_util.disable_control_flow_v2(\"b/113293074\")\n def testCondIndexedSlicesDifferentTypes(self):\n with self.cached_session():\n values = constant_op.constant(10)\n i_32 = ops.convert_to_tensor(0, name=\"one\", dtype=dtypes.int32)\n i_64 = ops.convert_to_tensor(0, name=\"one\", dtype=dtypes.int64)\n x = ops.IndexedSlices(values, i_32)\n pred = math_ops.less(1, 2)\n fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)\n fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)\n r = control_flow_ops.cond(pred, fn1, fn2)\n\n val = r.values.eval()\n ind = r.indices.eval()\n self.assertAllEqual(11, val)\n self.assertAllEqual(0, ind)\n self.assertTrue(ind.dtype == np.int64)\n\n def testCondColocation(self):\n with self.test_session(use_gpu=True):\n with ops.device(\"/cpu:0\"):\n v = variables.Variable(7.0)\n\n x = constant_op.constant(10.0)\n pred = math_ops.less(1.0, 2.0)\n fn1 = lambda: math_ops.add(v, 1.0)\n fn2 = lambda: math_ops.subtract(x, 1.0)\n r = control_flow_ops.cond(pred, fn1, fn2)\n\n for op in x.graph.get_operations():\n if op.name == \"cond/Add/Switch\":\n self.assertDeviceEqual(op.device, \"/cpu:0\")\n\n def _testCond_1(self, use_gpu):\n with self.test_session(use_gpu=use_gpu):\n x = constant_op.constant(10)\n pred = math_ops.less(1, 2)\n fn1 = lambda: math_ops.add(x, 1)\n fn2 = lambda: math_ops.subtract(x, 1)\n r = control_flow_ops.cond(pred, fn1, fn2)\n\n result = r.eval()\n self.assertAllEqual(11, result)\n\n def testCond_1(self):\n\n self._testCond_1(use_gpu=False)\n # TODO(b/116526896): Enable GPU tests.\n # self._testCond_1(use_gpu=True)\n\n def testCond_2(self):\n\n with self.cached_session():\n x = constant_op.constant(10)\n r = control_flow_ops.cond(\n math_ops.less(1, 0), lambda: math_ops.add(x, 1),\n lambda: math_ops.subtract(x, 1))\n result = r.eval()\n self.assertAllEqual(9, result)\n\n def testCond_3(self):\n\n with self.cached_session():\n x = constant_op.constant(10)\n 
pred = math_ops.less(1, 2)\n fn1 = lambda: math_ops.add(x, 1)\n fn2 = lambda: math_ops.subtract(x, 1)\n fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)\n r = control_flow_ops.cond(pred, fn3, fn2)\n\n result = r.eval()\n self.assertAllEqual(12, result)\n\n @test_util.disable_control_flow_v2(\"b/113324949 (ref vars)\")\n def testCond_4(self):\n with self.cached_session():\n v1 = variables.Variable(7)\n v2 = variables.Variable(7)\n v3 = variables.Variable(7)\n\n age = constant_op.constant(3)\n max_age = constant_op.constant(2)\n pred = math_ops.greater(age, max_age)\n fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]\n fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]\n r = control_flow_ops.cond(pred, fn1, fn2)\n\n variables.global_variables_initializer().run()\n self.assertEqual(len(r), 2)\n result = r[1].eval()\n self.assertAllEqual(True, result)\n self.assertAllEqual(7, v1.eval())\n self.assertAllEqual(2, v2.eval())\n self.assertAllEqual(7, v3.eval())\n\n def testCond_5(self):\n with self.cached_session():\n alive = constant_op.constant(True, name=\"alive\")\n count = constant_op.constant(0, name=\"count\")\n\n def body(i):\n return control_flow_ops.cond(\n alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],\n lambda: [alive, count])\n\n for i in range(10):\n alive, count = body(i)\n self.assertAllEqual(4, count.eval())\n\n def testCond_6(self):\n\n with self.cached_session():\n v1 = variables.Variable([7])\n\n age = constant_op.constant(3)\n pred = math_ops.greater(age, 4)\n fn1 = lambda: age\n fn2 = lambda: v1\n r = control_flow_ops.cond(pred, fn1, fn2)\n\n variables.global_variables_initializer().run()\n result = r.eval()\n self.assertAllEqual(np.array([7]), result)\n\n def testCond_7(self):\n with self.cached_session() as sess:\n x = constant_op.constant(10)\n y = constant_op.constant(200)\n pred = math_ops.less(1, 2)\n fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]\n fn2 = lambda: [y, y]\n r = control_flow_ops.cond(pred, fn1, fn2)\n self.assertAllEqual([11, 12], sess.run(r))\n\n def testCondRef(self):\n\n with self.cached_session():\n x = gen_state_ops.variable(\n shape=[1],\n dtype=dtypes.float32,\n name=\"x\",\n container=\"\",\n shared_name=\"\")\n true_fn = lambda: x\n false_fn = lambda: constant_op.constant([2.0])\n r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)\n self.assertAllEqual([2.0], r.eval())\n\n @test_util.disable_control_flow_v2(\"b/79881896 (control deps)\")\n def testCondWithControl(self):\n with self.cached_session():\n control_holder = array_ops.placeholder(dtypes.float32, shape=())\n a = constant_op.constant(3)\n\n def true_branch():\n with ops.control_dependencies([control_holder]):\n _ = a + 1\n return a + 2\n\n r = control_flow_ops.cond(\n constant_op.constant(True), true_branch,\n lambda: constant_op.constant(1))\n self.assertEqual(5, r.eval())\n\n def testUninitializedRefIdentity(self):\n with self.cached_session() as sess:\n v = gen_state_ops.variable(\n shape=[1],\n dtype=dtypes.float32,\n name=\"v\",\n container=\"\",\n shared_name=\"\")\n inited = state_ops.is_variable_initialized(v)\n v_f, v_t = control_flow_ops.ref_switch(v, inited)\n # Both v_f and v_t are uninitialized references. 
However, an actual use
      # of the reference in the 'true' branch in the 'tf.identity' op will
      # not 'fire' when v is uninitialized, so this is a valid construction.
      # This test verifies that ref_identity accepts an uninitialized ref as
      # input so that this construction is allowed.
      v_f_op = gen_array_ops.ref_identity(v_f)
      v_t_op = gen_array_ops.ref_identity(v_t)
      with ops.control_dependencies([v_f_op]):
        assign_v = state_ops.assign(v, [1.0])
      with ops.control_dependencies([v_t_op]):
        orig_v = array_ops.identity(v)
      merged_op = control_flow_ops.merge([assign_v, orig_v])
      self.assertAllEqual([1.0], sess.run(merged_op.output))

  @test_util.disable_control_flow_v2(
      "b/112477618 (Operation returned from cond)")
  def testCondSwitchIdentity(self):
    # Make sure the switch identity is not removed by optimization.
    with session.Session(config=opt_cfg()) as sess:
      pred = constant_op.constant(True)

      def fn1():
        return control_flow_ops.no_op()

      def fn2():
        return control_flow_ops.Assert(False, ["Wrong branch!!!"])

      r = control_flow_ops.cond(pred, fn1, fn2)
      sess.run(r)

  @test_util.disable_control_flow_v2(
      "b/112477618 (Operation returned from cond)")
  def testCondRecvIdentity(self):
    # Make sure the recv identity is not removed by optimization.
    with session.Session(config=opt_cfg()) as sess:
      with ops.device(test.gpu_device_name()):
        pred = constant_op.constant(True)

      def fn1():
        return control_flow_ops.no_op()

      def fn2():
        with ops.device("/cpu:0"):
          return control_flow_ops.Assert(False, ["Wrong branch!!!"])

      r = control_flow_ops.cond(pred, fn1, fn2)
      sess.run(r)

  def testCondGrad_1(self):
    with self.cached_session():
      x = constant_op.constant(10.0, name="x")
      pred = math_ops.less(1, 2)
      fn1 = lambda: array_ops.identity(x)
      fn2 = lambda: array_ops.identity(x)
      r = control_flow_ops.cond(pred, fn1, fn2)

      grad = gradients_impl.gradients(r, [x])[0]
      self.assertAllEqual(1.0, grad.eval())

  def testCondGrad_2(self):
    with self.cached_session():
      c = array_ops.placeholder(dtypes.int32, shape=[])
      x = constant_op.constant(10.0)
      pred = math_ops.less(c, 2)
      fn1 = lambda: math_ops.multiply(x, 42.0)
      fn2 = lambda: math_ops.multiply(x, 3.0)
      r = control_flow_ops.cond(pred, fn1, fn2)

      grad = gradients_impl.gradients(r, [x])[0]
      self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
      self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))

  @test_util.disable_control_flow_v2(
      "b/110550782 (gradient w.r.t external variable)")
  def testCondGrad_3(self):
    with self.cached_session():
      c = array_ops.placeholder(dtypes.int32, shape=[])
      ox = constant_op.constant(10.0)
      pred = math_ops.less(c, 2)

      def fn1(x):
        m = x * x
        return gradients_impl.gradients(m, [ox])[0]

      fn2 = lambda: math_ops.multiply(ox, 3.0)
      y = math_ops.multiply(7.0, ox)
      r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)

      self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
      self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))

  def testNestedCond_Simple(self):
    with self.cached_session():
      x = constant_op.constant(0., name="X")
      y = control_flow_ops.cond(
          constant_op.constant(True), lambda: x,
          lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
      result = gradients_impl.gradients(y, x)[0]
      self.assertEqual(1.0, result.eval())

      z = control_flow_ops.cond(
          constant_op.constant(False), lambda: x,
          lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
      result = gradients_impl.gradients(z, x)[0]
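      # Every branch of both the outer and the nested cond returns x itself,
      # so dz/dx is 1.0 even though the outer predicate is False this time.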
self.assertEqual(1.0, result.eval())\n\n @test_util.disable_control_flow_v2(\"b/113327884\")\n def testCondGrad_Gather(self):\n with self.cached_session() as sess:\n v1 = variables.Variable([1.0, 42.0])\n c = array_ops.placeholder(dtypes.int32, shape=[])\n pred = math_ops.less(c, 2)\n fn1 = lambda: array_ops.identity(v1)\n fn2 = lambda: array_ops.gather(v1, [1, 1])\n r = control_flow_ops.cond(pred, fn1, fn2)\n grad = gradients_impl.gradients(r, [v1])[0]\n variables.global_variables_initializer().run()\n # Should just be [1, 1], but possibly a sparse representation\n gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})\n dense_gv = [\n sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)\n ]\n self.assertAllEqual(dense_gv, [1.0, 1.0])\n # Should be [0, 2], as the else forwards v1[1] twice\n gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})\n dense_gv = [\n sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)\n ]\n self.assertAllEqual(dense_gv, [0.0, 2.0])\n\n # Microbenchmark: 256,000 iterations/s.\n @test_util.disable_control_flow_v2(\"b/116630618 (Times out)\")\n def testWhile_1(self):\n with self.cached_session():\n n = constant_op.constant(0)\n c = lambda x: math_ops.less(x, 10000)\n b = lambda x: math_ops.add(x, 1)\n r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)\n self.assertEqual(10000, r.eval())\n\n @test_util.disable_control_flow_v2(\"b/79881896 (control deps)\")\n def testWhileExternalControlDependencies(self):\n with self.cached_session():\n v = variables.Variable(0.0)\n v.initializer.run()\n increment = v.assign_add(1.0)\n\n def body_fn(i):\n with ops.control_dependencies([increment]):\n return i + 1\n\n result = control_flow_ops.while_loop(cond=lambda i: i < 2,\n body=body_fn, loop_vars=[1])\n self.assertAllEqual(result.eval(), 2)\n self.assertAllEqual(v.eval(), 1.0)\n\n @test_util.disable_control_flow_v2(\"b/79881896 (control deps)\")\n def testWhileExternalControlDependenciesNoInput(self):\n with self.cached_session():\n v = variables.Variable(0.0)\n v.initializer.run()\n increment = v.assign_add(1.0)\n\n def body_fn(unused_i):\n with ops.control_dependencies([increment]):\n return constant_op.constant(5, name=\"five\")\n\n result = control_flow_ops.while_loop(cond=lambda i: i < 5,\n body=body_fn, loop_vars=[0])\n result.eval()\n self.assertAllEqual(v.eval(), 1.0)\n\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileWithRefs_1(self):\n with self.cached_session() as sess:\n x = variables.VariableV1(0)._ref() # pylint: disable=protected-access\n i = constant_op.constant(0)\n c = lambda i, x: math_ops.less(i, 100)\n\n self.assertEqual(x.dtype, dtypes.int32_ref)\n\n def b(i, x):\n self.assertEqual(x.dtype, dtypes.int32_ref)\n return (i + 1, gen_array_ops.ref_identity(x))\n\n r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)\n\n variables.global_variables_initializer().run()\n\n self.assertEqual(r[0].dtype, dtypes.int32)\n self.assertEqual(r[1].dtype, dtypes.int32_ref)\n\n value_i, value_x = sess.run(r)\n\n self.assertEqual(100, value_i)\n self.assertEqual(0, value_x)\n\n def testWhile_2(self):\n with self.cached_session():\n s = constant_op.constant(0)\n r = isum(s)\n self.assertAllEqual(45, r.eval())\n\n @test_util.disable_control_flow_v2(\"b/115776323 (max_iters)\")\n def testWhileWithMaximumIterations(self):\n with self.cached_session():\n s = constant_op.constant([1, 2, 3, 4, 5])\n r = isum(s, maximum_iterations=3)\n self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 
+ 3, 5 + 3], r.eval())\n\n @test_util.disable_control_flow_v2(\"b/116339888 (non-tensor loop var)\")\n def testWhileWithMaximumIterationsAndSingleArgument(self):\n with self.cached_session():\n r = control_flow_ops.while_loop(\n lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)\n self.assertEqual(1, r.eval())\n\n @test_util.disable_control_flow_v2(\n \"b/116248044 (nested), b/115920078 (gradients)\")\n def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):\n v = constant_op.constant(1.0)\n\n def training_loop_with_gradient(i):\n out = control_flow_ops.while_loop(\n lambda i_, _: i_ < 3,\n lambda i_, j: [i_ + 1, j * v], [0, 1.0],\n maximum_iterations=i)\n g = gradients_impl.gradients(out, v)\n with ops.control_dependencies(g):\n return i + 1\n\n xla_context = control_flow_ops.XLAControlFlowContext()\n xla_context.Enter()\n # Create training loop, ensure we can call gradient() of\n # while_loop inside the training loop.\n loop = control_flow_ops.while_loop(lambda i: i < 3,\n training_loop_with_gradient, [0])\n xla_context.Exit()\n\n loop_execute = array_ops.identity(loop) # Because loop is not fetchable.\n\n # Should execute without issue.\n self.assertEqual(3, self.evaluate(loop_execute))\n\n @test_util.disable_control_flow_v2(\"b/116248044 (nested while_loop)\")\n def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):\n v = constant_op.constant(1.0)\n\n def inner_body(i, x):\n out = control_flow_ops.while_loop(\n lambda i, _: i < 3,\n lambda i, j: [i + 1, j * v], [0, x],\n maximum_iterations=i)\n return out\n\n def create_while_loop(maximum_iterations=None):\n return control_flow_ops.while_loop(\n lambda i, _: i < 3,\n inner_body, [0, 1.0],\n maximum_iterations=maximum_iterations)\n\n loop_no_xla = create_while_loop(maximum_iterations=5)\n # maximum_iterations is fine outside of an XLA scope\n gs = gradients_impl.gradients(loop_no_xla, v)\n self.evaluate(gs) # This should execute without error.\n\n xla_context = control_flow_ops.XLAControlFlowContext()\n xla_context.Enter()\n loop_no_maxiter = create_while_loop()\n loop_with_maxiter = create_while_loop(maximum_iterations=2)\n xla_context.Exit()\n\n with self.assertRaisesRegexp(\n ValueError,\n r\"Cannot create a gradient accumulator for tensor '.+' inside \"\n r\"XLA while_loop because maximum_iterations was not passed to \"\n r\"the tf.while_loop call \\('.+'\\).\"):\n _ = gradients_impl.gradients(loop_no_maxiter, v)\n\n with self.assertRaisesRegexp(\n ValueError,\n r\"Cannot create a gradient accumulator for tensor '.+' inside XLA \"\n r\"while_loop. maximum_iterations tensor '.+' for while_loop context \"\n r\"'.+' must be statically known \\(e.g. 
a constant value or known \"\n r\"shape dimension\\), or be defined at or outside the while loop \"\n r\"context '.*' \\(currently defined in '.*'\\)\"):\n _ = gradients_impl.gradients(loop_with_maxiter, v)\n\n @test_util.disable_control_flow_v2(\"b/115776323 (max_iters)\")\n def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):\n v = constant_op.constant(1.0)\n\n def create_while_loop():\n max_iter_holder = []\n\n def create_mi():\n max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))\n return 1.0\n\n _ = control_flow_ops.cond(\n constant_op.constant(True), create_mi, create_mi)\n\n return control_flow_ops.while_loop(\n lambda i, _: i < 3,\n lambda i, x: (i + 1, v * x), (0, 1.0),\n maximum_iterations=max_iter_holder[0])\n\n xla_context = control_flow_ops.XLAControlFlowContext()\n xla_context.Enter()\n loop = create_while_loop()\n xla_context.Exit()\n\n with self.assertRaisesRegexp(\n ValueError,\n r\"Cannot create a gradient accumulator for tensor '.+' inside XLA \"\n r\"while_loop. maximum_iterations tensor '.*Placeholder:0' for \"\n r\"while_loop context '.+' must be statically known \\(e.g. a constant \"\n r\"value or known shape dimension\\), or be defined at or outside the \"\n r\"while loop context '' \\(currently defined in 'cond/.+'\\)\"):\n _ = gradients_impl.gradients(loop, v)\n\n @test_util.disable_control_flow_v2(\n \"b/116248044 (nesting), b/115776323 (max_iters)\")\n def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):\n v = constant_op.constant(1.0)\n\n p = array_ops.placeholder(dtype=dtypes.int32)\n\n def mid_body_builder(iterations):\n\n def mid_body(i, x):\n r = control_flow_ops.while_loop(\n lambda *_: True,\n lambda i, x: (i + 1, v * x), (0, x),\n maximum_iterations=iterations,\n name=\"inner\")\n return (i + 1, gradients_impl.gradients(x + r[1], v)[0])\n\n return mid_body\n\n def outer_body(i, x):\n iterations = array_ops.size(p, name=\"iterations\")\n return (i + 1, x + control_flow_ops.while_loop(\n lambda *_: True,\n mid_body_builder(iterations), (0, x),\n maximum_iterations=iterations,\n name=\"mid\")[1])\n\n def create_while_loop():\n with ops.device(\"/cpu:0\"):\n r = control_flow_ops.while_loop(\n lambda *_: True,\n outer_body, (0, 1.0),\n maximum_iterations=5,\n name=\"outer\")\n return array_ops.identity(r[1])\n\n xla_context = control_flow_ops.XLAControlFlowContext()\n xla_context.Enter()\n final_with_xla_context = create_while_loop()\n xla_context.Exit()\n\n final_without_xla_context = create_while_loop()\n\n with self.test_session(use_gpu=False) as sess:\n opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n final_value_without_xla_context = sess.run(\n final_without_xla_context, feed_dict={\n p: [0, 0, 0]\n })\n\n final_value_with_xla_context = sess.run(\n final_with_xla_context,\n feed_dict={p: [0, 0, 0]},\n options=opts,\n run_metadata=run_metadata)\n\n node_stats = run_metadata.step_stats.dev_stats[0].node_stats\n stack_push_count = len(\n [x for x in node_stats if x.node_name.endswith(\"StackPushV2\")])\n # Pushes to the stack = product of maximum_iterations values;\n # the last two \"3\"s comes from size(p), when p == [0, 0, 0].\n self.assertEqual(stack_push_count, 5 * 3 * 3)\n\n self.assertAllClose(final_value_with_xla_context,\n final_value_without_xla_context)\n\n # Have more than 10 parallel iterations and hence exercise k-bound\n # most of the time.\n def testWhile_3(self):\n with self.cached_session():\n\n def 
compute(i, m, c, o):\n m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]\n o = math_ops.add(o, m)\n o = math_ops.add(o, c)\n i = math_ops.add(i, 1)\n return [i, m, c, o]\n\n i = ops.convert_to_tensor(0)\n m = ops.convert_to_tensor(0)\n c = ops.convert_to_tensor(0)\n o = ops.convert_to_tensor(0)\n d = ops.convert_to_tensor(100)\n r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),\n compute, [i, m, c, o])\n result = r[3].eval()\n self.assertAllEqual(10100, result)\n\n def testWhile_4(self):\n with self.cached_session():\n\n def compute(i, m, c, o):\n m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]\n o = math_ops.add(o, m)\n o = math_ops.add(o, c)\n i = math_ops.add(i, 1)\n return [i, m, c, o]\n\n i = ops.convert_to_tensor(0)\n m = ops.convert_to_tensor(0)\n c = ops.convert_to_tensor(0)\n o = ops.convert_to_tensor(0)\n x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])\n s = array_ops.size(x)\n r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),\n compute, [i, m, c, o])\n result = r[3].eval()\n self.assertAllEqual(42, result)\n\n def testWhile_5(self):\n with self.cached_session():\n\n def compute(i, c, o):\n c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),\n [1] + array_ops.expand_dims(i, 0))\n o = array_ops.concat([o, c], 0)\n i = math_ops.add(i, 1)\n return [i, c, o]\n\n i = ops.convert_to_tensor(0)\n c = ops.convert_to_tensor([0])\n o = ops.convert_to_tensor([0])\n x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])\n s = array_ops.size(x)\n r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),\n compute, [i, c, o], [\n i.get_shape(),\n tensor_shape.unknown_shape(),\n tensor_shape.unknown_shape()\n ])\n result = r[2].eval()\n self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)\n\n @test_util.disable_control_flow_v2(\"b/116338794 (buffer_reuse)\")\n def testBufferForwarding(self):\n run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n with self.cached_session() as sess:\n with ops.device(\"/cpu:0\"):\n c = constant_op.constant(2)\n i0 = constant_op.constant(0)\n r = control_flow_ops.while_loop(lambda i: i < 1000,\n lambda i: math_ops.square(c) + i, [i0])\n r_val = sess.run(r, options=run_options, run_metadata=run_metadata)\n self.assertEqual(1000, r_val)\n self.assertTrue(run_metadata.HasField(\"step_stats\"))\n unique_allocs = set()\n for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:\n for output in node_stat.output:\n unique_allocs.add(\n output.tensor_description.allocation_description.ptr)\n # Prior to cl/147536680, the number of unique allocations was about 1005.\n self.assertLess(len(unique_allocs), 756)\n\n def _testWhile_Gpu_1(self, use_gpu):\n with self.test_session(use_gpu=use_gpu):\n n = constant_op.constant(1.0)\n c = lambda x: math_ops.less(x, 10.0)\n b = lambda x: math_ops.add(x, 1.0)\n r = control_flow_ops.while_loop(c, b, [n])\n self.assertAllClose(10.0, r.eval())\n\n def testWhile_Gpu_1(self):\n self._testWhile_Gpu_1(use_gpu=False)\n self._testWhile_Gpu_1(use_gpu=True)\n\n def _testWhile_Gpu_2(self, use_gpu):\n with self.test_session(use_gpu=use_gpu):\n n = constant_op.constant(1.0)\n c = lambda x: math_ops.less(x, 10.0)\n\n def b(x):\n with ops.device(\"/cpu:0\"):\n return math_ops.add(x, 1.0)\n\n r = control_flow_ops.while_loop(c, b, [n])\n self.assertAllClose(10.0, r.eval())\n\n def testWhile_Gpu_2(self):\n self._testWhile_Gpu_2(use_gpu=False)\n self._testWhile_Gpu_2(use_gpu=True)\n\n def 
testWhileShape(self):\n with self.cached_session():\n i = constant_op.constant(0)\n m = array_ops.ones([2, 2])\n c = lambda i, j: math_ops.less(i, 2)\n\n def _b(i, j):\n new_i = math_ops.add(i, 1)\n new_j = array_ops.tile(j, [2, 2])\n return [new_i, new_j]\n\n r = control_flow_ops.while_loop(\n c, _b, [i, m],\n [i.get_shape(), tensor_shape.unknown_shape()])\n r = r[1] * array_ops.ones([8, 8])\n self.assertAllEqual(np.ones((8, 8)), r.eval())\n\n @test_util.disable_control_flow_v2(\"b/116339888 (non-tensor loop var)\")\n def testWhileWithNonTensorInput_Scalar(self):\n with self.cached_session():\n n = 0\n c = lambda x: x < 10000\n b = lambda x: x + 1\n r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)\n self.assertEqual(10000, r.eval())\n\n @test_util.disable_control_flow_v2(\"b/116339888 (non-tensor loop var)\")\n def testWhileWithNonTensorInput_Vector(self):\n with self.cached_session():\n n = np.array([0]) # Note, [0] would not work here; that is a list\n c = lambda x: x[0] < 10000\n b = lambda x: array_ops.stack([x[0] + 1])\n r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)\n self.assertEqual([10000], r.eval())\n\n def testWhileShapeInference(self):\n with self.cached_session():\n i = constant_op.constant(0)\n m = array_ops.ones([2, 2])\n c = lambda i, j: math_ops.less(i, 2)\n\n def b(i, j):\n new_i = math_ops.add(i, 1)\n new_j = array_ops.concat([j, j], 0)\n return [new_i, new_j]\n\n r = control_flow_ops.while_loop(\n c, b, [i, m],\n [i.get_shape(), tensor_shape.TensorShape([None, 2])])\n self.assertIsNone(r[1].get_shape()[0].value)\n self.assertEqual(r[1].get_shape()[1], tensor_shape.Dimension(2))\n\n with self.assertRaisesRegexp(\n ValueError,\n r\"Input tensor 'ones:0' enters the loop with shape \\(2, 2\\), but has \"\n r\"shape \\(4, 2\\) after one iteration. 
To allow the shape to vary \"\n r\"across iterations, use the `shape_invariants` argument of \"\n r\"tf.while_loop to specify a less-specific shape.\"):\n r = control_flow_ops.while_loop(c, b, [i, m])\n\n @test_util.disable_control_flow_v2(\"b/116328420 (SparseTensor)\")\n def testWhileShapeInferenceSparseTensor(self):\n with self.cached_session():\n values = constant_op.constant([2.0, 4.0], name=\"values\")\n indices = constant_op.constant(\n [[0], [3]], dtype=dtypes.int64, name=\"indices\")\n shape = constant_op.constant([10], dtype=dtypes.int64, name=\"dense_shape\")\n i = constant_op.constant(0)\n x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)\n\n def c(i, _):\n return i < 10\n\n def b(i, x):\n return [\n i + 1,\n sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)\n ]\n\n _, r = control_flow_ops.while_loop(c, b, [i, x])\n self.assertEqual(r.dense_shape.get_shape()[0].value, 1)\n\n _, r = control_flow_ops.while_loop(\n c, b, [i, x],\n [i.get_shape(), tensor_shape.TensorShape([None])])\n self.assertTrue(r.dense_shape.get_shape()[0].value is None)\n\n with self.assertRaisesRegexp(ValueError, \"is not compatible with\"):\n _, r = control_flow_ops.while_loop(\n c, b, [i, x],\n [i.get_shape(), tensor_shape.TensorShape([5])])\n\n @test_util.disable_control_flow_v2(\"b/116282023 (IndexedSlices)\")\n def testWhileShapeInferenceIndexedSlices(self):\n with self.cached_session():\n values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name=\"values\")\n indices = constant_op.constant([0, 3], name=\"indices\")\n shape = constant_op.constant([10, 2], name=\"dense_shape\")\n i = constant_op.constant(0)\n x = ops.IndexedSlices(values, indices, dense_shape=shape)\n\n def c(i, _):\n return i < 10\n\n def b(i, x):\n return [\n i + 1,\n ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)\n ]\n\n _, r = control_flow_ops.while_loop(c, b, [i, x])\n self.assertEqual(r.dense_shape.get_shape()[0].value, 2)\n self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))\n\n _, r = control_flow_ops.while_loop(\n c, b, [i, x],\n [i.get_shape(), tensor_shape.TensorShape([None, 2])])\n self.assertEqual(r.dense_shape.get_shape()[0].value, 2)\n self.assertTrue(r.values.get_shape()[0].value is None)\n self.assertEqual(r.values.get_shape()[1].value, 2)\n\n with self.assertRaisesRegexp(ValueError, \"is not compatible with\"):\n _, r = control_flow_ops.while_loop(\n c, b, [i, x],\n [i.get_shape(), tensor_shape.TensorShape([None, 5])])\n\n def _testNestedWhile_1(self, use_gpu):\n with self.test_session(use_gpu=use_gpu):\n n = constant_op.constant(0)\n\n def cpu_sum(s):\n c = lambda i, s: math_ops.less(i, 10)\n\n def b(i, s):\n i1 = math_ops.add(i, 1)\n with ops.device(\"/cpu:0\"):\n s1 = math_ops.add(i, s)\n return i1, s1\n\n _, r_s = control_flow_ops.while_loop(c, b, [n, s])\n return r_s\n\n c = lambda x: math_ops.less(x, 200)\n b = lambda x: math_ops.add(x, cpu_sum(n))\n r = control_flow_ops.while_loop(c, b, [n])\n self.assertEqual(225, r.eval())\n\n @test_util.disable_control_flow_v2(\"b/116248044 (nested while)\")\n def testNestedWhile_1(self):\n self._testNestedWhile_1(use_gpu=False)\n self._testNestedWhile_1(use_gpu=True)\n\n def _testNestedWhile_2(self, use_gpu):\n # Test the cases that A -> Enter and Exit -> A are partitioned.\n with self.test_session(use_gpu=use_gpu):\n s0 = constant_op.constant(2.0)\n\n def inner_loop(s):\n c = lambda s: math_ops.less(s, 20.0)\n\n def b(s):\n s1 = math_ops.add(s, s)\n return s1\n\n r_s = control_flow_ops.while_loop(c, b, 
[s], parallel_iterations=1)\n return r_s\n\n outer_c = lambda x: math_ops.less(x, 3000.0)\n\n def outer_b(x):\n x = logging_ops.Print(x, [x]) # Edge \"Print -> Enter\" is partitioned\n x = inner_loop(x)\n with ops.device(\"/cpu:0\"):\n x = math_ops.square(x) # Edge \"Exit -> Square\" is partitioned\n return x\n\n r = control_flow_ops.while_loop(\n outer_c, outer_b, [s0], parallel_iterations=1)\n self.assertEqual(1048576.0, r.eval())\n\n @test_util.disable_control_flow_v2(\"b/116248044 (nested while)\")\n def testNestedWhile_2(self):\n self._testNestedWhile_2(use_gpu=False)\n self._testNestedWhile_2(use_gpu=True)\n\n def testWhileWithControl_1(self):\n with self.cached_session():\n n = constant_op.constant(0)\n r = constant_op.constant(0)\n condition = lambda n_, r_: math_ops.less(n_, 10)\n\n def body(n_, r_):\n n_ = math_ops.add(n_, 1)\n with r_.graph.control_dependencies([r_]):\n r_ = constant_op.constant(12)\n return [n_, r_]\n\n res = control_flow_ops.while_loop(\n condition, body, [n, r], parallel_iterations=1)\n self.assertAllEqual(12, res[1].eval())\n\n def testWhileWithControl_2(self):\n with self.cached_session():\n r = constant_op.constant(0)\n condition = lambda r_: math_ops.less(r_, 10)\n\n def body(r_):\n with r_.graph.control_dependencies([r_]):\n r_ = constant_op.constant(12)\n return [r_]\n\n res = control_flow_ops.while_loop(\n condition, body, [r], parallel_iterations=1)\n self.assertAllEqual(12, res.eval())\n\n def testWhileWithControl_3(self):\n with self.cached_session() as sess:\n b = array_ops.placeholder(dtypes.bool)\n c = constant_op.constant(1)\n x0 = constant_op.constant(0)\n with ops.control_dependencies([b]):\n r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])\n self.assertEqual(10, sess.run(r, {b: True}))\n\n def testWhileWithControl_4(self):\n with self.cached_session() as sess:\n b = array_ops.placeholder(dtypes.bool)\n c = constant_op.constant(1)\n x0 = constant_op.constant(0)\n with ops.control_dependencies([b]):\n r = control_flow_ops.while_loop(\n lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])\n self.assertEqual(10, sess.run(r, {b: True}))\n\n @test_util.disable_control_flow_v2(\"b/79881896 (control_deps)\")\n def testWhileWithControl_5(self):\n with self.cached_session() as sess:\n b = array_ops.placeholder(dtypes.bool)\n c = constant_op.constant(1)\n x0 = constant_op.constant(0)\n\n def body(x):\n with ops.control_dependencies([b]):\n return x + c\n\n r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])\n self.assertEqual(10, sess.run(r, {b: True}))\n\n def testWhileCondWithControl(self):\n # Ensure that no control edges by an outer control dependency context are\n # added to nodes inside cond/while contexts.\n with self.cached_session() as sess:\n const_true = lambda: constant_op.constant(True)\n const_false = lambda: constant_op.constant(False)\n cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)\n body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)\n\n with ops.control_dependencies([control_flow_ops.no_op()]):\n loop = control_flow_ops.while_loop(cond, body,\n (constant_op.constant(5),))\n self.assertEqual(0, sess.run(loop))\n\n @test_util.disable_control_flow_v2(\"b/113324949 (ref vars)\")\n def testWhileCondWithControl_1(self):\n with self.cached_session():\n v = variable_scope.get_variable(\n \"v\", [], initializer=init_ops.constant_initializer(2))\n i0 = constant_op.constant(0)\n with ops.control_dependencies([i0]):\n\n def loop_condition(i):\n return i < 4\n\n 
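        # Worked expectation for the assertions below: the loop runs 4 times
        # and each iteration squares v, so v goes 2 -> 4 -> 16 -> 256 -> 65536,
        # i.e. v ends at 2 ** (2 ** 4).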
def loop_body(i):\n some_cond = control_flow_ops.cond(\n constant_op.constant(True),\n lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)\n with ops.control_dependencies([some_cond]):\n return i + 1\n\n r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))\n variables.global_variables_initializer().run()\n self.assertEqual(4, r.eval())\n self.assertAllClose(65536.0, v.eval())\n\n @test_util.disable_control_flow_v2(\"b/113324949 (ref vars)\")\n def testWhileCondExitControl(self):\n\n with self.cached_session():\n v = variables.Variable(1)\n\n def false_branch():\n cond = lambda i: i < 100\n\n def body(i):\n x = state_ops.assign(v, i)\n return x + 1\n\n loop = control_flow_ops.while_loop(cond, body, [0])\n # Make sure to handle correctly control edge from Exit to a node.\n with ops.control_dependencies([loop]):\n return constant_op.constant(6.0)\n\n r = control_flow_ops.cond(\n constant_op.constant(False), lambda: constant_op.constant(1.0),\n false_branch)\n variables.global_variables_initializer().run()\n self.assertEqual(6.0, r.eval())\n self.assertEqual(99, v.eval())\n\n def testCondWhile_1(self):\n\n with self.cached_session():\n n = ops.convert_to_tensor(0, name=\"n\")\n c = lambda x: math_ops.less(x, 10)\n b = lambda x: math_ops.add(x, 1)\n r = control_flow_ops.cond(\n math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),\n lambda: n)\n self.assertAllEqual(10, r.eval())\n\n def testCondWhile_2(self):\n\n with self.cached_session():\n n = ops.convert_to_tensor(0)\n c = lambda x: math_ops.less(x, 10)\n b = lambda x: math_ops.add(x, 1)\n r = control_flow_ops.cond(\n math_ops.less(1, 0), lambda: math_ops.add(n, 1),\n lambda: control_flow_ops.while_loop(c, b, [n]))\n self.assertAllEqual(10, r.eval())\n\n def _testCondWhile_3(self, use_gpu):\n with self.test_session(use_gpu=use_gpu) as sess:\n p = array_ops.placeholder(dtypes.bool)\n n = constant_op.constant(0.0)\n\n def c(x):\n return math_ops.less(x, 10.0)\n\n def b(x):\n with ops.device(\"/cpu:0\"):\n x1 = math_ops.add(x, 1.0)\n return x1\n\n r = control_flow_ops.cond(p,\n lambda: control_flow_ops.while_loop(c, b, [n]),\n lambda: math_ops.multiply(n, 2.0))\n r1 = gradients_impl.gradients(r, [n])\n self.assertEqual(10., sess.run(r, {p: True}))\n self.assertEqual([1.0], sess.run(r1, {p: True}))\n self.assertEqual(0.0, sess.run(r, {p: False}))\n self.assertEqual([2.0], sess.run(r1, {p: False}))\n\n @test_util.disable_control_flow_v2(\"b/116743589\")\n def testCondWhile_3(self):\n self._testCondWhile_3(use_gpu=False)\n self._testCondWhile_3(use_gpu=True)\n\n def testWhileCond_1(self):\n\n with self.cached_session():\n i = ops.convert_to_tensor(0, name=\"i\")\n n = ops.convert_to_tensor(10, name=\"n\")\n one = ops.convert_to_tensor(1, name=\"one\")\n c = lambda x: math_ops.less(x, n)\n # pylint: disable=undefined-variable\n # for OSS build\n b = lambda x: control_flow_ops.cond(\n constant_op.constant(True),\n lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))\n # pylint: enable=undefined-variable\n r = control_flow_ops.while_loop(c, b, [i])\n self.assertAllEqual(10, r.eval())\n\n def testWhileCond_2(self):\n\n with self.cached_session():\n n = ops.convert_to_tensor(0, name=\"n\")\n c = lambda x: math_ops.less(x, 10)\n b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)\n r = control_flow_ops.while_loop(c, b, [n])\n self.assertAllEqual(10, r.eval())\n\n def testWhileCond_3(self):\n\n with self.cached_session():\n n = ops.convert_to_tensor(0)\n c 
= lambda x: math_ops.less(x, 10)\n # pylint: disable=undefined-variable\n # for OSS build\n b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),\n lambda: math_ops.add(x, 1),\n lambda: math_ops.subtract(x, 1))\n # pylint: enable=undefined-variable\n r = control_flow_ops.while_loop(c, b, [n])\n self.assertAllEqual(10, r.eval())\n\n # NOTE: It is ok to have parallel_iterations > 1\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileUpdateVariable_1(self):\n with self.cached_session():\n select = variables.Variable([3.0, 4.0, 5.0])\n n = constant_op.constant(0)\n\n def loop_iterator(j):\n return math_ops.less(j, 3)\n\n def loop_body(j):\n ns = state_ops.scatter_update(select, j, 10.0)\n nj = math_ops.add(j, 1)\n op = control_flow_ops.group(ns)\n nj = control_flow_ops.with_dependencies([op], nj)\n return [nj]\n\n r = control_flow_ops.while_loop(\n loop_iterator, loop_body, [n], parallel_iterations=1)\n variables.global_variables_initializer().run()\n self.assertEqual(3, r.eval())\n result = select.eval()\n self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)\n\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileUpdateVariable_2(self):\n with self.cached_session():\n select1 = variables.Variable([3.0, 4.0, 5.0])\n select2 = variables.Variable([3.0, 4.0, 5.0])\n n = constant_op.constant(0)\n\n def loop_iterator(j):\n return math_ops.less(j, 3)\n\n def loop_body(j):\n ns1 = state_ops.scatter_update(select1, j, 10.0)\n ns2 = state_ops.scatter_update(select2, j, 10.0)\n nj = math_ops.add(j, 1)\n op = control_flow_ops.group(ns1, ns2)\n nj = control_flow_ops.with_dependencies([op], nj)\n return [nj]\n\n r = control_flow_ops.while_loop(\n loop_iterator, loop_body, [n], parallel_iterations=1)\n variables.global_variables_initializer().run()\n self.assertEqual(3, r.eval())\n result1 = select1.eval()\n self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)\n result2 = select2.eval()\n self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)\n\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileUpdateVariable_3(self):\n with self.cached_session():\n select = variables.Variable([3.0, 4.0, 5.0])\n n = constant_op.constant(0)\n\n def loop_iterator(j, _):\n return math_ops.less(j, 3)\n\n def loop_body(j, _):\n ns = state_ops.scatter_update(select, j, 10.0)\n nj = math_ops.add(j, 1)\n return [nj, ns]\n\n r = control_flow_ops.while_loop(\n loop_iterator,\n loop_body, [n, array_ops.identity(select)],\n parallel_iterations=1)\n variables.global_variables_initializer().run()\n result = r[1].eval()\n self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)\n\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileUpdateVariable_4(self):\n with self.cached_session():\n var_a = variables.Variable(0, name=\"a\")\n var_b = variables.Variable(0, name=\"b\")\n variables.global_variables_initializer().run()\n\n c = constant_op.constant(0, name=\"c\")\n asn1 = state_ops.assign_add(var_a, 1, name=\"a_add\")\n\n # Loop condition\n def pred(i):\n return math_ops.less(i, 10)\n\n # Loop body\n def loop_body(i):\n asn2 = state_ops.assign_add(var_b, asn1, name=\"b_add\")\n with ops.control_dependencies([asn2]):\n ni = math_ops.add(i, 1, name=\"i_add\")\n return ni\n\n lpa = control_flow_ops.while_loop(\n pred, loop_body, [c], parallel_iterations=1)\n\n self.assertEqual(0, var_b.eval())\n lpa.eval() # Run the loop\n self.assertEqual(10, var_b.eval())\n\n 
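  # Note on the variable-update tests around here: in testWhileUpdateVariable_4
  # the assign_add on var_a is built outside the loop body, so its value (1) is
  # captured once and added on each of the 10 iterations, leaving var_b == 10.
  # In testWhileUpdateVariable_6 the assign_add is built inside the body, so it
  # re-executes every iteration and var_b accumulates 1 + 2 + ... + 10 == 55.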
@test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileUpdateVariable_5(self):\n with self.cached_session():\n # Create some variables.\n var_a = variables.Variable(0, name=\"a\")\n var_b = variables.Variable(0, name=\"b\")\n variables.global_variables_initializer().run()\n\n # Change condition to check var_b\n def pred(_):\n return math_ops.less(var_b, 10)\n\n # Change body to increment var_b\n def loop_body(i):\n asn1 = state_ops.assign_add(\n var_a, constant_op.constant(1), name=\"a_add\")\n asn2 = state_ops.assign_add(\n var_b, constant_op.constant(1), name=\"b_add\")\n with ops.control_dependencies([asn1, asn2]):\n inc_b = array_ops.identity(var_b)\n return inc_b\n\n lpa = control_flow_ops.while_loop(\n pred, loop_body, [var_b], parallel_iterations=1, name=\"loop\")\n\n self.assertEqual(0, var_b.eval())\n lpa.eval() # Run the loop\n self.assertEqual(10, var_a.eval())\n self.assertEqual(10, var_b.eval())\n\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileUpdateVariable_6(self):\n with self.cached_session():\n # Create some variables.\n var_a = variables.Variable(0, name=\"a\")\n var_b = variables.Variable(0, name=\"b\")\n c = constant_op.constant(0)\n variables.global_variables_initializer().run()\n\n # Loop condition\n def pred(i):\n return math_ops.less(i, 10)\n\n # Loop body\n def loop_body(i):\n asn1 = state_ops.assign_add(var_a, 1, name=\"a_add\")\n with ops.control_dependencies([asn1]):\n asn2 = state_ops.assign_add(var_b, var_a, name=\"b_add\")\n with ops.control_dependencies([asn2]):\n ni = math_ops.add(i, 1, name=\"i_add\")\n return ni\n\n lpa = control_flow_ops.while_loop(\n pred, loop_body, [c], parallel_iterations=1, name=\"loop\")\n\n self.assertEqual(0, var_b.eval())\n lpa.eval() # Run the loop\n self.assertEqual(55, var_b.eval())\n self.assertEqual(10, var_a.eval())\n\n @test_util.disable_control_flow_v2(\"b/116742472 (resource accumulator)\")\n def testWhileQueue_1(self):\n with self.cached_session():\n q = data_flow_ops.FIFOQueue(-1, dtypes.int32)\n i = constant_op.constant(0)\n\n def c(i):\n return math_ops.less(i, 10)\n\n def b(i):\n ni = math_ops.add(i, 1)\n ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)\n return ni\n\n r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)\n self.assertEqual([10], r.eval())\n for i in xrange(10):\n self.assertEqual([i], q.dequeue().eval())\n\n @test_util.disable_control_flow_v2(\"b/117119329 (stack)\")\n def testWhileStack_1(self):\n with self.cached_session():\n s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name=\"foo\")\n i = constant_op.constant(0)\n\n def c(i):\n return math_ops.less(i, 10)\n\n def b(i):\n ni = math_ops.add(i, 1)\n ni = control_flow_ops.with_dependencies(\n [gen_data_flow_ops.stack_push_v2(s, i)], ni)\n return ni\n\n r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)\n\n x = constant_op.constant(0)\n\n def c1(i, _):\n return math_ops.greater(i, 0)\n\n def b1(i, x):\n ni = math_ops.subtract(i, 1)\n nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)\n return [ni, nx]\n\n _, rx = control_flow_ops.while_loop(\n c1,\n b1, [r, x],\n [r.get_shape(), tensor_shape.unknown_shape()],\n parallel_iterations=1)\n self.assertEqual(45, rx.eval())\n\n def _testWhileGrad_ColocateGradients(self, colocate):\n gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(\n ) else \"/device:CPU:0\"\n\n graph = ops.Graph()\n with graph.as_default():\n v = constant_op.constant(2.0, name=\"v\")\n c = lambda v: 
math_ops.less(v, 100.0)

      def b(x):
        with ops.device(gpu_dev_name):
          return math_ops.square(x)

      loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = gradients_impl.gradients(
          loop, v, colocate_gradients_with_ops=colocate)[0]

    r_ops = graph.get_operations()
    r_devices = [(op.name, op.device) for op in r_ops]

    self.assertTrue(any("Square" in op.name for op in r_ops))

    for (name, dev) in r_devices:
      if not colocate and name.endswith("Square"):
        # Only the forward graph's Square op is placed on the gpu device
        self.assertTrue(gpu_dev_name in dev)
      elif colocate and "Square" in name:
        # Forward and backward graphs place Square/Square_grad on the gpu device
        self.assertTrue(gpu_dev_name in dev)
      else:
        self.assertFalse(gpu_dev_name in dev)

    with self.session(graph=graph) as sess:
      self.assertAllClose(1024.0, sess.run(r))

  @test_util.disable_control_flow_v2("b/116351701 (colocation)")
  def testWhileGrad_ColocateGradients(self):
    self._testWhileGrad_ColocateGradients(colocate=False)
    self._testWhileGrad_ColocateGradients(colocate=True)

  def testWhileGrad_Square(self):
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)

      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(1024.0, r.eval())

  def testWhileGrad_Shape(self):
    with self.cached_session():
      x = array_ops.placeholder(dtypes.float32, shape=[None])
      v = constant_op.constant([2.0], name="v")
      n = constant_op.constant(0, name="n")
      c = lambda i, v: math_ops.less(i, 5)
      b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
      r = control_flow_ops.while_loop(
          c,
          b, [n, v],
          [n.get_shape(), tensor_shape.unknown_shape()],
          parallel_iterations=1)

      r = gradients_impl.gradients(r[1], x)[0]
      self.assertEqual([None], r.get_shape().as_list())
      self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))

  def testWhileGrad_BaseShape(self):
    with self.cached_session() as sess:
      x = array_ops.placeholder(dtypes.float32, [None])
      v0 = constant_op.constant([2.0, 2.0], name="v")
      c = lambda v: constant_op.constant(False)
      b = lambda v: math_ops.multiply(v, x)
      r = control_flow_ops.while_loop(c, b, [v0])
      y = math_ops.square(x)

      r = gradients_impl.gradients([r, y], x)[0]
      self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))

  def testWhileGrad_MultipleUses(self):
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = math_ops.multiply(r, r)

      r = gradients_impl.gradients(r, v)[0]
      self.assertEqual(524288.0, r.eval())

  def testWhileGrad_LoopAdd(self):
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
      r = math_ops.add(r, r)

      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(2048.0, r.eval())

  def _testWhileGrad_Mul(self, use_gpu, p_iters):
    with self.test_session(use_gpu=use_gpu) as sess:
      a = constant_op.constant(3.0, name="a")
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = lambda v: math_ops.multiply(v, a)
      r = 
control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)\n\n grad_a, grad_v = gradients_impl.gradients(r, [a, v])\n grad_a_val, grad_v_val = sess.run([grad_a, grad_v])\n self.assertAllClose(216.0, grad_a_val)\n self.assertAllClose(81.0, grad_v_val)\n\n def testWhileGrad_Mul(self):\n self._testWhileGrad_Mul(use_gpu=False, p_iters=1)\n self._testWhileGrad_Mul(use_gpu=False, p_iters=10)\n self._testWhileGrad_Mul(use_gpu=True, p_iters=1)\n self._testWhileGrad_Mul(use_gpu=True, p_iters=10)\n\n def _testNestedWhileCondWhileGrad(self, use_gpu):\n\n with self.test_session(use_gpu=use_gpu):\n v = constant_op.constant(1.0)\n\n def inner_loop(s):\n z = constant_op.constant(0)\n c = lambda i, x: math_ops.less(i, 4)\n b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]\n return control_flow_ops.while_loop(c, b, [z, s])\n\n c = lambda x: math_ops.less(x, 128.0)\n\n def b(x):\n return control_flow_ops.cond(\n constant_op.constant(True),\n lambda: math_ops.square(inner_loop(x)[1]),\n lambda: math_ops.multiply(x, 2.0))\n\n r = control_flow_ops.while_loop(c, b, [v])\n r = gradients_impl.gradients(r, v)[0]\n self.assertAllClose(512.0, r.eval())\n\n @test_util.disable_control_flow_v2(\"b/116248044 (nested while)\")\n def testNestedWhileCondWhileGrad(self):\n self._testNestedWhileCondWhileGrad(use_gpu=False)\n self._testNestedWhileCondWhileGrad(use_gpu=True)\n\n @test_util.disable_control_flow_v2(\"b/116823782\")\n def testWhileGrad_Variable(self):\n with self.cached_session():\n a = variables.Variable(3.0)\n v = constant_op.constant(2.0, name=\"v\")\n c = lambda v: math_ops.less(v, 100.0)\n b = lambda v: math_ops.multiply(v, a)\n r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)\n\n r = gradients_impl.gradients(r, a)\n variables.global_variables_initializer().run()\n self.assertAllClose(216.0, r[0].eval())\n\n def testWhileGradInCond(self):\n\n with self.cached_session():\n n = ops.convert_to_tensor(1.0, name=\"n\")\n x = array_ops.placeholder(dtypes.float32, shape=None)\n c = lambda n: math_ops.less(n, 10.0)\n b = lambda n: math_ops.add(n, x)\n\n def fn1():\n r = control_flow_ops.while_loop(c, b, [n],\n [tensor_shape.unknown_shape()])\n return gradients_impl.gradients(r, x)\n\n r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)\n self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))\n\n @test_util.disable_control_flow_v2(\"b/116340060\")\n def testGradInWhileWrtInitialLoopVal(self):\n with self.cached_session():\n x = array_ops.placeholder(dtypes.float32, shape=(), name=\"x\")\n y = x + 1\n\n def body(i, v):\n z = v * 2\n return i + 1, gradients_impl.gradients(z, x)[0]\n\n with self.assertRaisesRegexp(\n ValueError,\n \"Cannot compute gradient inside while loop with respect to op 'x'. \"\n \"We do not support taking the gradient wrt or through the initial \"\n \"value of a loop variable. 
Gradients can be computed through \"\n \"loop invariants or wrt the input parameters to the loop body.\"):\n control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])\n\n @test_util.disable_control_flow_v2(\"b/116248044 (nested while)\")\n def testWhileGradInWhile(self):\n with self.cached_session():\n n = ops.convert_to_tensor(1.0, name=\"n\")\n x = array_ops.placeholder(dtypes.float32, shape=None)\n c = lambda n: math_ops.less(n, 10.0)\n b = lambda n: math_ops.add(n, x)\n\n def b1(n):\n r = control_flow_ops.while_loop(c, b, [n],\n [tensor_shape.unknown_shape()])\n return gradients_impl.gradients(r, x)\n\n r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],\n [tensor_shape.unknown_shape()])\n self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))\n\n @test_util.disable_control_flow_v2(\"b/116248044 (nested while)\")\n def testCondGradInNestedWhiles(self):\n\n def outer_body(i, x):\n _, x = control_flow_ops.while_loop(\n lambda j, x: j < 3, inner_body, [0, 0.0])\n return i + 1, x\n\n def inner_body(j, x):\n y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)\n return j + 1, gradients_impl.gradients(y, x)[0]\n\n i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])\n\n with self.cached_session() as sess:\n i_val, x_val = sess.run([i, x])\n self.assertEqual(i_val, 3)\n self.assertAllClose(x_val, 1.0)\n\n @test_util.disable_control_flow_v2(\"b/116255781 (flat_args)\")\n def testWhile_NestedInput(self):\n with self.cached_session() as sess:\n named = collections.namedtuple(\"named\", (\"a\", \"b\"))\n loop_vars = [\n named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),\n (constant_op.constant(2.0), constant_op.constant(3.0)),\n constant_op.constant(4.0)\n ]\n c = lambda lv0, _1, _2: lv0.a < 100.0\n\n def b(lv0, lv1, lv2):\n lv0 = named(a=lv0.a + 1, b=lv0.b)\n lv1 = (lv1[0] + 1, lv1[1])\n lv2 += 2\n return [lv0, lv1, lv2]\n\n r = control_flow_ops.while_loop(c, b, loop_vars)\n\n self.assertTrue(isinstance(r, list))\n self.assertTrue(isinstance(r[0], named))\n self.assertTrue(isinstance(r[1], tuple))\n self.assertTrue(isinstance(r[2], ops.Tensor))\n\n r_flattened = nest.flatten(r)\n self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],\n sess.run(r_flattened))\n\n @test_util.disable_control_flow_v2(\"b/116255781(flat_args)\")\n def testWhile_NestedBadArityFails(self):\n with self.cached_session():\n named = collections.namedtuple(\"named\", (\"a\", \"b\"))\n loop_vars = [\n named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),\n (constant_op.constant(2.0), constant_op.constant(3.0)),\n constant_op.constant(4.0)\n ]\n c = lambda lv0, _1, _2: lv0.a < 100.0\n\n def b(lv0, lv1, _):\n return [lv0, lv1]\n\n with self.assertRaisesRegexp(ValueError, \"the same number of elements\"):\n control_flow_ops.while_loop(c, b, loop_vars)\n\n def testWhileGrad_ys_xs(self):\n with self.cached_session():\n x = constant_op.constant(3.0, name=\"x\")\n y = constant_op.constant(2.0, name=\"y\")\n\n c = lambda x, y: math_ops.less(x, 100.0)\n\n def b(x, y):\n y1 = math_ops.add(x, y)\n x1 = math_ops.multiply(x, y1)\n return x1, y1\n\n rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)\n\n r = gradients_impl.gradients([rx, ry], x)\n self.assertAllClose(304.0, r[0].eval())\n r = gradients_impl.gradients([rx, ry], y)\n self.assertAllClose(124.0, r[0].eval())\n r = gradients_impl.gradients([rx], x)\n self.assertAllClose(295.0, r[0].eval())\n r = gradients_impl.gradients([rx], y)\n self.assertAllClose(120.0, 
r[0].eval())\n\n def testWhileGrad_Dependency(self):\n with self.cached_session():\n i = constant_op.constant(0, name=\"i\")\n x = constant_op.constant(2.0, name=\"x\")\n\n c = lambda i, x: math_ops.less(i, 10)\n\n def b(i, x):\n x = math_ops.multiply(x, 2.0)\n i = math_ops.add(i, 1)\n return i, x\n\n ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)\n\n r = gradients_impl.gradients([ri, rx], x)\n self.assertAllClose(1024.0, r[0].eval())\n r = gradients_impl.gradients([rx], x)\n self.assertAllClose(1024.0, r[0].eval())\n\n @test_util.disable_control_flow_v2(\"b/116355153 (back_prop flag)\")\n def testWhileGrad_NoGradient(self):\n with self.cached_session():\n v = constant_op.constant(2.0, name=\"v\")\n c = lambda v: math_ops.less(v, 100.0)\n b = math_ops.square\n r = control_flow_ops.while_loop(c, b, [v], back_prop=False)\n r = math_ops.add(r, v)\n r = gradients_impl.gradients(r, v)\n self.assertAllClose(1.0, r[0].eval())\n\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileGrad_NoDependency(self):\n with self.cached_session() as sess:\n variable = variables.Variable(array_ops.ones([2, 3]))\n duration = array_ops.zeros([], dtype=dtypes.int32)\n\n def cond(duration, tensor, _):\n del tensor\n return duration < 10\n\n def body(duration, tensor, _):\n return (duration + 1, tensor, tensor)\n\n loop_vars = [duration, variable, variable]\n tensors = control_flow_ops.while_loop(\n cond=cond, body=body, loop_vars=loop_vars)\n cost = math_ops.reduce_sum(tensors[2])\n grad = gradients_impl.gradients(cost, [variable])\n variables.global_variables_initializer().run()\n self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))\n\n def testWhileGrad_Const(self):\n with self.cached_session() as sess:\n c0 = constant_op.constant(0.0, name=\"c0\")\n c1 = constant_op.constant(1.0, name=\"c1\")\n duration = constant_op.constant(0, name=\"t\")\n\n def cond(duration, _):\n return duration < 1\n\n def body(duration, _):\n return duration + 1, c1\n\n loop_vars = [duration, c0]\n tensors = control_flow_ops.while_loop(\n cond=cond, body=body, loop_vars=loop_vars)\n cost = math_ops.reduce_sum(tensors[1])\n grad = gradients_impl.gradients(cost, [c0])\n self.assertAllClose(0.0, sess.run(grad[0]))\n\n def testWhileGrad_SerialTwoLoops(self):\n with self.cached_session():\n i = constant_op.constant(0, name=\"i\")\n x = constant_op.constant(2.0, name=\"x\")\n\n c = lambda i, x: math_ops.less(i, 5)\n\n def b(i, x):\n x = math_ops.multiply(x, 2.0)\n i = math_ops.add(i, 1)\n return i, x\n\n _, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)\n _, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)\n\n r = gradients_impl.gradients([rx], x)\n self.assertAllClose(1024.0, r[0].eval())\n\n def testWhileGrad_ParallelTwoLoops(self):\n with self.cached_session():\n i = constant_op.constant(0, name=\"i\")\n x = constant_op.constant(2.0, name=\"x\")\n\n c = lambda i, x: math_ops.less(i, 5)\n\n def b(i, x):\n x = math_ops.multiply(x, 2.0)\n i = math_ops.add(i, 1)\n return i, x\n\n _, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)\n _, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)\n rx = math_ops.add(r1, r2)\n\n r = gradients_impl.gradients([rx], x)\n self.assertAllClose(64.0, r[0].eval())\n\n def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):\n with self.cached_session():\n i = constant_op.constant(0, name=\"i\")\n x = constant_op.constant(1.0, name=\"x\")\n y = 
constant_op.constant(1.0, name=\"y\")\n c = lambda i, *_: math_ops.less(i, 1, name=\"cond_less\")\n\n def b(i, xi, yi):\n # return (i + 1, xi, xi + yi)\n return (math_ops.add(i, 1, name=\"inc\"), array_ops.identity(\n xi, name=\"xi\"), math_ops.add(xi, yi, name=\"xi_plus_yi\"))\n\n _, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])\n with ops.control_dependencies([x_f]):\n y_f_d = array_ops.identity(y_f, name=\"y_f_d\")\n\n self.assertAllClose(2.0, y_f_d.eval()) # y_f_d = 1.0 + 1.0\n g = gradients_impl.gradients([y_f_d], [x])[0]\n self.assertTrue(g is not None)\n self.assertAllClose(1.0, g.eval()) # y_f_d = x + 1.0, dy_f_d/dx = 1.0\n\n def _testNestedWhileGrad_Simple(self, use_gpu):\n with self.test_session(use_gpu=use_gpu):\n v = constant_op.constant(1.0)\n\n def inner_loop(s):\n c = lambda x: math_ops.less(x, 4.0)\n b = lambda x: math_ops.multiply(x, 2.0)\n return control_flow_ops.while_loop(c, b, [s])\n\n c = lambda x: math_ops.less(x, 2.0)\n b = lambda x: math_ops.multiply(inner_loop(x), 2.0)\n r = control_flow_ops.while_loop(c, b, [v])\n\n r = gradients_impl.gradients(r, v)[0]\n self.assertAllClose(8.0, r.eval())\n\n @test_util.disable_control_flow_v2(\"b/116248044 (nested)\")\n def testNestedWhileGrad_Simple(self):\n self._testNestedWhileGrad_Simple(use_gpu=False)\n self._testNestedWhileGrad_Simple(use_gpu=True)\n\n @test_util.disable_control_flow_v2(\"b/116248044 (nested)\")\n def testNestedWhileGrad_SerialInner(self):\n with self.cached_session():\n v = constant_op.constant(1.0)\n\n def inner_loop1(s):\n z = constant_op.constant(0)\n c = lambda i, x: math_ops.less(i, 4)\n b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]\n return control_flow_ops.while_loop(c, b, [z, s])\n\n def inner_loop2(s):\n z = constant_op.constant(0)\n c = lambda i, x: math_ops.less(i, 4)\n b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]\n return control_flow_ops.while_loop(c, b, [z, s])\n\n c = lambda x: math_ops.less(x, 128.0)\n b = lambda x: inner_loop2(inner_loop1(x)[1])[1]\n r = control_flow_ops.while_loop(c, b, [v])\n\n r = gradients_impl.gradients(r, v)[0]\n self.assertAllClose(256.0, r.eval())\n\n @test_util.disable_control_flow_v2(\"b/116248044 (nested)\")\n def testNestedWhileGrad_ParallelInner(self):\n with self.cached_session():\n v = constant_op.constant(1.0)\n\n def inner_loop1(s):\n z = constant_op.constant(0)\n c = lambda i, x: math_ops.less(i, 4)\n b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]\n return control_flow_ops.while_loop(c, b, [z, s])\n\n def inner_loop2(s):\n z = constant_op.constant(0)\n c = lambda i, x: math_ops.less(i, 4)\n b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]\n return control_flow_ops.while_loop(c, b, [z, s])\n\n c = lambda x: math_ops.less(x, 128.0)\n b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])\n r = control_flow_ops.while_loop(c, b, [v])\n\n r = gradients_impl.gradients(r, v)[0]\n self.assertAllClose(512.0, r.eval())\n\n @test_util.disable_control_flow_v2(\n \"Nested loops and TensorArrays not supported\")\n def testNestedWhileGrad_ParallelIterations(self):\n # Make sure the stack pushes and pops of an inner loop are executed in\n # the sequential order of the iterations of its outer loop.\n with self.cached_session() as sess:\n\n def inner_loop(t):\n fn = lambda n: n + math_ops.square(var)\n return functional_ops.map_fn(fn=fn, elems=t, parallel_iterations=10)\n\n def outer_loop(inp):\n return functional_ops.map_fn(\n fn=inner_loop, elems=inp, 
parallel_iterations=10)\n\n var = variables.Variable(constant_op.constant(3.0))\n inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])\n res = outer_loop(inp)\n optimizer = adam.AdamOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))\n sess.run(variables.global_variables_initializer())\n sess.run(train_op)\n self.assertAllClose(2.999, var.eval())\n\n def _testWhileCondGrad_Simple(self, use_gpu):\n with self.test_session(use_gpu=use_gpu):\n v = ops.convert_to_tensor(2.0, name=\"v\")\n n = ops.convert_to_tensor(100.0, name=\"n\")\n one = ops.convert_to_tensor(1.0, name=\"one\")\n c = lambda x: math_ops.less(x, n)\n # pylint: disable=undefined-variable\n # for OSS build\n b = lambda x: control_flow_ops.cond(constant_op.constant(True),\n lambda: math_ops.square(x),\n lambda: math_ops.subtract(x, one))\n # pylint: enable=undefined-variable\n r = control_flow_ops.while_loop(c, b, [v])\n r = gradients_impl.gradients(r, v)[0]\n self.assertAllClose(1024.0, r.eval())\n\n def testWhileCondGrad_Simple(self):\n self._testWhileCondGrad_Simple(use_gpu=False)\n if not control_flow_ops.ENABLE_WHILE_V2:\n # TODO(b/117519152): Enable.\n self._testWhileCondGrad_Simple(use_gpu=True)\n\n @test_util.disable_control_flow_v2(\"b/117276490\")\n def testWhileCondGrad_UnknownShape(self):\n with self.cached_session() as sess:\n v = array_ops.placeholder(dtypes.float32)\n n = ops.convert_to_tensor(100.0, name=\"n\")\n one = ops.convert_to_tensor(1.0, name=\"one\")\n c = lambda x: math_ops.less(x, n)\n # pylint: disable=undefined-variable\n # for OSS build\n b = lambda x: control_flow_ops.cond(constant_op.constant(True),\n lambda: math_ops.square(x),\n lambda: math_ops.subtract(x, one))\n # pylint: enable=undefined-variable\n r = control_flow_ops.while_loop(c, b, [v])\n r = gradients_impl.gradients(r, v)[0]\n r = sess.run(r, feed_dict={v: 2.0})\n self.assertAllClose(1024.0, r)\n\n def testWhileGrad_Concat(self):\n with self.cached_session() as sess:\n x = variable_scope.get_variable(\"x\", initializer=[[1., 2.]])\n i0 = constant_op.constant(0)\n h0 = array_ops.zeros([0, 2])\n\n def condition(i, _):\n return i < 2\n\n def body(i, h):\n return i + 1, array_ops.concat([h, x], 0)\n\n _, h = control_flow_ops.while_loop(\n condition, body, [i0, h0],\n [i0.get_shape(), tensor_shape.TensorShape([None, 2])])\n s = math_ops.reduce_sum(h)\n\n sess.run(variables.global_variables_initializer())\n optimizer = gradient_descent.GradientDescentOptimizer(0.01)\n op = optimizer.minimize(s)\n sess.run(op)\n self.assertAllClose([[0.98000002, 1.98000002]], sess.run(x))\n\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileWithRefsWithGradients_1(self):\n with self.cached_session() as sess:\n x = variables.VariableV1(0.)._ref() # pylint: disable=protected-access\n i = constant_op.constant(0)\n c = lambda i, x: math_ops.less(i, 10)\n\n self.assertEqual(x.dtype, dtypes.float32_ref)\n\n def body(i, x):\n self.assertEqual(x.dtype, dtypes.float32_ref)\n return [i + 1, gen_array_ops.ref_identity(x)]\n\n r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)\n\n grad_ys = [variables.VariableV1(73)._ref()] # pylint: disable=protected-access\n grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)\n\n variables.global_variables_initializer().run()\n\n self.assertEqual(r[0].dtype, dtypes.int32)\n self.assertEqual(r[1].dtype, dtypes.float32_ref)\n\n value_i, value_x, value_x_grad = sess.run(r + grad)\n\n self.assertEqual(10, 
value_i)\n self.assertEqual(0, value_x)\n self.assertEqual(73, value_x_grad)\n\n @test_util.disable_control_flow_v2(\"b/116282023 (IndexedSlices)\")\n def testWhileGrad_IndexedSlices(self):\n with self.cached_session():\n values = constant_op.constant([2.0, 4.0], name=\"values\")\n indices = constant_op.constant([0, 3], name=\"indices\")\n shape = constant_op.constant([10], name=\"dense_shape\")\n i = constant_op.constant(0)\n x = ops.IndexedSlices(values, indices, dense_shape=shape)\n\n def c(i, _):\n return i < 10\n\n def b(i, x):\n return [\n i + 1,\n ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)\n ]\n\n _, r = control_flow_ops.while_loop(c, b, [i, x])\n r = gradients_impl.gradients(r.values, values)[0]\n self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())\n\n @test_util.disable_control_flow_v2(\"b/116328420 (SparseTensor)\")\n def testWhileGrad_SparseTensor(self):\n with self.cached_session():\n values = constant_op.constant([2.0, 4.0], name=\"values\")\n indices = constant_op.constant(\n [[0], [3]], dtype=dtypes.int64, name=\"indices\")\n shape = constant_op.constant([10], dtype=dtypes.int64, name=\"dense_shape\")\n i = constant_op.constant(0)\n x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)\n\n def c(i, _):\n return i < 10\n\n def b(i, x):\n return [\n i + 1,\n sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)\n ]\n\n _, r = control_flow_ops.while_loop(c, b, [i, x])\n r = gradients_impl.gradients(r.values, values)[0]\n self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())\n\n @test_util.disable_control_flow_v2(\"b/115920078 (gradients)\")\n def testCallGradInLoop(self):\n with self.cached_session() as sess:\n i0 = constant_op.constant(0)\n params = constant_op.constant(5.0)\n params_1 = math_ops.square(params)\n\n def c(i, _):\n return i < 10\n\n def b(i, x):\n data = constant_op.constant([1.0, 2.0, 3.0])\n data = math_ops.multiply(data, params_1)\n x1 = x + gradients_impl.gradients(data, params)[0]\n return i + 1, x1\n\n output_grad = control_flow_ops.while_loop(\n c, b, [i0, constant_op.constant(0.0)])\n self.assertAllClose(600.0, sess.run(output_grad)[1])\n\n @test_util.disable_control_flow_v2(\n \"b/116255781 (flat_args), b/115660901 (TensorArray)\")\n def testWhileAndTensorArray(self):\n with self.cached_session() as sess:\n param = constant_op.constant(2.0)\n n0 = constant_op.constant(0)\n y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name=\"elems\")\n\n def c(i, _):\n return i < 10\n\n def b(i, y):\n return [\n i + 1,\n functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y)\n ]\n\n r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)\n r = gradients_impl.gradients(r, param)[0]\n self.assertAllClose(107520.0, sess.run(r))\n\n def testWhileGrad_StopGrad(self):\n with self.cached_session():\n x = constant_op.constant(3.0, name=\"x\")\n y = constant_op.constant(2.0, name=\"y\")\n\n c = lambda x, y: math_ops.less(x, 100.0)\n\n def b(x, y):\n y1 = math_ops.square(y)\n x1 = math_ops.add(math_ops.square(x), y1)\n return x1, y1\n\n rx, ry = control_flow_ops.while_loop(c, b, [x, y])\n\n r = gradients_impl.gradients(rx, y)[0]\n self.assertEqual(136.0, r.eval())\n r = gradients_impl.gradients(ry, y)[0]\n self.assertEqual(32.0, r.eval())\n\n r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]\n self.assertEqual(r, None)\n r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]\n self.assertEqual(r, None)\n\n r = gradients_impl.gradients(\n 
array_ops.stop_gradient(math_ops.square(rx)), y)[0]\n self.assertEqual(r, None)\n r = gradients_impl.gradients(\n array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]\n self.assertEqual(r, None)\n r = gradients_impl.gradients(\n array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]\n self.assertEqual(r, None)\n\n r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]\n self.assertEqual(168.0, r.eval())\n r = gradients_impl.gradients(\n math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]\n self.assertEqual(136.0, r.eval())\n r = gradients_impl.gradients(\n math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]\n self.assertEqual(32.0, r.eval())\n\n def testWhileGrad_StopGradInside(self):\n with self.cached_session():\n x = constant_op.constant(3.0, name=\"x\")\n y = constant_op.constant(2.0, name=\"y\")\n\n c = lambda x, y: math_ops.less(x, 100.0)\n\n def b(x, y):\n y1 = array_ops.stop_gradient(math_ops.square(y))\n x1 = math_ops.add(math_ops.square(x), y1)\n return x1, y1\n\n rx, _ = control_flow_ops.while_loop(c, b, [x, y])\n\n r = gradients_impl.gradients(rx, y)[0]\n self.assertAllClose(0.0, r.eval())\n r = gradients_impl.gradients(rx, x)[0]\n self.assertAllClose(156.0, r.eval())\n\n def testWhileGrad_StopGradInsideNoShape(self):\n with self.cached_session() as sess:\n x = array_ops.placeholder(dtypes.float32)\n y = array_ops.placeholder(dtypes.float32)\n\n c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)\n\n def b(x, y):\n y1 = array_ops.stop_gradient(math_ops.square(y, name=\"stopped\"))\n x1 = math_ops.add(math_ops.square(x), y1)\n return x1, y1\n\n rx, _ = control_flow_ops.while_loop(c, b, [x, y])\n\n r = gradients_impl.gradients(rx, y)[0]\n feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}\n self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))\n r = gradients_impl.gradients(rx, x)[0]\n self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))\n name = \"gradients/while/stopped_grad\"\n all_ops = x.graph.get_operations()\n self.assertFalse(any([name in op.name for op in all_ops]))\n\n @test_util.disable_control_flow_v2(\"b/116255781 (flat args)\")\n def testWhileGradGradFail(self):\n theta = variables.Variable(initial_value=1.)\n\n def fn(prev, x):\n return prev + x * theta\n\n result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))\n grad_theta = gradients_impl.gradients(result, theta)\n with self.assertRaisesRegexp(TypeError, \"Second-order gradient\"):\n gradients_impl.gradients(grad_theta, theta)\n grad_theta_stopped = array_ops.stop_gradient(grad_theta)\n gradients_impl.gradients(grad_theta_stopped, theta)\n\n def testStopGradOnWhileGrad(self):\n with self.cached_session():\n x = constant_op.constant(2.0, name=\"x\")\n y = constant_op.constant(2.0, name=\"y\")\n\n c = lambda x: math_ops.less(x, 100.0)\n b = lambda x: math_ops.multiply(x, y)\n rx = control_flow_ops.while_loop(c, b, [x])\n\n rg = gradients_impl.gradients(rx, y)[0]\n rg = array_ops.stop_gradient(rg)\n r = math_ops.add(math_ops.square(y), rx)\n r = math_ops.add(r, rg)\n r = gradients_impl.gradients(r, y)[0]\n self.assertEqual(388.0, r.eval())\n\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileGradientWithNontrainablePath1(self):\n q = variables.Variable([7., 8.])\n\n def cond(_, y):\n del y\n return False\n\n def body(x, _):\n return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)\n\n _, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))\n dy_dq, = gradients_impl.gradients(y, q)\n 
self.assertIsNotNone(dy_dq)\n with self.cached_session() as sess:\n sess.run(q.initializer)\n self.assertAllClose([0., 0.], sess.run(dy_dq))\n\n @test_util.disable_control_flow_v2(\"b/113324949 (RefVariable)\")\n def testWhileGradientWithNontrainablePath2(self):\n q = variables.Variable([7., 8.])\n\n def cond(_, y):\n return math_ops.equal(y, 0.)\n\n def body(x, _):\n zero = constant_op.constant(0, dtype=dtypes.int64)\n return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)\n\n _, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))\n dy_dq, = gradients_impl.gradients(y, q)\n self.assertIsNotNone(dy_dq)\n with self.cached_session() as sess:\n sess.run(q.initializer)\n self.assertAllClose([1., 1.], sess.run(dy_dq))\n\n @test_util.disable_control_flow_v2(\"b/115920078 (gradients)\")\n def testIssue16504(self):\n c = constant_op.constant(np.arange(100), dtype=dtypes.float32)\n w = variables.Variable(\n initial_value=np.ones(100), dtype=dtypes.float32) / 100\n k = variables.Variable(0, dtype=dtypes.int32)\n chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)\n\n def cond(k, _, chg_w):\n return math_ops.logical_and(k < 10, chg_w > 1e-3)\n\n def body(k, w, chg_w):\n grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)\n w_n = w * math_ops.exp(-0.1 * grad)\n w_n /= math_ops.reduce_sum(w_n)\n chg_w = (\n math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(\n math_ops.abs(w)))\n return k + 1, w_n, chg_w\n\n _, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])\n grad, = gradients_impl.gradients(w, c)\n self.assertIsNotNone(grad)\n\n @test_util.disable_control_flow_v2(\"b/116270461 (resource)\")\n def testStopGradMultiFlows(self):\n with self.cached_session():\n\n def body(i, y, r):\n x = variable_scope.get_variable(\n \"x\",\n shape=(),\n dtype=dtypes.float32,\n initializer=init_ops.ones_initializer())\n y *= x\n return [i + 1, y, r + math_ops.reduce_sum(y)]\n\n i0 = constant_op.constant(0)\n y0 = array_ops.ones(5)\n r0 = constant_op.constant(0.0)\n cond = lambda i, y, r: i < 1\n _, _, r = control_flow_ops.while_loop(\n cond, body, [i0, y0, r0], back_prop=True)\n\n vars_ = variables.global_variables()\n grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])\n z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))\n result = gradients_impl.gradients(z, vars_)[0]\n variables.global_variables_initializer().run()\n self.assertEqual(5.0, result.eval())\n\n def testOneValueCond(self):\n\n with self.cached_session():\n c = array_ops.placeholder(dtypes.int32, shape=[])\n one = ops.convert_to_tensor(1, name=\"one\")\n two = ops.convert_to_tensor(2, name=\"two\")\n p = math_ops.greater_equal(c, 1)\n i = control_flow_ops.cond(p, lambda: one, lambda: two)\n self.assertTrue(isinstance(i, ops.Tensor))\n\n # True case: c = 2 is >= 1\n self.assertEqual([1], i.eval(feed_dict={c: 2}))\n\n # False case: c = 0 is not >= 1\n self.assertEqual([2], i.eval(feed_dict={c: 0}))\n\n def testExampleCond(self):\n\n with self.cached_session():\n x = ops.convert_to_tensor([-2.0, 2.0], name=\"x\")\n d = array_ops.placeholder(dtypes.int32, shape=[])\n\n def l2():\n return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))\n\n def l1():\n return math_ops.reduce_sum(math_ops.abs(x))\n\n i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)\n self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))\n self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))\n\n @test_util.disable_control_flow_v2(\n \"b/112477618 (Operation 
returned from cond)\")\n def testCase(self):\n with self.cached_session():\n x = constant_op.constant(1)\n y = constant_op.constant(2)\n z = constant_op.constant(3)\n f1 = lambda: constant_op.constant(17)\n f2 = lambda: constant_op.constant(23)\n f3 = lambda: constant_op.constant(-1)\n\n r1 = control_flow_ops.case(\n {\n x < y: f1,\n x > z: f2\n }, default=f3, exclusive=True)\n self.assertAllEqual(r1.eval(), 17)\n\n r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)\n self.assertAllEqual(r2.eval(), 23)\n\n # Duplicate events can happen, first one is selected\n r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)\n self.assertAllEqual(r3.eval(), 17)\n\n # Duplicate events cause an error if exclusive = True\n r4 = control_flow_ops.case(\n [(x < y, f1), (x < y, f2)], default=f3, exclusive=True)\n with self.assertRaisesOpError(\"Input error:\"):\n r4.eval()\n\n # Check that the default is called if none of the others are\n r5 = control_flow_ops.case({x > y: f1}, default=f3)\n self.assertAllEqual(r5.eval(), -1)\n\n ran_once = [False, False, False]\n\n def break_run_twice(ix):\n\n def _break():\n ran_once[ix] = True\n return constant_op.constant(ix)\n\n return _break\n\n # Should not fail - each conditional gets called exactly once\n # except default. Default gets called twice: once to create an\n # empty output and once for the actual cond switch.\n r6 = control_flow_ops.case(\n [(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],\n default=lambda: constant_op.constant(2))\n\n self.assertAllEqual(r6.eval(), 0)\n\n @test_util.disable_control_flow_v2(\n \"b/112477618 (Operation returned from cond)\")\n def testCaseSideEffects(self):\n with self.cached_session() as sess:\n v0 = variables.Variable(-1)\n v1 = variables.Variable(-1)\n v2 = variables.Variable(-1)\n\n a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)\n b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)\n c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)\n\n x = constant_op.constant(1)\n y = constant_op.constant(2)\n\n r0 = control_flow_ops.case(\n ((x < y, a), (x > y, b)), default=c, exclusive=True)\n r1 = control_flow_ops.case(\n ((x > y, a), (x < y, b)), default=c, exclusive=True)\n r2 = control_flow_ops.case(\n ((x > y, a), (x > y, b)), default=c, exclusive=True)\n\n variables.global_variables_initializer().run()\n self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)\n self.assertEqual(2, r2.eval())\n self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])\n\n variables.global_variables_initializer().run()\n self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)\n self.assertEqual(1, r1.eval())\n self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])\n\n variables.global_variables_initializer().run()\n self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)\n self.assertEqual(0, r0.eval())\n self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])\n\n @test_util.disable_control_flow_v2(\"b/113324949 (ref vars)\")\n def testOneOpCond(self):\n with self.cached_session():\n v = variables.Variable(0)\n c = ops.convert_to_tensor(0)\n one = ops.convert_to_tensor(1)\n two = ops.convert_to_tensor(2)\n p = math_ops.greater_equal(c, 1)\n\n def a():\n return state_ops.assign(v, one)\n\n def b():\n return state_ops.assign(v, two)\n\n i = control_flow_ops.cond(p, a, b)\n self.assertTrue(isinstance(i, ops.Tensor))\n variables.global_variables_initializer().run()\n\n self.assertEqual(0, v.eval())\n\n # True case: c = 2 is >= 1, v 
is set to 1.\n self.assertEqual(1, i.eval(feed_dict={c.name: 2}))\n self.assertEqual(1, v.eval())\n\n # False case: c = 0 is not >= 1, v is set to 2.\n self.assertEqual(2, i.eval(feed_dict={c.name: 0}))\n self.assertEqual(2, v.eval())\n\n def testWithOpsDependencies(self):\n with self.cached_session() as sess:\n v = variables.VariableV1(0.0)\n c = constant_op.constant(10)\n\n # Fetching v directly will result in an uninitialized error\n with self.assertRaisesOpError(\"Attempting to use uninitialized value\"):\n sess.run([c, v])\n\n # Use a control dependency to ensure init_variable is run\n # while asking for c\n real_v = control_flow_ops.with_dependencies(\n name=\"real_tensor\",\n output_tensor=v._ref(), # pylint: disable=protected-access\n dependencies=[v.initializer])\n c_val, real_v_val = sess.run([c, real_v])\n\n # Ensure the result of 'real_c' is the same as 'c'\n self.assertAllEqual(10, c_val)\n\n # Ensure that 'v' is initialized\n self.assertAllClose(0.0, real_v_val)\n\n def testWithTensorDependencies(self):\n with self.cached_session():\n v = variables.VariableV1(0.0)\n c1 = constant_op.constant(10)\n c2 = constant_op.constant(20)\n\n # c1_with_init_v depends on the init op for v\n c1_with_init_v = control_flow_ops.with_dependencies(\n name=\"c1_with_init_v\", output_tensor=c1, dependencies=[v.initializer])\n # c2_with_c1 depends on the value of c1_with_init_v\n c2_with_c1_dep = control_flow_ops.with_dependencies(\n name=\"c2_with_c1_dep\",\n output_tensor=c2,\n dependencies=[c1_with_init_v])\n\n # Fetching v directly will result in an uninitialized error\n with self.assertRaisesOpError(\"Attempting to use uninitialized value\"):\n v.eval()\n\n # Get the value of 'c2_with_c1_dep', which should cause 'v'\n # to be initialized.\n self.assertAllEqual(20, c2_with_c1_dep.eval())\n\n # Ensure that 'v' is initialized\n self.assertAllClose(0.0, v.eval())\n\n def testWithIndexedSlicesDependencies(self):\n with self.cached_session():\n v = variables.VariableV1(\n np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))\n v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))\n gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)\n v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],\n v_at_1)\n gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,\n v_at_1_after_init.indices)\n\n # Fetching gather_v_at_1 will result in an uninitialized error\n with self.assertRaisesOpError(\"Attempting to use uninitialized value\"):\n gather_v_at_1.eval()\n\n # Getting gather_v_at_1_after_init will work, and initialize v.\n self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())\n\n # Double check that 'v' is initialized\n self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())\n\n def testDependenciesDevice(self):\n with ops.Graph().as_default():\n # device set on tensor => same device on dep.\n with ops.device(\"/job:ps\"):\n vd = variables.VariableV1([0.0])\n with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)\n self.assertTrue(\"/job:ps\" in with_vd_dep.device)\n\n # No device set on tensor => no device on dep.\n vnod = variables.VariableV1([0.0])\n with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],\n vnod)\n self.assertDeviceEqual(None, with_vnod_dep.device)\n\n # device set on tensor, default device on graph => default device on dep.\n vdef = variables.VariableV1([0.0], name=\"vdef\")\n with ops.device(\"/job:worker/device:GPU:1\"):\n with_vdef_dep = 
control_flow_ops.with_dependencies([vdef.initializer],\n vdef)\n # The device is empty, but the colocation constraint is set.\n self.assertDeviceEqual(\"\", with_vdef_dep.device)\n self.assertEqual([b\"loc:@vdef\"], with_vdef_dep.op.colocation_groups())\n\n def testGroup(self):\n with self.cached_session() as sess:\n v1 = variables.VariableV1([0.0])\n v2 = variables.VariableV1([1.0])\n\n # Group init1 and init2 and run.\n init = control_flow_ops.group(v1.initializer, v2.initializer)\n # Fetching v1 directly will result in an uninitialized error\n with self.assertRaisesOpError(\"Attempting to use uninitialized value\"):\n v1.eval()\n\n # Runs \"init\" before fetching v1 and v2.\n init.run()\n v1_val, v2_val = sess.run([v1, v2])\n\n # Ensure that v1 and v2 are initialized\n self.assertAllClose([0.0], v1_val)\n self.assertAllClose([1.0], v2_val)\n\n def testGroupEmpty(self):\n op = control_flow_ops.group()\n self.assertEqual(op.type, \"NoOp\")\n self.assertEqual(op.control_inputs, [])\n\n def testMergeShapes(self):\n # All inputs unknown.\n p1 = array_ops.placeholder(dtypes.float32)\n p2 = array_ops.placeholder(dtypes.float32)\n p3 = array_ops.placeholder(dtypes.float32)\n m, index = control_flow_ops.merge([p1, p2, p3])\n self.assertIs(None, m.get_shape().ndims)\n self.assertEqual([], index.get_shape())\n\n # All inputs known with different ranks.\n p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])\n p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])\n m, index = control_flow_ops.merge([p1, p2])\n self.assertIs(None, m.get_shape().ndims)\n self.assertEqual([], index.get_shape())\n\n # All inputs known with some dimensions different.\n p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])\n p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])\n m, index = control_flow_ops.merge([p1, p2])\n self.assertEqual([None, None], m.get_shape().as_list())\n self.assertEqual([], index.get_shape())\n\n p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])\n p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])\n m, index = control_flow_ops.merge([p1, p2])\n self.assertEqual([None, 2], m.get_shape().as_list())\n self.assertEqual([], index.get_shape())\n\n p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])\n p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])\n m, index = control_flow_ops.merge([p1, p2])\n self.assertEqual([None, 2], m.get_shape().as_list())\n self.assertEqual([], index.get_shape())\n\n # All inputs known with same dimensions.\n p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])\n p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])\n m, index = control_flow_ops.merge([p1, p2])\n self.assertEqual([1, 2], m.get_shape().as_list())\n self.assertEqual([], index.get_shape())\n\n p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])\n p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])\n m, index = control_flow_ops.merge([p1, p2])\n self.assertEqual([None, 2], m.get_shape().as_list())\n self.assertEqual([], index.get_shape())\n\n p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])\n p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])\n m, index = control_flow_ops.merge([p1, p2])\n self.assertEqual([None, None], m.get_shape().as_list())\n self.assertEqual([], index.get_shape())\n\n def testRefSelect(self):\n index = array_ops.placeholder(dtypes.int32)\n\n # All inputs unknown.\n p1 = array_ops.placeholder(dtypes.float32)\n p2 = array_ops.placeholder(dtypes.float32)\n p3 = array_ops.placeholder(dtypes.float32)\n 
v1 = variables.VariableV1(p1, validate_shape=False)\n v2 = variables.VariableV1(p2, validate_shape=False)\n v3 = variables.VariableV1(p3, validate_shape=False)\n self.assertIs(None, v1.get_shape().ndims)\n s = control_flow_ops.ref_select(index, [v1, v2, v3])\n self.assertIs(None, s.get_shape().ndims)\n\n # All inputs known but different.\n v1 = variables.VariableV1([[1, 2]])\n v2 = variables.VariableV1([[2], [1]])\n s = control_flow_ops.ref_select(index, [v1, v2])\n self.assertIs(None, s.get_shape().ndims)\n\n # All inputs known and same.\n v1 = variables.VariableV1([[1, 2]])\n v2 = variables.VariableV1([[1, 2]])\n s = control_flow_ops.ref_select(index, [v1, v2])\n self.assertEqual([1, 2], s.get_shape())\n\n # Possibly the same but not guaranteed.\n v1 = variables.VariableV1([[1., 2.]])\n p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])\n v2 = variables.VariableV1(p2, validate_shape=False)\n s = control_flow_ops.ref_select(index, [v1, v2])\n self.assertEqual(None, s.get_shape())\n\n def testRunLoopTensor(self):\n with self.cached_session() as sess:\n tensor_list = []\n\n def condition(t):\n return t < constant_op.constant(5)\n\n def body(_):\n tensor_list.append(constant_op.constant(5))\n return constant_op.constant(10)\n\n result = control_flow_ops.while_loop(condition, body,\n [constant_op.constant(4)])\n self.assertEqual(10, sess.run(result))\n\n # Ensure that we cannot run a tensor that escapes the loop body\n # accidentally.\n with self.assertRaises(ValueError):\n sess.run(tensor_list[0])\n\n def testWhilePyFuncBasic(self):\n\n def func(x):\n return np.square(x)\n\n with self.cached_session():\n r = control_flow_ops.while_loop(\n lambda i, v: i < 4,\n lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],\n [constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],\n [tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])\n self.assertEqual(r[1].eval(), 65536.0)\n\n def testWhileFuncBasic(self):\n\n @function.Defun(dtypes.float32)\n def func(x):\n return math_ops.square(math_ops.square(x))\n\n with self.cached_session():\n x = constant_op.constant(2.0, dtypes.float32)\n r = control_flow_ops.while_loop(\n lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],\n [constant_op.constant(0), x],\n [tensor_shape.unknown_shape(),\n tensor_shape.unknown_shape()])\n self.assertEqual(r[1].eval(), 65536.0)\n\n r = gradients_impl.gradients(r, x)[0]\n self.assertEqual(r.eval(), 524288.0)\n # while_v2 does not have stacks.\n if not control_flow_ops.ENABLE_WHILE_V2:\n self.assertEqual(\n len([op for op in x.graph.get_operations() if op.type == \"StackV2\"\n ]), 1)\n\n\nclass ControlFlowContextCheckTest(test.TestCase):\n\n def _getWhileTensor(self):\n \"\"\"Creates and returns a tensor from a while context.\"\"\"\n tensor = []\n\n def body(i):\n if not tensor:\n tensor.append(constant_op.constant(1))\n return i + tensor[0]\n\n control_flow_ops.while_loop(lambda i: i < 10, body, [0])\n return tensor[0]\n\n def _getCondTensor(self):\n cond_tensor = []\n\n def true_fn():\n if not cond_tensor:\n cond_tensor.append(constant_op.constant(1))\n return cond_tensor[0]\n\n control_flow_ops.cond(\n math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))\n return cond_tensor[0]\n\n def testInvalidContext(self):\n # Accessing a while loop tensor outside of control flow is illegal.\n while_tensor = self._getWhileTensor()\n with self.assertRaisesRegexp(\n ValueError,\n \"Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' \"\n \"is in a while loop. 
See info log for more details.\"):\n math_ops.add(1, while_tensor)\n\n def testInvalidContextInCond(self):\n # Accessing a while loop tensor in cond is illegal.\n while_tensor = self._getWhileTensor()\n with self.assertRaisesRegexp(\n ValueError, \"Cannot use 'while/Const_1' as input to 'cond/Add' because \"\n \"'while/Const_1' is in a while loop. See info log for more details.\"):\n # TODO(skyewm): this passes if we return while_tensor directly instead\n # of using it as input to another op.\n control_flow_ops.cond(\n math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),\n lambda: constant_op.constant(0))\n\n def testInvalidContextInWhile(self):\n # Accessing a while loop tensor in a different while loop is illegal.\n while_tensor = self._getWhileTensor()\n with self.assertRaisesRegexp(\n ValueError,\n \"Cannot use 'while_1/Add' as input to 'while/Const_1' because they are \"\n \"in different while loops. See info log for more details.\"):\n control_flow_ops.while_loop(lambda i: i < 10,\n lambda x: math_ops.add(1, while_tensor), [0])\n\n with self.assertRaisesRegexp(\n ValueError,\n \"Cannot use 'while_2/NextIteration' as input to 'while/Const_1' \"\n \"because they are in different while loops. See info log for more \"\n \"details.\"):\n control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])\n\n def testValidCondContext(self):\n # Accessing a tensor from a cond context is OK (although dangerous).\n cond_tensor = self._getCondTensor()\n math_ops.add(1, cond_tensor)\n\n def testValidCondContextBranches(self):\n # Accessing a tensor from a cond context from the other branch's cond\n # context is OK (although dangerous).\n cond_tensor = []\n\n def branch_fn():\n if not cond_tensor:\n cond_tensor.append(constant_op.constant(1))\n return cond_tensor[0]\n\n control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)\n\n def testValidWhileContext(self):\n # Accessing a tensor in a nested while is OK.\n def body(_):\n c = constant_op.constant(1)\n return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])\n\n control_flow_ops.while_loop(lambda i: i < 5, body, [0])\n\n def testValidNestedContexts(self):\n # Accessing a tensor from a cond context in a while context, all inside an\n # outer while context, is OK.\n def body(_):\n cond_tensor = self._getCondTensor()\n # Create another cond containing the while loop for good measure\n return control_flow_ops.cond(\n math_ops.less(1, 2),\n lambda: control_flow_ops.while_loop(lambda i: i < 3,\n lambda i: i + cond_tensor, [0]),\n lambda: constant_op.constant(0))\n\n control_flow_ops.while_loop(lambda i: i < 5, body, [0])\n\n def testInvalidNestedContexts(self):\n # Accessing a tensor from a while context in a different while context, all\n # inside a cond context, is illegal.\n def true_fn():\n while_tensor = self._getWhileTensor()\n return control_flow_ops.while_loop(lambda i: i < 3,\n lambda i: i + while_tensor, [0])\n\n with self.assertRaisesRegexp(\n ValueError,\n \"Cannot use 'cond/while_1/add' as input to 'cond/while/Const_1' because\"\n \" they are in different while loops. 
See info log for more details.\"):\n control_flow_ops.cond(\n math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))\n\n\nclass TupleTest(test.TestCase):\n\n def testTensors(self):\n for v1_first in [True, False]:\n with self.cached_session():\n v1 = variables.VariableV1([1.0])\n add1 = math_ops.add(\n control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access\n 2.0)\n v2 = variables.VariableV1([10.0])\n add2 = math_ops.add(\n control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access\n 20.0)\n t1, _, t2 = control_flow_ops.tuple([add1, None, add2])\n\n # v1 is not initialized.\n with self.assertRaisesOpError(\"Attempting to use uninitialized value\"):\n v1.eval()\n\n # v2 is not initialized.\n with self.assertRaisesOpError(\"Attempting to use uninitialized value\"):\n v2.eval()\n\n if v1_first:\n # Getting t1 initializes v2.\n self.assertAllClose([3.0], t1.eval())\n self.assertAllClose([10.0], v2.eval())\n else:\n # Getting t2 initializes v1.\n self.assertAllClose([30.0], t2.eval())\n self.assertAllClose([1.0], v1.eval())\n\n def testIndexedSlices(self):\n for v1_first in [True, False]:\n with self.cached_session():\n v1 = variables.VariableV1(\n np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(\n np.float32))\n v1_at_1 = ops.IndexedSlices(\n control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access\n constant_op.constant([1]))\n\n v2 = variables.VariableV1(\n np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(\n np.float32))\n v2_at_1 = ops.IndexedSlices(\n control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access\n constant_op.constant([1]))\n\n st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])\n g1 = array_ops.gather(st1.values, st1.indices)\n g2 = array_ops.gather(st2.values, st2.indices)\n\n # v1 is not initialized.\n with self.assertRaisesOpError(\"Attempting to use uninitialized value\"):\n v1.eval()\n\n # v2 is not initialized.\n with self.assertRaisesOpError(\"Attempting to use uninitialized value\"):\n v2.eval()\n\n if v1_first:\n # Getting g1 initializes v2.\n self.assertAllClose([[10.0, 11.0]], g1.eval())\n self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],\n v2.eval())\n else:\n # Getting g2 initializes v1.\n self.assertAllClose([[10.1, 11.1]], g2.eval())\n self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],\n v1.eval())\n\n def testAcceptTensorsAsControlInputs(self):\n with self.cached_session():\n var = variables.VariableV1(0)\n assign = state_ops.assign(var, 1)\n t, = control_flow_ops.tuple(\n [constant_op.constant(0)], control_inputs=[assign])\n\n # Should trigger the assign.\n t.eval()\n\n self.assertEquals(1, var.eval())\n\n\nclass AssertTest(test.TestCase):\n\n def testGuardedAssertDoesNotCopyWhenTrue(self):\n with self.test_session(use_gpu=True) as sess:\n with ops.device(test.gpu_device_name()):\n value = constant_op.constant(1.0)\n with ops.device(\"/cpu:0\"):\n true = constant_op.constant(True)\n guarded_assert = control_flow_ops.Assert(true, [value], name=\"guarded\")\n unguarded_assert = gen_logging_ops._assert(\n true, [value], name=\"unguarded\")\n opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)\n guarded_metadata = config_pb2.RunMetadata()\n sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)\n unguarded_metadata = config_pb2.RunMetadata()\n sess.run(unguarded_assert, options=opts, 
run_metadata=unguarded_metadata)\n guarded_nodestat_names = [\n n.node_name\n for d in guarded_metadata.step_stats.dev_stats\n for n in d.node_stats\n ]\n unguarded_nodestat_names = [\n n.node_name\n for d in unguarded_metadata.step_stats.dev_stats\n for n in d.node_stats\n ]\n guarded_memcpy_nodestat_names = [\n n for n in guarded_nodestat_names if \"MEMCPYDtoH\" in n\n ]\n unguarded_memcpy_nodestat_names = [\n n for n in unguarded_nodestat_names if \"MEMCPYDtoH\" in n\n ]\n if \"GPU\" in [d.device_type for d in device_lib.list_local_devices()]:\n # A copy was performed for the unguarded assert\n self.assertLess(0, len(unguarded_memcpy_nodestat_names))\n # No copy was performed for the guarded assert\n self.assertEqual([], guarded_memcpy_nodestat_names)\n\n\nclass WhileOpBenchmark(test.Benchmark):\n \"\"\"Evaluate the performance of while_loop op.\"\"\"\n\n def _getInitVariables(self):\n batch_size = 10\n image_size = 256\n kernel_size = 3\n depth = 16\n\n init_step = constant_op.constant(-1)\n image = variable_scope.get_variable(\n \"image\",\n initializer=random_ops.random_normal(\n [batch_size, image_size, image_size, depth],\n dtype=dtypes.float32,\n stddev=1e-1))\n kernel = variable_scope.get_variable(\n \"weights\",\n initializer=random_ops.truncated_normal(\n [kernel_size, kernel_size, depth, depth],\n dtype=dtypes.float32,\n stddev=1e-1))\n return init_step, image, kernel\n\n def _runOneBenchmark(self,\n default_device,\n num_iters=10,\n static_unroll=False,\n steps=10):\n \"\"\"Evaluate the while loop performance.\n\n Args:\n default_device: The default device to run all ops except the loop_body.\n loop_body is always run on GPU.\n num_iters: Number of iterations to run.\n static_unroll: If true, run unrolled version; otherwise, run while_loop.\n steps: Total number of repeated steps to run the loop.\n\n Returns:\n The duration of the run in seconds.\n \"\"\"\n\n def loop_body(i, x):\n with ops.device(\"/gpu:0\"):\n # Always put loop body on GPU.\n nx = nn_ops.conv2d(\n input=x,\n filter=kernel,\n strides=[1, 1, 1, 1],\n padding=\"SAME\",\n data_format=\"NHWC\",\n name=\"conv2d\")\n ni = math_ops.add(i, 1)\n return ni, nx\n\n ops.reset_default_graph()\n with session.Session() as sess, ops.device(default_device):\n # Get the initial id i, input x, and kernel.\n i, x, kernel = self._getInitVariables()\n sess.run(variables.global_variables_initializer())\n\n if static_unroll:\n for _ in xrange(steps):\n i, x = loop_body(i, x)\n else:\n i, x = control_flow_ops.while_loop(\n lambda i, _: i < steps,\n loop_body, [i, x],\n parallel_iterations=steps,\n swap_memory=True)\n\n r = math_ops.reduce_sum(x)\n dx, dk = gradients_impl.gradients(r, [x, kernel])\n # Use group to avoid fetching back results.\n r = control_flow_ops.group(dx, dk)\n\n for _ in xrange(3):\n # exclude warm up time\n sess.run(r)\n\n start_time = time.time()\n for _ in xrange(num_iters):\n sess.run(r)\n return (time.time() - start_time) / num_iters\n\n def benchmarkWhileOpCrossDevicePlacement(self):\n iters = 10\n # Run loop body on GPU, but other ops on CPU.\n duration = self._runOneBenchmark(\"cpu\", iters, static_unroll=False)\n self.report_benchmark(\n name=\"while_op_cross_device\", iters=iters, wall_time=duration)\n\n def benchmarkWhileOpSameDevicePlacement(self):\n iters = 10\n # Run all ops on the same GPU device.\n duration = self._runOneBenchmark(\"gpu\", iters, static_unroll=False)\n self.report_benchmark(\n name=\"while_op_same_device\", iters=iters, wall_time=duration)\n\n def 
benchmarkWhileOpUnrollCrossDevicePlacement(self):\n iters = 10\n # Run loop body on GPU, but other ops on CPU.\n duration = self._runOneBenchmark(\"cpu\", iters, static_unroll=True)\n self.report_benchmark(\n name=\"unroll_cross_device_cpu\", iters=iters, wall_time=duration)\n\n def benchmarkWhileOpUnrollSameDevicePlacement(self):\n iters = 10\n # Run all ops on GPU.\n duration = self._runOneBenchmark(\"gpu\", iters, static_unroll=True)\n self.report_benchmark(\n name=\"unroll_same_device\", iters=iters, wall_time=duration)\n\n\n@test_util.with_control_flow_v2\nclass EagerTest(test.TestCase):\n\n def testCond(self):\n with context.eager_mode():\n pred = math_ops.less(1, 2)\n fn1 = lambda: [constant_op.constant(10)]\n fn2 = lambda: [constant_op.constant(20)]\n r = control_flow_ops.cond(pred, fn1, fn2)\n\n self.assertAllEqual(r.numpy(), 10)\n self.assertFalse(isinstance(r, list))\n\n # TODO(b/117279927): Re-enable once msan failure is fixed.\n def DISABLED_testCondInDefun(self):\n with context.eager_mode():\n\n @eager_function.defun\n def foo(pred):\n # TODO(b/111124878): this only needs to output one element.\n fn1 = lambda: (constant_op.constant(10), constant_op.constant(100))\n fn2 = lambda: (constant_op.constant(20), constant_op.constant(200))\n return control_flow_ops.cond(constant_op.constant(pred), fn1, fn2)\n\n r = foo(True)\n self.assertAllEqual(r[0].numpy(), 10)\n self.assertNotIsInstance(r, list)\n\n r = foo(False)\n self.assertAllEqual(r[0].numpy(), 20)\n self.assertFalse(isinstance(r, list))\n\n def testWhileLoop(self):\n with context.eager_mode():\n tensor = constant_op.constant([1, 2, 3, 4, 5])\n self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])\n\n def testWhileLoopWithMaxIterations(self):\n with context.eager_mode():\n tensor = constant_op.constant([1, 2, 3, 4, 5])\n self.assertAllEqual(\n isum(tensor, maximum_iterations=3).numpy(),\n [1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])\n\n def testWhileWithMaximumIterationsAndSingleArgument(self):\n with context.eager_mode():\n tensor = constant_op.constant(0)\n r = control_flow_ops.while_loop(\n lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)\n self.assertEqual(1, r.numpy())\n\n def testWithDependencies(self):\n with context.eager_mode():\n t1 = constant_op.constant(1)\n t2 = constant_op.constant(2)\n t3 = control_flow_ops.with_dependencies(t1, t2)\n self.assertAllEqual(t2.numpy(), t3.numpy())\n\n def testTuple(self):\n with context.eager_mode():\n t1 = constant_op.constant(1)\n t2 = constant_op.constant(2)\n tup1, tup2 = control_flow_ops.tuple([t1, t2])\n self.assertAllEqual(t1.numpy(), tup1.numpy())\n self.assertAllEqual(t2.numpy(), tup2.numpy())\n\n def testCase(self):\n with context.eager_mode():\n x = constant_op.constant(1)\n y = constant_op.constant(2)\n z = constant_op.constant(3)\n f1 = lambda: constant_op.constant(17)\n f2 = lambda: constant_op.constant(23)\n f3 = lambda: constant_op.constant(-1)\n\n r1 = control_flow_ops.case(\n [(x < y, f1), (x > z, f2)], default=f3, exclusive=True)\n self.assertAllEqual(r1.numpy(), 17)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Core conversion logic, serves as main point of access.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport imp\n\nimport gast\n\nfrom tensorflow.python.autograph import operators\nfrom tensorflow.python.autograph import utils\nfrom tensorflow.python.autograph.converters import asserts\nfrom tensorflow.python.autograph.converters import break_statements\nfrom tensorflow.python.autograph.converters import builtin_functions\nfrom tensorflow.python.autograph.converters import call_trees\nfrom tensorflow.python.autograph.converters import conditional_expressions\nfrom tensorflow.python.autograph.converters import continue_statements\nfrom tensorflow.python.autograph.converters import control_flow\nfrom tensorflow.python.autograph.converters import decorators\nfrom tensorflow.python.autograph.converters import directives\nfrom tensorflow.python.autograph.converters import error_handlers\nfrom tensorflow.python.autograph.converters import function_scopes\nfrom tensorflow.python.autograph.converters import lists\nfrom tensorflow.python.autograph.converters import logical_expressions\nfrom tensorflow.python.autograph.converters import return_statements\nfrom tensorflow.python.autograph.converters import side_effect_guards\nfrom tensorflow.python.autograph.converters import slices\nfrom tensorflow.python.autograph.core import config\nfrom tensorflow.python.autograph.core import converter\nfrom tensorflow.python.autograph.core import errors\nfrom tensorflow.python.autograph.core import function_wrapping\nfrom tensorflow.python.autograph.pyct import ast_util\nfrom tensorflow.python.autograph.pyct import inspect_utils\nfrom tensorflow.python.autograph.pyct import origin_info\nfrom tensorflow.python.autograph.pyct import parser\nfrom tensorflow.python.autograph.pyct import qual_names\nfrom tensorflow.python.autograph.pyct import templates\nfrom tensorflow.python.autograph.pyct import transformer\nfrom tensorflow.python.util import tf_inspect\n\n\n# TODO(mdan): Might we not need any renaming at all?\n\n\ndef is_whitelisted_for_graph(o):\n \"\"\"Check whether an entity is whitelisted for use in graph mode.\n\n Examples of whitelisted entities include all members of the tensorflow\n package.\n\n Args:\n o: A Python entity.\n Returns:\n Boolean\n \"\"\"\n m = tf_inspect.getmodule(o)\n for prefix, in config.DEFAULT_UNCOMPILED_MODULES:\n if m.__name__.startswith(prefix):\n return True\n if hasattr(o, 'autograph_info__'):\n return True\n return False\n\n\ndef entity_to_graph(o, program_ctx, arg_values, arg_types):\n \"\"\"Compile a Python entity into equivalent TensorFlow.\n\n The function will also recursively compile all the entities that `o`\n references, updating `dependency_cache`.\n\n This function is reentrant, and relies on 
dependency_cache to avoid\n generating duplicate code.\n\n Args:\n o: A Python entity.\n program_ctx: A ProgramContext object.\n arg_values: A dict containing value hints for symbols like function\n parameters.\n arg_types: A dict containing type hints for symbols like function\n parameters.\n\n Returns:\n A tuple (ast, new_name, namespace):\n * ast: An AST representing an entity with interface equivalent to `o`,\n but which when executed it creates TF a graph.\n * new_name: The symbol name under which the new entity can be found.\n * namespace: A dict mapping all symbols visible to the converted entity,\n keyed by their symbol name.\n\n Raises:\n ValueError: if the entity type is not supported.\n \"\"\"\n if tf_inspect.isclass(o):\n node, name, ns = class_to_graph(o, program_ctx)\n elif tf_inspect.isfunction(o):\n # TODO(mdan): This is not a reliable mechanism.\n # The most reliable way is to check the source code, the AST will contain\n # a Lambda node instead of a FunctionDef\n if o.__name__ == '<lambda>':\n raise NotImplementedError(\n 'lambda functions are not yet supported; declare the function'\n ' using def instead: %s' % o)\n else:\n node, name, ns = function_to_graph(o, program_ctx, arg_values, arg_types)\n elif tf_inspect.ismethod(o):\n node, name, ns = function_to_graph(o, program_ctx, arg_values, arg_types)\n # TODO(mdan,yashkatariya): Remove when object conversion is implemented.\n elif hasattr(o, '__class__'):\n raise NotImplementedError(\n 'Object conversion is not yet supported. If you are '\n 'trying to convert code that uses an existing object, '\n 'try including the creation of that object in the '\n 'conversion. For example, instead of converting the method '\n 'of a class, try converting the entire class instead. '\n 'See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/'\n 'contrib/autograph/README.md#using-the-functional-api '\n 'for more information.')\n else:\n raise ValueError(\n 'Entity \"%s\" has unsupported type \"%s\". Only functions and classes are '\n 'supported for now.' % (o, type(o)))\n\n # TODO(mdan): This is temporary. it should be created using a converter.\n # TODO(mdan): The attribute should be added with a helper, not directly.\n # The helper can ensure there are no collisions.\n template = '''\n entity.autograph_info__ = {}\n '''\n node.extend(templates.replace(template, entity=name))\n\n program_ctx.add_to_cache(o, node)\n\n if program_ctx.options.recursive:\n while True:\n candidate = None\n for obj in program_ctx.name_map.keys():\n if obj not in program_ctx.dependency_cache:\n candidate = obj\n break\n if candidate is None:\n break\n if (hasattr(candidate, 'im_class') and\n getattr(candidate, 'im_class') not in program_ctx.partial_types):\n # Class members are converted with their objects, unless they're\n # only converted partially.\n continue\n entity_to_graph(candidate, program_ctx, {}, {})\n\n return node, name, ns\n\n\ndef class_to_graph(c, program_ctx):\n \"\"\"Specialization of `entity_to_graph` for classes.\"\"\"\n converted_members = {}\n method_filter = lambda m: tf_inspect.isfunction(m) or tf_inspect.ismethod(m)\n members = tf_inspect.getmembers(c, predicate=method_filter)\n if not members:\n raise ValueError('Cannot convert %s: it has no member methods.' 
% c)\n\n class_namespace = {}\n for _, m in members:\n # Only convert the members that are directly defined by the class.\n if inspect_utils.getdefiningclass(m, c) is not c:\n continue\n node, _, namespace = function_to_graph(\n m,\n program_ctx=program_ctx,\n arg_values={},\n arg_types={'self': (c.__name__, c)},\n owner_type=c,\n rewrite_errors=False)\n if class_namespace is None:\n class_namespace = namespace\n else:\n class_namespace.update(namespace)\n converted_members[m] = node[0]\n namer = program_ctx.new_namer(class_namespace)\n class_name = namer.compiled_class_name(c.__name__, c)\n\n # TODO(mdan): This needs to be explained more thoroughly.\n # Process any base classes: if the superclass if of a whitelisted type, an\n # absolute import line is generated. Otherwise, it is marked for conversion\n # (as a side effect of the call to namer.compiled_class_name() followed by\n # program_ctx.update_name_map(namer)).\n output_nodes = []\n renames = {}\n base_names = []\n for base in c.__bases__:\n if isinstance(object, base):\n base_names.append('object')\n continue\n if is_whitelisted_for_graph(base):\n alias = namer.new_symbol(base.__name__, ())\n output_nodes.append(\n gast.ImportFrom(\n module=base.__module__,\n names=[gast.alias(name=base.__name__, asname=alias)],\n level=0))\n else:\n # This will trigger a conversion into a class with this name.\n alias = namer.compiled_class_name(base.__name__, base)\n base_names.append(alias)\n renames[qual_names.QN(base.__name__)] = qual_names.QN(alias)\n program_ctx.update_name_map(namer)\n\n # Generate the definition of the converted class.\n bases = [gast.Name(n, gast.Load(), None) for n in base_names]\n class_def = gast.ClassDef(\n class_name,\n bases=bases,\n keywords=[],\n body=list(converted_members.values()),\n decorator_list=[])\n # Make a final pass to replace references to the class or its base classes.\n # Most commonly, this occurs when making super().__init__() calls.\n # TODO(mdan): Making direct references to superclass' superclass will fail.\n class_def = qual_names.resolve(class_def)\n renames[qual_names.QN(c.__name__)] = qual_names.QN(class_name)\n class_def = ast_util.rename_symbols(class_def, renames)\n\n output_nodes.append(class_def)\n\n return output_nodes, class_name, class_namespace\n\n\ndef _add_reserved_symbol(namespace, name, entity):\n if name not in namespace:\n namespace[name] = entity\n elif namespace[name] != entity:\n raise ValueError('The name \"%s\" is reserved and may not be used.' 
% name)\n\n\nag_internal = None\n\n\ndef _add_self_references(namespace, autograph_module):\n \"\"\"Adds namespace references to the module that exposes the api itself.\"\"\"\n global ag_internal\n if ag_internal is None:\n # Craft a module that exposes parts of the external API as well as certain\n # internal modules.\n ag_internal = imp.new_module('autograph')\n ag_internal.converted_call = autograph_module.converted_call\n ag_internal.ConversionOptions = converter.ConversionOptions\n ag_internal.utils = utils\n ag_internal.function_scope = function_wrapping.function_scope\n ag_internal.rewrite_graph_construction_error = (\n errors.rewrite_graph_construction_error)\n # TODO(mdan): Add safeguards against name clashes.\n # We don't want to create a submodule because we want the operators to be\n # accessible as ag__.<operator>\n ag_internal.__dict__.update(operators.__dict__)\n\n _add_reserved_symbol(namespace, 'ag__', ag_internal)\n\n\ndef function_to_graph(f,\n program_ctx,\n arg_values,\n arg_types,\n owner_type=None,\n rewrite_errors=True):\n \"\"\"Specialization of `entity_to_graph` for callable functions.\"\"\"\n\n node, source = parser.parse_entity(f)\n node = node.body[0]\n origin_info.resolve(node, source, f)\n namespace = inspect_utils.getnamespace(f)\n _add_self_references(namespace, program_ctx.autograph_module)\n namer = program_ctx.new_namer(namespace)\n\n entity_info = transformer.EntityInfo(\n source_code=source,\n source_file='<fragment>',\n namespace=namespace,\n arg_values=arg_values,\n arg_types=arg_types,\n owner_type=owner_type)\n context = converter.EntityContext(namer, entity_info, program_ctx)\n node = node_to_graph(node, context, rewrite_errors=rewrite_errors)\n\n # TODO(mdan): This somewhat duplicates the call rename logic in call_trees.py\n new_name, did_rename = namer.compiled_function_name(f.__name__, f, owner_type)\n if not did_rename:\n new_name = f.__name__\n if node.name != f.__name__:\n raise NotImplementedError('Strange corner case. 
Send us offending code!')\n node.name = new_name\n\n program_ctx.update_name_map(namer)\n # TODO(mdan): Use this at compilation.\n\n return [node], new_name, namespace\n\n\ndef node_to_graph(node, context, rewrite_errors=True):\n \"\"\"Convert Python code to equivalent TF graph mode code.\n\n Args:\n node: AST, the code to convert.\n context: converter.EntityContext\n rewrite_errors: Boolean, whether or not to rewrite the error traceback.\n\n Returns:\n A tuple (node, deps):\n * node: A Python ast node, representing the converted code.\n * deps: A set of strings, the fully qualified names of entity\n dependencies that this node has.\n \"\"\"\n # TODO(mdan): Insert list_comprehensions somewhere.\n\n node = converter.standard_analysis(node, context, is_initial=True)\n # Past this point, line numbers are no longer accurate so we ignore the\n # source.\n # TODO(mdan): Is it feasible to reconstruct intermediate source code?\n context.info.source_code = None\n\n node = converter.apply_(node, context, decorators)\n node = converter.apply_(node, context, directives)\n node = converter.apply_(node, context, break_statements)\n node = converter.apply_(node, context, asserts)\n # Note: sequencing continue canonicalization before for loop one avoids\n # dealing with the extra loop increment operation that the for\n # canonicalization creates.\n node = converter.apply_(node, context, continue_statements)\n context.info.namespace['len'] = len\n node = converter.apply_(node, context, return_statements)\n node = converter.apply_(node, context, lists)\n node = converter.apply_(node, context, slices)\n node = converter.apply_(node, context, builtin_functions)\n node = converter.apply_(node, context, call_trees)\n node = converter.apply_(node, context, control_flow)\n node = converter.apply_(node, context, conditional_expressions)\n node = converter.apply_(node, context, logical_expressions)\n node = converter.apply_(node, context, side_effect_guards)\n node = converter.apply_(node, context, function_scopes)\n if rewrite_errors:\n node = converter.apply_(node, context, error_handlers)\n return node\n"
] |
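The first code entry above is TensorFlow's control-flow test suite, whose recurring pattern is differentiating through tf.while_loop. A minimal sketch of that pattern, assuming TensorFlow 2.x with the tf.compat.v1 graph-mode shim (names and values are illustrative, mirroring the testWhileGrad-style cases rather than copied from the file):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

v = tf.constant(2.0, name="v")
c = lambda x: tf.less(x, 100.0)   # keep looping while x < 100
b = lambda x: tf.square(x)        # each iteration squares x
r = tf.while_loop(c, b, [v])      # 2 -> 4 -> 16 -> 256, i.e. r = v**8
g = tf.gradients(r, v)[0]         # dr/dv = 8 * v**7 = 1024 at v = 2

with tf.Session() as sess:
    print(sess.run([r, g]))       # [256.0, 1024.0]

The gradient machinery records each forward iteration on a stack so the backward pass can replay them in reverse, which is what the stack push/pop ordering and nested-loop cases in the suite above probe.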
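The second code entry is AutoGraph's conversion core (entity_to_graph, class_to_graph, node_to_graph). A sketch of the behaviour it ultimately backs, assuming the public tf.autograph surface in TensorFlow 2.x; clipped_doubling is a made-up example function, not taken from the file:

import tensorflow as tf

def clipped_doubling(x):
    # Data-dependent Python control flow that the converter rewrites
    # into graph ops (tf.while_loop under the hood).
    while x < 100.0:
        x = x * 2.0
    return x

print(tf.autograph.to_code(clipped_doubling))         # generated graph-mode source
converted = tf.autograph.to_graph(clipped_doubling)   # converted callable

Note that, as the entity_to_graph docstring above states, lambdas and bare objects are rejected; only functions, methods, and classes are convertible.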
[
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.ops.math_ops.subtract",
"tensorflow.python.ops.gen_array_ops.ref_identity",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.gen_state_ops.variable",
"tensorflow.python.ops.array_ops.stop_gradient",
"numpy.square",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.ops.control_flow_ops.ref_switch",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.control_flow_ops._Identity",
"tensorflow.python.ops.control_flow_ops.loop_cond",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.training.adam.AdamOptimizer",
"numpy.array",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.ops.gen_logging_ops._assert",
"tensorflow.python.platform.test.gpu_device_name",
"tensorflow.python.ops.state_ops.scatter_update",
"tensorflow.python.ops.control_flow_ops.switch",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.math_ops.greater_equal",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.ops.math_ops.exp",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.ops.gen_data_flow_ops.stack_v2",
"tensorflow.python.ops.math_ops.logical_and",
"tensorflow.python.ops.control_flow_ops._Enter",
"tensorflow.python.ops.gen_data_flow_ops.stack_pop_v2",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.ops.gen_control_flow_ops.enter",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.gen_data_flow_ops.stack_push_v2",
"tensorflow.python.ops.variables.global_variables",
"tensorflow.python.ops.nn_ops.conv2d",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.control_flow_ops.case",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.ops.functional_ops.map_fn",
"numpy.ones",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.control_flow_ops.ref_select",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.ops.math_ops.argmin",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.python.framework.function.Defun",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.variables.VariableV1",
"tensorflow.python.ops.data_flow_ops.FIFOQueue",
"tensorflow.python.ops.state_ops.is_variable_initialized",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.ops.control_flow_ops.merge",
"tensorflow.core.protobuf.config_pb2.OptimizerOptions",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.logging_ops.Print",
"tensorflow.python.ops.control_flow_ops.next_iteration",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.control_flow_ops.XLAControlFlowContext",
"tensorflow.python.ops.control_flow_ops.Assert",
"tensorflow.python.framework.test_util.disable_control_flow_v2",
"numpy.arange",
"tensorflow.python.ops.init_ops.ones_initializer",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.resource_variable_ops.assign_variable_op",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.random_ops.truncated_normal",
"tensorflow.python.ops.script_ops.py_func",
"tensorflow.python.client.session.Session",
"tensorflow.python.ops.control_flow_ops.exit",
"tensorflow.python.ops.random_ops.random_normal",
"tensorflow.python.ops.control_flow_ops.tuple",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.util.tf_inspect.isclass",
"tensorflow.python.util.tf_inspect.ismethod",
"tensorflow.python.util.tf_inspect.getmodule",
"tensorflow.python.autograph.pyct.origin_info.resolve",
"tensorflow.python.autograph.pyct.templates.replace",
"tensorflow.python.autograph.pyct.parser.parse_entity",
"tensorflow.python.autograph.pyct.ast_util.rename_symbols",
"tensorflow.python.autograph.pyct.qual_names.resolve",
"tensorflow.python.autograph.pyct.transformer.EntityInfo",
"tensorflow.python.autograph.core.converter.EntityContext",
"tensorflow.python.autograph.core.converter.standard_analysis",
"tensorflow.python.util.tf_inspect.getmembers",
"tensorflow.python.util.tf_inspect.isfunction",
"tensorflow.python.autograph.pyct.inspect_utils.getdefiningclass",
"tensorflow.python.autograph.core.converter.apply_",
"tensorflow.python.autograph.pyct.inspect_utils.getnamespace",
"tensorflow.python.autograph.pyct.qual_names.QN"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"2.2",
"1.12"
]
}
] |
CORE-Robotics-Lab/Cross_Loss_Influence_Functions
|
[
"6f0fa45f8896cd6c238c143eca6ddebef97b642c",
"6f0fa45f8896cd6c238c143eca6ddebef97b642c",
"6f0fa45f8896cd6c238c143eca6ddebef97b642c"
] |
[
"cross_loss_influence/models/skip_gram_word2vec.py",
"cross_loss_influence/helpers/test_set_alignments.py",
"cross_loss_influence/helpers/bolukbasi_prior_work/we.py"
] |
[
"# Created by Andrew Silva\n\"\"\"\nAdapted from https://github.com/Adoni/word2vec_pytorch\n\n\"\"\"\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass SkipGramModel(nn.Module):\n \"\"\"Skip gram model of word2vec.\n Attributes:\n emb_size: Embedding size.\n emb_dimention: Embedding dimention, typically from 50 to 500.\n u_embedding: Embedding for center word.\n v_embedding: Embedding for neibor words.\n \"\"\"\n\n def __init__(self, vocab_size, embedding_dim, sparse=True):\n \"\"\"Initialize model parameters.\n Args:\n vocab_size: size of vocabulary.\n embedding_dim: size of each embedding\n \"\"\"\n super(SkipGramModel, self).__init__()\n self.vocab_size = vocab_size\n self.embedding_dim = embedding_dim\n # self.u_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim, sparse=True)\n # self.v_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim, sparse=True)\n self.u_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim, sparse=sparse)\n self.v_embeddings = nn.Embedding(self.vocab_size, self.embedding_dim, sparse=sparse)\n init_range = 1/np.sqrt(vocab_size + embedding_dim)\n self.u_embeddings.weight.data.uniform_(-init_range, init_range)\n self.v_embeddings.weight.data.uniform_(-0, 0)\n self.log_sig = nn.LogSigmoid()\n\n def forward(self, targets, contexts, negatives):\n \"\"\"\n Args:\n targets: target word ids\n contexts: context word ids\n negatives: negative word ids\n Returns:\n negative sampling loss\n \"\"\"\n emb_u = self.u_embeddings(targets)\n emb_v = self.v_embeddings(contexts)\n target_context = torch.mul(emb_u, emb_v)\n target_context = torch.sum(target_context, dim=1)\n target_context = self.log_sig(target_context)\n\n neg_emb_v = self.v_embeddings(negatives) # .permute(1, 0, 2) # Move batch dimension to the front\n neg_score = torch.bmm(neg_emb_v, emb_u.unsqueeze(2)).squeeze()\n neg_score = F.logsigmoid(-1 * neg_score)\n\n # return -1 * (torch.sum(target_context)+torch.sum(neg_score))\n return -(torch.mean(target_context) + torch.mean(neg_score))/2\n\n def forward_no_negatives(self, targets, contexts):\n \"\"\"\n Args:\n targets: target word ids\n contexts: context word ids\n negatives: negative word ids\n Returns:\n negative sampling loss\n \"\"\"\n emb_u = self.u_embeddings(targets)\n emb_v = self.v_embeddings(contexts)\n target_context = torch.mul(emb_u, emb_v)\n target_context = torch.sum(target_context, dim=1)\n target_context = self.log_sig(target_context)\n\n return -(torch.mean(target_context))\n\n def predict(self, token):\n return self.u_embeddings(token)\n\n def predict_diff(self, token1, token2):\n return self.u_embeddings(token1) - self.v_embeddings(token2)\n\n\n\n\n",
"# Created by Andrew Silva\n\"\"\"\nThis is basically weat.py but for my own personal tests\n\"\"\"\nfrom cross_loss_influence.helpers import weat\nfrom cross_loss_influence.helpers.sklearn_cluster_embeddings import get_embeddings\nimport numpy as np\n\n\ndef similarity_diff_sing(word, attrs_A):\n cos_attr_one = []\n for a_A in attrs_A:\n cos_attr_one.append(weat.cos_diff(word, a_A))\n return np.mean(cos_attr_one)\n\nif __name__ == \"__main__\":\n window_size = 10\n negatives=10\n checkpoint='60'\n embeddings, all_keys = get_embeddings(model_fn=f'DENSE_neutral_window-{window_size}_negatives-{negatives}_{checkpoint}_checkpoint.pth.tar',\n vocab_fn='biased_data_stoi.pkl')\n\n X = ['josh', 'alan', 'ryan', 'andrew', 'jack', 'greg', 'amanda', 'katie', 'nancy', 'ellen']\n Y = ['theo', 'jerome', 'leroy', 'lamar', 'lionel', 'malik', 'tyrone', 'ebony', 'jasmine', 'tia', ]\n A = ['jew', 'jewish', 'islam', 'religion', 'islamic', 'muslim']\n\n x_embed = [embeddings[all_keys.index(x)] for x in X]\n y_embed = [embeddings[all_keys.index(x)] for x in Y]\n a_embed = [embeddings[all_keys.index(x)] for x in A]\n mean_one = []\n mean_two = []\n std_all = []\n for x, y in zip(x_embed, y_embed):\n m1 = similarity_diff_sing(x, a_embed)\n m2 = similarity_diff_sing(y, a_embed)\n mean_one.append(m1)\n mean_two.append(m2)\n std_all.append(m1)\n std_all.append(m2)\n print(f\"Average European similarity to religion: {np.mean(mean_one)}\")\n print(f\"Average African similarity to religion: {np.mean(mean_two)}\")\n effect_size = (np.mean(mean_one) - np.mean(mean_two)) / np.std(std_all)\n print(f\"Effect = {effect_size}\")\n\n window_size = 3\n negatives=5\n checkpoint='last'\n embeddings, all_keys = get_embeddings(model_fn=f'DENSE_scifi_window-{window_size}_negatives-{negatives}_{checkpoint}_checkpoint.pth.tar',\n vocab_fn='all_scripts_stoi.pkl')\n\n X = ['anakin', 'yoda', 'kanan', 'ezra', 'ahsoka']\n Y = ['vader', 'sidious', 'dooku', 'maul', 'inquisitor']\n A = ['sith', 'evil', 'anger', 'hate', 'fear']\n B = ['jedi', 'good', 'defense', 'knowledge', 'peace']\n\n x_embed = [embeddings[all_keys.index(x)] for x in X]\n y_embed = [embeddings[all_keys.index(x)] for x in Y]\n a_embed = [embeddings[all_keys.index(x)] for x in A]\n b_embed = [embeddings[all_keys.index(x)] for x in B]\n mean_one = []\n mean_two = []\n std_all = []\n for x, y in zip(x_embed, y_embed):\n m1 = weat.similarity_diff(x, a_embed, b_embed)\n m2 = weat.similarity_diff(y, a_embed, b_embed)\n mean_one.append(m1)\n mean_two.append(m2)\n std_all.append(m1)\n std_all.append(m2)\n print(f\"Average Anakin similarity to Sith-Jedi: {np.mean(mean_one)}\")\n print(f\"Average Vader similarity to Sith-Jedi: {np.mean(mean_two)}\")\n print(np.std(std_all))\n mean_one = []\n mean_two = []\n std_all = []\n for x, y in zip(x_embed, y_embed):\n m1 = similarity_diff_sing(x, a_embed)\n m2 = similarity_diff_sing(y, a_embed)\n mean_one.append(m1)\n mean_two.append(m2)\n std_all.append(m1)\n std_all.append(m2)\n print(f\"Average Anakin similarity to Sith: {np.mean(mean_one)}\")\n print(f\"Average Vader similarity to Sith: {np.mean(mean_two)}\")\n mean_one = []\n mean_two = []\n std_all = []\n for x, y in zip(x_embed, y_embed):\n m1 = similarity_diff_sing(x, b_embed)\n m2 = similarity_diff_sing(y, b_embed)\n mean_one.append(m1)\n mean_two.append(m2)\n std_all.append(m1)\n std_all.append(m2)\n print(f\"Average Anakin similarity to Jedi: {np.mean(mean_one)}\")\n print(f\"Average Vader similarity to Jedi: {np.mean(mean_two)}\")\n test_stat = weat.test_statistic(x_embed, y_embed, 
a_embed, b_embed)\n effect = weat.effect_size(x_embed, y_embed, a_embed, b_embed)\n p_val = weat.p_value(x_embed, y_embed, a_embed, b_embed)\n effect_size = (np.mean(mean_one) - np.mean(mean_two)) / np.std(std_all)\n print(f\"Test statistic = {test_stat} || Effect = {effect} || P = {p_val}\")\n",
"from __future__ import print_function, division\nimport re\nimport sys\nfrom cross_loss_influence.helpers.sklearn_cluster_embeddings import get_embeddings\nimport numpy as np\nimport scipy.sparse\nfrom sklearn.decomposition import PCA\nif sys.version_info[0] < 3:\n import io\n open = io.open\nelse:\n unicode = str\n\"\"\"\nTools for debiasing word embeddings\n\nMan is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings\nTolga Bolukbasi, Kai-Wei Chang, James Zou, Venkatesh Saligrama, and Adam Kalai\n2016\n\"\"\"\n\nDEFAULT_NUM_WORDS = 27000\nFILENAMES = {\"g_wiki\": \"glove.6B.300d.small.txt\",\n \"g_twitter\": \"glove.twitter.27B.200d.small.txt\",\n \"g_crawl\": \"glove.840B.300d.small.txt\",\n \"w2v\": \"GoogleNews-word2vec.small.txt\",\n \"w2v_large\": \"GoogleNews-word2vec.txt\"}\n\n\ndef dedup(seq):\n seen = set()\n return [x for x in seq if not (x in seen or seen.add(x))]\n\n\ndef safe_word(w):\n # ignore words with numbers, etc.\n # [a-zA-Z\\.'_\\- :;\\(\\)\\]] for emoticons\n return (re.match(r\"^[a-z_]*$\", w) and len(w) < 20 and not re.match(r\"^_*$\", w))\n\n\ndef to_utf8(text, errors='strict', encoding='utf8'):\n \"\"\"Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8.\"\"\"\n if isinstance(text, unicode):\n return text.encode('utf8')\n # do bytestring -> unicode -> utf8 full circle, to ensure valid utf8\n return unicode(text, encoding, errors=errors).encode('utf8')\n\n\nclass WordEmbedding:\n def __init__(self, fname, given_data=None):\n self.thresh = None\n self.max_words = None\n self.desc = fname\n print(\"*** Reading data from \" + fname)\n if fname.endswith(\".bin\"):\n import gensim.models\n model =gensim.models.KeyedVectors.load_word2vec_format(fname, binary=True)\n words = sorted([w for w in model.vocab], key=lambda w: model.vocab[w].index)\n vecs = [model[w] for w in words]\n elif fname == 'given':\n vecs, words = given_data\n else:\n vecs = []\n words = []\n\n with open(fname, \"r\", encoding='utf8') as f:\n for line in f:\n s = line.split()\n v = np.array([float(x) for x in s[1:]])\n if len(vecs) and vecs[-1].shape!=v.shape:\n print(\"Got weird line\", line)\n continue\n # v /= np.linalg.norm(v)\n words.append(s[0])\n vecs.append(v)\n self.vecs = np.array(vecs, dtype='float32')\n print(self.vecs.shape)\n self.words = words\n self.reindex()\n norms = np.linalg.norm(self.vecs, axis=1)\n if max(norms)-min(norms) > 0.0001:\n self.normalize()\n\n def reindex(self):\n self.index = {w: i for i, w in enumerate(self.words)}\n self.n, self.d = self.vecs.shape\n assert self.n == len(self.words) == len(self.index)\n self._neighbors = None\n print(self.n, \"words of dimension\", self.d, \":\", \", \".join(self.words[:4] + [\"...\"] + self.words[-4:]))\n\n def v(self, word):\n return self.vecs[self.index[word]]\n\n def diff(self, word1, word2):\n v = self.vecs[self.index[word1]] - self.vecs[self.index[word2]]\n return v/np.linalg.norm(v)\n\n def normalize(self):\n self.desc += \", normalize\"\n self.vecs /= np.linalg.norm(self.vecs, axis=1)[:, np.newaxis]\n self.reindex()\n\n def shrink(self, numwords):\n self.desc += \", shrink \" + str(numwords)\n self.filter_words(lambda w: self.index[w]<numwords)\n\n def filter_words(self, test):\n \"\"\"\n Keep some words based on test, e.g. 
lambda x: x.lower()==x\n \"\"\"\n self.desc += \", filter\"\n kept_indices, words = zip(*[[i, w] for i, w in enumerate(self.words) if test(w)])\n self.words = list(words)\n self.vecs = self.vecs[kept_indices, :]\n self.reindex()\n\n def save(self, filename):\n with open(filename, \"w\") as f:\n f.write(\"\\n\".join([w+\" \" + \" \".join([str(x) for x in v]) for w, v in zip(self.words, self.vecs)]))\n print(\"Wrote\", self.n, \"words to\", filename)\n\n def save_w2v(self, filename, binary=True):\n with open(filename, 'wb') as fout:\n fout.write(to_utf8(\"%s %s\\n\" % self.vecs.shape))\n # store in sorted order: most frequent words at the top\n for i, word in enumerate(self.words):\n row = self.vecs[i]\n if binary:\n fout.write(to_utf8(word) + b\" \" + row.tostring())\n else:\n fout.write(to_utf8(\"%s %s\\n\" % (word, ' '.join(\"%f\" % val for val in row))))\n\n def remove_directions(self, directions): #directions better be orthogonal\n self.desc += \", removed\"\n for direction in directions:\n self.desc += \" \"\n if type(direction) is np.ndarray:\n v = direction / np.linalg.norm(direction)\n self.desc += \"vector \"\n else:\n w1, w2 = direction\n v = self.diff(w1, w2)\n self.desc += w1 + \"-\" + w2\n self.vecs = self.vecs - self.vecs.dot(v)[:, np.newaxis].dot(v[np.newaxis, :])\n self.normalize()\n\n def compute_neighbors_if_necessary(self, thresh, max_words):\n thresh = float(thresh) # dang python 2.7!\n if self._neighbors is not None and self.thresh == thresh and self.max_words == max_words:\n return\n print(\"Computing neighbors\")\n self.thresh = thresh\n self.max_words = max_words\n vecs = self.vecs[:max_words]\n dots = vecs.dot(vecs.T)\n dots = scipy.sparse.csr_matrix(dots * (dots >= 1-thresh/2))\n from collections import Counter\n rows, cols = dots.nonzero()\n nums = list(Counter(rows).values())\n print(\"Mean:\", np.mean(nums)-1)\n print(\"Median:\", np.median(nums)-1)\n rows, cols, vecs = zip(*[(i, j, vecs[i]-vecs[j]) for i, j, x in zip(rows, cols, dots.data) if i<j])\n self._neighbors = rows, cols, np.array([v/np.linalg.norm(v) for v in vecs])\n\n def neighbors(self, word, thresh=1):\n dots = self.vecs.dot(self.v(word))\n return [self.words[i] for i, dot in enumerate(dots) if dot >= 1-thresh/2]\n\n def more_words_like_these(self, words, topn=50, max_freq=100000):\n v = sum(self.v(w) for w in words)\n dots = self.vecs[:max_freq].dot(v)\n thresh = sorted(dots)[-topn]\n words = [w for w, dot in zip(self.words, dots) if dot>=thresh]\n return sorted(words, key=lambda w: self.v(w).dot(v))[-topn:][::-1]\n\n def best_analogies_dist_thresh(self, v, thresh=1, topn=500, max_words=50000):\n \"\"\"Metric is cos(a-c, b-d) if |b-d|^2 < thresh, otherwise 0\n \"\"\"\n vecs, vocab = self.vecs[:max_words], self.words[:max_words]\n self.compute_neighbors_if_necessary(thresh, max_words)\n rows, cols, vecs = self._neighbors\n scores = vecs.dot(v/np.linalg.norm(v))\n pi = np.argsort(-abs(scores))\n\n ans = []\n usedL = set()\n usedR = set()\n for i in pi:\n if abs(scores[i])<0.001:\n break\n row = rows[i] if scores[i] > 0 else cols[i]\n col = cols[i] if scores[i] > 0 else rows[i]\n if row in usedL or col in usedR:\n continue\n usedL.add(row)\n usedR.add(col)\n ans.append((vocab[row], vocab[col], abs(scores[i])))\n if len(ans)==topn:\n break\n\n return ans\n\n\ndef viz(analogies):\n print(\"\\n\".join(str(i).rjust(4)+a[0].rjust(29) + \" | \" + a[1].ljust(29) + (str(a[2]))[:4] for i, a in enumerate(analogies)))\n\n\ndef text_plot_words(xs, ys, words, width = 90, height = 40, filename=None):\n PADDING = 
10 # num chars on left and right in case words spill over\n res = [[' ' for i in range(width)] for j in range(height)]\n def rescale(nums):\n a = min(nums)\n b = max(nums)\n return [(x-a)/(b-a) for x in nums]\n print(\"x:\", (min(xs), max(xs)), \"y:\",(min(ys),max(ys)))\n xs = rescale(xs)\n ys = rescale(ys)\n for (x, y, word) in zip(xs, ys, words):\n i = int(x*(width - 1 - PADDING))\n j = int(y*(height-1))\n row = res[j]\n z = list(row[i2] != ' ' for i2 in range(max(i-1, 0), min(width, i + len(word) + 1)))\n if any(z):\n continue\n for k in range(len(word)):\n if i+k>=width:\n break\n row[i+k] = word[k]\n string = \"\\n\".join(\"\".join(r) for r in res)\n# return string\n if filename:\n with open(filename, \"w\", encoding=\"utf8\") as f:\n f.write(string)\n print(\"Wrote to\", filename)\n else:\n print(string)\n\n\ndef doPCA(pairs, embedding, num_components = 10):\n matrix = []\n num_components = min(len(pairs), num_components)\n for a, b in pairs:\n center = (embedding.v(a) + embedding.v(b))/2\n matrix.append(embedding.v(a) - center)\n matrix.append(embedding.v(b) - center)\n matrix = np.array(matrix)\n pca = PCA(n_components = num_components)\n pca.fit(matrix)\n # bar(range(num_components), pca.explained_variance_ratio_)\n return pca\n\n\ndef drop(u, v):\n return u - v * u.dot(v) / v.dot(v)"
] |
[
[
"torch.mean",
"numpy.sqrt",
"torch.nn.functional.logsigmoid",
"torch.sum",
"torch.nn.Embedding",
"torch.nn.LogSigmoid",
"torch.mul"
],
[
"numpy.std",
"numpy.mean"
],
[
"numpy.median",
"numpy.linalg.norm",
"numpy.mean",
"numpy.array",
"sklearn.decomposition.PCA"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dahvida/FOCUS
|
[
"796f546bec536528afbb77cec02fb8238bcbec27"
] |
[
"utils.py"
] |
[
"from rdkit import Chem\nfrom rdkit.Chem import AllChem, Descriptors\nimport numpy as np\nfrom rdkit.ML.Descriptors import MoleculeDescriptors\nfrom sklearn import preprocessing\nimport random\nfrom hyperopt import tpe, fmin, Trials\nfrom sklearn.metrics import average_precision_score, roc_auc_score\nfrom sklearn.model_selection import StratifiedKFold\nfrom datetime import datetime\nfrom imxgboost.imbalance_xgb import imbalance_xgboost as imb_xgb\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n#%%\n\"DESCRIPTOR FUNCTIONS\"\n\ndef getmorgan(mols, radius, bits):\n \"\"\"\n Calculates ECFP fingerprints of given radius and number of bits for a list N of rdkit molecules.\n Returns a np.array (N, bits).\n \"\"\"\n\n Morgan = [AllChem.GetMorganFingerprintAsBitVect(x,radius,nBits=bits) for x in mols]\n final = np.array(Morgan, dtype = np.float32)\n\n return final\n\ndef make2D(mols):\n \"\"\"\n Calculates RDKIT descriptors for a list N of rdkit molecules.\n Returns a sanitized np.array.\n \"\"\"\n names = [x[0] for x in Descriptors._descList]\n calc = MoleculeDescriptors.MolecularDescriptorCalculator(names)\n descs = [0] * len(mols)\n counter = 0\n for i in range(len(descs)):\n counter = counter + 1\n if counter == 1000:\n print(i)\n counter = 0\n temp = calc.CalcDescriptors(mols[i])\n if temp is not None:\n temp = temp[1:]\n descs[i] = temp\n descs = np.array(descs)\n descs = np.nan_to_num(descs, posinf=10e10, neginf=-10e10)\n return descs\n\n\n#%%\n\"OPTIMIZATION HELPER FUNCTIONS\"\n\ndef create_set(full_dataset, y, under):\n \"\"\"\n Input\n full_dataset: np.array (N, D), molecular descriptors D for each compound N\n y: np.array (N, 1), binary label for each compound N \n under: dict value or int, defines how many inactive compounds to use\n in each subset\n \n Output\n inbalanced: np.array (Nx, D), molecular descriptors D for each compound Nx \n after resampling\n y: np.array (Nx, 1), binary label for each compound Nx \n after resampling\n \"\"\"\n \n #find idx of all 0s and 1s\n y_pos = []\n y_neg = []\n under = int(under) #make sure parameter is in the right format\n for i in range(len(y)):\n if y[i] == 1:\n y_pos.append(i)\n else:\n y_neg.append(i)\n \n #divide dataset in respective classes\n actives = [full_dataset[x] for x in y_pos]\n inactives = [full_dataset[x] for x in y_neg]\n \n #sample inactive class and reconstruct\n subsample = random.sample(inactives, under)\n inbalanced = np.array(actives+subsample)\n y_pos = [1] * len(actives)\n y_neg = [0] * len(subsample)\n y = np.array(y_pos + y_neg) \n\n return inbalanced, y\n\ndef crossval(dataset, y, params, norm):\n \"\"\"\n Input\n dataset: np.array (Nt, D), molecular descriptors D for each compound N\n y: np.array (N, 1), binary label for each compound N \n params: dictionary (5 params), hyperparameters of the ensemble\n \n Output\n avg_pr: np.float, average area-under-curve for the Precision-Recall curve\n in 5fold stratified cross-validation\n avg_roc: np.float, average area-under-curve for the ROC curve\n in 5fold stratified cross-validation\n \"\"\"\n \n #initialize placeholders, start timer\n global_time = datetime.now()\n box_PR = []\n box_ROC = []\n n_folds = 5 \n params['n_e'] = int(params['n_e']) #make sure the param is in the right format\n kfold = StratifiedKFold(n_folds, shuffle=True, random_state=1) #set state for reproducibility\n for train_index, test_index in kfold.split(dataset, y):\n cv_time = datetime.now() #monitor time per fold\n #get folds\n X_t = np.array([dataset[i] for i in 
train_index])\n X_v = np.array([dataset[i] for i in test_index])\n y_t = np.array([y[i] for i in train_index])\n y_v = np.array([y[i] for i in test_index])\n \n #initialize box for ensemble predictions\n preds_box = []\n \n if norm == \"yes\":\n scaler = preprocessing.StandardScaler().fit(X_t) \n X_t = scaler.transform(X_t)\n X_v = scaler.transform(X_v)\n \n #iterate for each model in the ensemble\n for i in range(params['n_e']):\n \n #get dataset\n Res, y_r = create_set(X_t, y_t,\n params['under'])\n \n #create model with right parameters \n model = imb_xgb(\n special_objective='focal',\n focal_gamma = params['gamma'],\n max_depth = params['depth'],\n eta = params['eta']\n )\n \n #fit, predict and store in box\n model.fit(Res, y_r)\n preds = model.predict_sigmoid(X_v)\n preds_box.append(preds)\n \n #calculate mean of all predictions in the box\n preds_box = np.transpose(np.array(preds_box))\n preds_box = np.mean(preds_box, axis=1)\n \n #get fold metric, save in metric box for overall CV performance\n PR_AUC = average_precision_score(y_v, preds_box)\n ROC_AUC = roc_auc_score(y_v, preds_box)\n box_PR.append(PR_AUC)\n box_ROC.append(ROC_AUC)\n print(\"time elapsed:\", datetime.now() - cv_time)\n print(\"PR-AUC:\", PR_AUC)\n print(\"ROC-AUC:\", ROC_AUC)\n print(\"--------FOLD DONE -----------\")\n\n #calculate average CV metrics\n avg_pr = np.mean(np.array(box_PR))\n avg_roc = np.mean(np.array(box_ROC))\n print(\"Cross validation finished, time elapsed:\", datetime.now() - global_time)\n print(\"Average PR_AUC:\", avg_pr)\n print(\"STD:\", np.std(np.array(box_PR)))\n print(\"Average ROC_AUC:\", avg_roc)\n return avg_pr, avg_roc\n\n#%%\n\"OPTIMIZATION FUNCTION\"\n\ndef optimize(x: np.array,\n y: np.array,\n search: dict,\n iters = 30,\n norm = \"no\") -> dict:\n \n\n def model_eval(args):\n \n #define training loop\n params = args\n print(\"----------------------------------\")\n print(\"PARAMETERS:\")\n print('Gamma:', params['gamma'])\n print('Depth:', params['depth'])\n print('n_e:', params['n_e'])\n print('eta:', params['eta'])\n print('Majority:', params['under'])\n \n print(\"----------------------------------\")\n avg_pr, avg_roc = crossval(x, y, params, norm)\n print(\"--------------EVALUATION END--------------\")\n return 1-avg_pr #have to do this since fmax doesnt work\n\n trials = Trials()\n #get optimum hyperparameters\n optimum = fmin(\n fn = model_eval,\n space = search,\n algo = tpe.suggest,\n max_evals = iters, \n trials = trials,\n )\n \n return optimum\n"
] |
[
[
"sklearn.metrics.roc_auc_score",
"numpy.nan_to_num",
"sklearn.model_selection.StratifiedKFold",
"numpy.mean",
"sklearn.metrics.average_precision_score",
"sklearn.preprocessing.StandardScaler",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
daisyden/lpot
|
[
"d8709bb73ce13cfc0fd760845e0be40af22f5a45",
"d8709bb73ce13cfc0fd760845e0be40af22f5a45",
"d8709bb73ce13cfc0fd760845e0be40af22f5a45",
"d8709bb73ce13cfc0fd760845e0be40af22f5a45"
] |
[
"lpot/adaptor/tf_utils/util.py",
"examples/tensorflow/recommendation/wide_deep_large_ds/inference.py",
"lpot/policy/magnitude.py",
"test/test_bayesian.py"
] |
[
"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2019 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\n\nfrom google.protobuf import text_format\nimport tensorflow as tf\n\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.framework.ops import Graph\nfrom tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\nfrom lpot.utils import logger\n\n\ndef read_graph(in_graph, in_graph_is_binary=True):\n \"\"\"Reads input graph file as GraphDef.\n\n :param in_graph: input graph file.\n :param in_graph_is_binary: whether input graph is binary, default True.\n :return: input graphDef.\n \"\"\"\n if not gfile.Exists(in_graph):\n raise ValueError('Input graph pb file %s does not exist.' % in_graph)\n\n input_graph_def = graph_pb2.GraphDef()\n mode = \"rb\" if in_graph_is_binary else \"r\"\n with gfile.Open(in_graph, mode) as f:\n data = f.read()\n if in_graph_is_binary:\n input_graph_def.ParseFromString(data)\n else:\n text_format.Merge(data, input_graph_def)\n\n return input_graph_def\n\n\ndef write_graph(out_graph_def, out_graph_file):\n \"\"\"Write output graphDef to file.\n\n :param out_graph_def: output graphDef.\n :param out_graph_file: path to output graph file.\n :return: None.\n \"\"\"\n if not isinstance(out_graph_def, tf.compat.v1.GraphDef):\n raise ValueError(\n 'out_graph_def is not instance of TensorFlow GraphDef.')\n if out_graph_file and not os.path.exists(os.path.dirname(out_graph_file)):\n raise ValueError('\"output_graph\" directory does not exists.')\n f = gfile.GFile(out_graph_file, 'wb')\n f.write(out_graph_def.SerializeToString())\n\n\ndef is_ckpt_format(model_path):\n \"\"\"check the model_path format is ckpt or not.\n\n Args:\n model_path (string): the model folder path\n\n Returns:\n string: return the ckpt prefix if the model_path contains ckpt format data else None.\n \"\"\"\n file_list = [os.path.splitext(i)[-1] for i in os.listdir(model_path)]\n if file_list.count('.meta') == 1 and file_list.count('.index') == 1:\n return [os.path.splitext(i)[0] for i in os.listdir(model_path) if i.endswith(\".meta\")][0]\n\n return None\n\n\ndef is_keras_savedmodel_format(model_path):\n \"\"\"check the model_path format is keras saved model or not.\n\n Args:\n model_path (string): the model folder path\n\n Returns:\n bool: return the keras model if the model is keras model else None.\n \"\"\"\n if is_saved_model_format(model_path):\n model = tf.keras.models.load_model(model_path)\n if isinstance(model, tf.keras.Model):\n return model\n return None\n\ndef parse_ckpt_model(ckpt_prefix, outputs):\n \"\"\"Parse the ckpt model\n\n Args:\n ckpt_prefix (string): the ckpt prefix for parsing\n \"\"\"\n with tf.compat.v1.Session() as sess:\n saver = 
tf.compat.v1.train.import_meta_graph(ckpt_prefix + '.meta',\n clear_devices=True)\n sess.run(tf.compat.v1.global_variables_initializer())\n saver.restore(sess, ckpt_prefix)\n graph_def = sess.graph.as_graph_def()\n _parse_ckpt_bn_input(graph_def)\n\n output_graph_def = graph_util.convert_variables_to_constants(\n sess=sess,\n input_graph_def=graph_def,\n output_node_names=outputs)\n\n return output_graph_def\n\n\ndef _parse_ckpt_bn_input(graph_def):\n \"\"\"parse ckpt batch norm inputs to match correct moving mean and variance\n Args:\n graph_def (graph_def): original graph_def\n Returns:\n graph_def: well linked graph_def\n \"\"\"\n for node in graph_def.node:\n if node.op == 'FusedBatchNorm':\n moving_mean_op_name = node.input[3]\n moving_var_op_name = node.input[4]\n moving_mean_op = _get_nodes_from_name(moving_mean_op_name, graph_def)[0]\n moving_var_op = _get_nodes_from_name(moving_var_op_name, graph_def)[0]\n\n if moving_mean_op.op == 'Const':\n name_part = moving_mean_op_name.rsplit('/', 1)[0]\n real_moving_mean_op_name = name_part + '/moving_mean'\n if len(_get_nodes_from_name(real_moving_mean_op_name, graph_def)) > 0:\n # replace the real moving mean op name\n node.input[3] = real_moving_mean_op_name\n\n if moving_var_op.op == 'Const':\n name_part = moving_var_op_name.rsplit('/', 1)[0]\n real_moving_var_op_name = name_part + '/moving_variance'\n if len(_get_nodes_from_name(real_moving_var_op_name, graph_def)) > 0:\n # replace the real moving mean op name\n node.input[4] = real_moving_var_op_name\n\n return graph_def\n\n\ndef _get_nodes_from_name(node_name, graph_def):\n \"\"\"get nodes from graph_def using node name\n Args:\n graph_def (graph_def): graph_def\n node_name (str): node name\n\n Returns:\n node (NodeDef): graph node\n \"\"\"\n return [node for node in graph_def.node if node.name == node_name]\n\n\ndef is_saved_model_format(model_path):\n \"\"\"check the model_path format is saved_model or not\n\n Args:\n model_path (string): the model folder path\n\n Returns:\n bool: return True if the model_path contains saved_model format else False.\n \"\"\"\n file_list = [os.path.splitext(i)[-1] for i in os.listdir(model_path)]\n return bool(file_list.count('.pb') == 1 and ('variables') in os.listdir(model_path))\n\n\ndef parse_kerasmodel_model(model):\n \"\"\"Convert Keras Model to graphdef\n\n Args:\n model (keras.Model): Keras model object\n\n Returns:\n graph_def: the parsed graph_def object.\n input_names: input node names\n output_names: output node name\n \"\"\"\n\n kwargs = dict(zip(model.input_names, model.inputs))\n full_model = tf.function(lambda **kwargs: model(kwargs.values()))\n concrete_function = full_model.get_concrete_function(**kwargs)\n frozen_model = convert_variables_to_constants_v2(concrete_function)\n graph_def = frozen_model.graph.as_graph_def()\n input_names = [node.name for node in graph_def.node if node.op == 'Placeholder']\n output_names = [output.split(':')[0] for output in model.output_names]\n # replace the output name with squential\n for output_name in output_names:\n for node in graph_def.node[::-1]:\n if node.op == 'Identity' and output_name in node.input[0]:\n node.name = output_name\n break\n\n return graph_def, input_names, output_names\n\n\ndef parse_savedmodel_model(model_path):\n \"\"\"Convert SavedModel to graphdef\n\n Args:\n model_path (string): the model folder path\n\n Returns:\n graphdef: the parsed graphdef object.\n input_names: input node names\n output_names: output node name\n \"\"\"\n\n with tf.compat.v1.Session() as sess:\n 
sess.run(tf.compat.v1.global_variables_initializer())\n\n meta_graph = tf.compat.v1.saved_model.loader.load(\n sess, [\"serve\"], model_path)\n\n model_graph_signature = list(\n meta_graph.signature_def.items())[0][1]\n\n input_names = [input_item[1].name\n for input_item in model_graph_signature.inputs.items()]\n\n output_names = [output_item[1].name\n for output_item in model_graph_signature.outputs.items()]\n\n output_graph_def = graph_util.convert_variables_to_constants(\n sess=sess,\n input_graph_def=sess.graph_def,\n output_node_names=[output_item[0]\n for output_item in model_graph_signature.outputs.items()])\n\n return output_graph_def, input_names, output_names\n\n\ndef convert_pb_to_savedmodel(graph_def, input_tensor_names, output_tensor_names, output_dir):\n \"\"\"Convert the graphdef to SavedModel\n\n Args:\n graph_def (graphdef): parsed graphdef object.\n input_tensor_names (list): input tensor names list.\n output_tensor_names (list): output tensor names list.\n output_dir (string): Converted SavedModel store path.\n \"\"\"\n builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(output_dir)\n\n sigs = {}\n with tf.compat.v1.Session() as sess:\n tf.import_graph_def(graph_def, name=\"\")\n g = tf.compat.v1.get_default_graph()\n\n input_tensors = {}\n for input_tensor_name in output_tensor_names:\n input_tensors[input_tensor_name.split(':')[0]] = g.get_tensor_by_name(\n \"{}\".format(input_tensor_name))\n\n output_tensors = {}\n for output_tensor_name in input_tensor_names:\n output_tensors[output_tensor_name.split(':')[0]] = g.get_tensor_by_name(\n \"{}\".format(output_tensor_name))\n\n sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \\\n tf.compat.v1.saved_model.signature_def_utils.predict_signature_def(\n output_tensors, input_tensors)\n\n builder.add_meta_graph_and_variables(sess,\n [tag_constants.SERVING],\n signature_def_map=sigs)\n\n builder.save()\n\n\ndef get_slim_graph(model, model_func, arg_scope, images, outputs=None, **kwargs):\n assert tf.version.VERSION < '2.0.0', 'slim model only used in tensorflow 1.x'\n import tf_slim as slim\n with tf.compat.v1.Session() as sess:\n with slim.arg_scope(arg_scope) as scope: # pylint: disable=not-context-manager\n model_func(images, is_training=False, **kwargs)\n graph_def = sess.graph.as_graph_def()\n\n if outputs is None:\n outputs = graph_def.node[-1].name\n\n from tensorflow.python.tools.freeze_graph import freeze_graph_with_def_protos\n graph_def = freeze_graph_with_def_protos(\n input_graph_def=graph_def,\n input_saver_def=None,\n input_checkpoint=model,\n output_node_names=outputs,\n restore_op_name='save/restore_all',\n filename_tensor_name='save/Const:0',\n output_graph='',\n clear_devices=True,\n initializer_nodes='')\n\n graph = tf.Graph()\n with graph.as_default():\n tf.import_graph_def(graph_def, name='')\n return graph\n\ndef get_estimator_graph(estimator, input_fn):\n with tf.Graph().as_default() as g:\n features, input_hooks = estimator._get_features_from_input_fn(\n input_fn, tf.estimator.ModeKeys.PREDICT)\n estimator_spec = estimator._call_model_fn(features, None,\n tf.estimator.ModeKeys.PREDICT, estimator.config)\n\n outputs = [tensor.name for tensor in estimator_spec.predictions.values()] if\\\n isinstance(estimator_spec.predictions, dict) else \\\n [estimator_spec.predictions.name]\n logger.info('estimator output tensor names is {}'.format(outputs))\n with tf.compat.v1.Session(graph=g) as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n # Freezing a graph requires 
output_node_names, which can be found in\n # estimator_spec.predictions that contains prediction tensors as a\n # dictionary\n # When a model uses Iterator, we need to have 'MakeIterator' (default\n # name used by TF) in the output_node_names as well.\n output_nodes = list(set([output.split(':')[0] for output in outputs]))\n if 'MakeIterator' in [node.op for node in g.as_graph_def().node]:\n output_nodes.append('MakeIterator')\n \n graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(sess,\n g.as_graph_def(), output_nodes)\n\n graph = tf.Graph()\n with graph.as_default():\n tf.import_graph_def(graph_def, name='') \n return graph\n \ndef get_tensor_by_name(graph, name, try_cnt=3):\n \"\"\"Get the tensor by name considering the 'import' scope when model\n may be imported more then once, handle naming format like both name:0 and name\n \n Args:\n graph (tf.compat.v1.GraphDef): the model to get name from\n name (string): tensor of tensor_name:0 or tensor_name without suffixes\n try_cnt: the times to add 'import/' to find tensor\n \n Returns:\n tensor: tensor got by name.\n \"\"\"\n if name.find(':') == -1:\n name = name + ':0'\n for _ in range(try_cnt):\n try:\n return graph.get_tensor_by_name(name)\n except BaseException:\n name = 'import/' + name\n raise ValueError('can not find tensor by name')\n\ndef iterator_sess_run(sess, iter_op, feed_dict, output_tensor, iteration=-1):\n \"\"\"Run the graph that have iterator integrated in the graph\n \n Args:\n sess (tf.compat.v1.Session): the model sess to run the graph\n iter_op (Operator): the MakeIterator op\n feed_dict(dict): the feeds to initialize a new iterator\n output_tensor(list): the output tensors \n iteration(int): iterations to run, when -1 set, run to end of iterator\n \n Returns:\n preds: the results of the predictions\n \"\"\"\n sess.run(iter_op, feed_dict)\n preds = []\n idx = 0 \n while idx < iteration or iteration == -1:\n try:\n prediction = sess.run(output_tensor)\n preds.append(prediction)\n idx += 1\n except tf.errors.OutOfRangeError:\n break\n except:\n logger.warning('not run out of the preds...')\n break\n preds = list(zip(*preds))\n return preds\n\ndef validate_graph_input(graph_def, input_node_names):\n \"\"\"Check input node existence, have 3 conditions:\n 1. input node names empty, return False\n 2. input node names in the graph node list, return True\n 3. input node names not in the graph node list, raise Error\n\n Args:\n graph_def (GraphDef):GraphDef\n input_node_namess ([String]): input node names list.\n\n Returns:\n status (bool): the validation status\n \"\"\"\n if len(input_node_names) == 0:\n return False\n all_node_name = [node.name for node in graph_def.node]\n for user_input_name in input_node_names:\n assert user_input_name in all_node_name, \\\n \"Input node name {} doesn't exist in the model, please check the yaml.\".\\\n format(user_input_name)\n return True\n\ndef validate_graph_output(graph_def, output_node_names):\n \"\"\"Check output node existence, have 3 conditions:\n 1. output node names empty, return False\n 2. output node names in the graph node list, return True\n 3. 
output node names not in the graph node list, raise Error\n\n Args:\n graph_def (GraphDef):GraphDef\n output_node_namess ([String]): output node names list.\n\n Returns:\n status (bool): the validation status\n \"\"\"\n if len(output_node_names) == 0:\n return False\n all_node_name = [node.name for node in graph_def.node]\n for user_output_name in output_node_names:\n assert user_output_name in all_node_name,\\\n \"Output node name {} doesn't exist in the model, please check the yaml.\".\\\n format(user_output_name)\n return True\n\ndef get_input_node_names(graph_def):\n from .graph_rewriter.graph_util import GraphAnalyzer\n g = GraphAnalyzer()\n g.graph = graph_def\n g.parse_graph()\n return g.get_graph_input_output()[0]\n\ndef get_output_node_names(graph_def):\n from .graph_rewriter.graph_util import GraphAnalyzer\n g = GraphAnalyzer()\n g.graph = graph_def\n g.parse_graph()\n return g.get_graph_input_output()[1]\n\ndef get_graph_def(model, outputs=[], auto_input_output=False):\n \"\"\"Get the input model graphdef\n\n Args:\n model ([Graph, GraphDef or Path String]): support Graph, GraphDef, keras.Model,\n frozen pb or ckpt/savedmodel path.\n outputs ([String]): output node names list.\n\n Returns:\n graph_def (graphdef): parsed graphdef object.\n \"\"\"\n graph_def = None\n if isinstance(model, Graph):\n graph_def = model.as_graph_def()\n elif isinstance(model, tf.compat.v1.GraphDef):\n graph_def = model\n elif isinstance(model, tf.keras.Model):\n graph_def, _, _ = parse_kerasmodel_model(model)\n elif isinstance(model, str):\n graph_def = tf.compat.v1.GraphDef()\n model = os.path.expanduser(model)\n if model.endswith('.pb') and os.path.isfile(model):\n with open(model, 'rb') as f:\n graph_def.ParseFromString(f.read())\n elif model.endswith('.ckpt') and os.path.isfile(model):\n raise ValueError('use get_slim_graph to get the graph first')\n elif model.endswith('.h5') and os.path.isfile(model):\n # (TODO) support h5 saved model, notice there is also h5 weights\n raise ValueError('saved model h5 format not supported yet, soon')\n elif os.path.isdir(model):\n # tf2.x checkpoint only save weight and do not contain any\n # description of the computation, so we drop tf2.x checkpoint support\n ckpt_prefix = is_ckpt_format(model)\n assert outputs\n if ckpt_prefix is not None:\n graph_def = parse_ckpt_model(\n os.path.join(model, ckpt_prefix), outputs)\n # (TODO) support tf2.x saved model\n # tf1.x saved model is out of date and few examples, drop\n if is_saved_model_format(model):\n keras_model = is_keras_savedmodel_format(model)\n if keras_model is not None:\n graph_def, _, _ = parse_kerasmodel_model(keras_model)\n else:\n raise ValueError('tf saved model format not supported yet, soon')\n if graph_def is None:\n raise ValueError('only support tf1.x checkpoint or tf2.x keras saved model')\n else:\n raise ValueError('only support frozen pb file or model path')\n else:\n raise ValueError(\n 'only support Graph, GraghDef, keras.Model, tf1.x checkpoint, keras saved model')\n\n return graph_def\n",
"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport sys\nimport os\nimport numpy as np\nimport argparse\nimport collections\nimport time\nimport math\nimport json\nimport datetime\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import ops\nfrom tensorflow.core.framework import graph_pb2\nfrom google.protobuf import text_format\nfrom argparse import ArgumentParser\nfrom tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference\n\ntf.compat.v1.disable_eager_execution()\n\ndef load_graph(model_file):\n \"\"\"This is a function to load TF graph from pb file\n\n Args:\n model_file (string): TF pb file local path\n\n Returns:\n graph: TF graph object\n \"\"\"\n graph = tf.Graph()\n #graph_def = tf.compat.v1.GraphDef()\n graph_def = graph_pb2.GraphDef()\n\n file_ext = os.path.splitext(model_file)[1]\n\n with open(model_file, \"rb\") as f:\n if file_ext == '.pbtxt':\n text_format.Merge(f.read(), graph_def)\n else:\n graph_def.ParseFromString(f.read())\n\n with graph.as_default():\n tf.import_graph_def(graph_def, name='')\n\n return graph\n\n\nnumeric_feature_names = [\"numeric_1\"]\nstring_feature_names = [\"string_1\"]\n\ndef get_feature_name(compute_accuracy):\n\n if compute_accuracy:\n full_features_names = numeric_feature_names + string_feature_names + [\"label\"]\n feature_datatypes = [tf.io.FixedLenSequenceFeature([], tf.float32, default_value=0.0, allow_missing=True)]+[tf.io.FixedLenSequenceFeature(\n [], tf.int64, default_value=0, allow_missing=True)]+[tf.io.FixedLenSequenceFeature([], tf.int64, default_value=0, allow_missing=True)]\n else:\n full_features_names = numeric_feature_names + string_feature_names\n feature_datatypes = [tf.io.FixedLenSequenceFeature([], tf.float32, default_value=0.0, allow_missing=True)]+[tf.io.FixedLenSequenceFeature(\n [], tf.int64, default_value=0, allow_missing=True)]\n return full_features_names, feature_datatypes\n\ndef input_fn(data_file, num_epochs, shuffle, batch_size, compute_accuracy=True):\n \"\"\"Generate an input function for the Estimator.\"\"\"\n full_features_names, feature_datatypes = get_feature_name(compute_accuracy)\n def _parse_function(proto):\n f = collections.OrderedDict(\n zip(full_features_names, feature_datatypes))\n parsed_features = tf.io.parse_example(proto, f)\n parsed_feature_vals_num = [tf.reshape(\n parsed_features[\"numeric_1\"], shape=[-1, 13])]\n parsed_feature_vals_str = [tf.reshape(\n parsed_features[\"string_1\"], shape=[-1, 2]) for i in string_feature_names]\n parsed_feature_vals = parsed_feature_vals_num + parsed_feature_vals_str\n if compute_accuracy:\n parsed_feature_vals_label = [tf.reshape(parsed_features[i], shape=[-1]) for i in [\"label\"]]\n parsed_feature_vals = parsed_feature_vals + parsed_feature_vals_label\n return 
parsed_feature_vals\n\n # Extract lines from input files using the Dataset API.\n dataset = tf.data.TFRecordDataset([data_file])\n if shuffle:\n dataset = dataset.shuffle(buffer_size=20000)\n dataset = dataset.batch(batch_size)\n dataset = dataset.map(_parse_function, num_parallel_calls=28)\n dataset = dataset.prefetch(batch_size*10)\n return dataset\n\nclass eval_classifier_optimized_graph:\n \"\"\"Evaluate image classifier with optimized TensorFlow graph\"\"\"\n\n def __init__(self):\n arg_parser = ArgumentParser(description='Parse args')\n arg_parser.add_argument('-i', '--input_graph', type=str,\n help='Specify the input of the model',\n dest='input_graph',\n required=True)\n arg_parser.add_argument('-o', '--output_graph', type=str,\n help='Specify the output of the model',\n dest='output_graph')\n arg_parser.add_argument('--calibration_data_location', type=str,\n help='full path of calibration data file',\n dest='calib_data')\n arg_parser.add_argument('--evaluation_data_location', type=str,\n help='full path of validation data file',\n dest='eval_data',\n required=True)\n arg_parser.add_argument('--batch_size', type=int,\n help='batch size for inference.Default is 512',\n default=512,\n dest='batch_size')\n arg_parser.add_argument('--num_intra_threads', type=int,\n help='number of threads for an operator',\n required=False,\n default=0,\n dest='num_intra_threads')\n arg_parser.add_argument('--num_inter_threads', type=int,\n help='number of threads across operators',\n required=False,\n default=0,\n dest='num_inter_threads')\n arg_parser.add_argument('--kmp_blocktime', type=str,\n help='KMP_BLOCKTIME value',\n required=False,\n default=None,\n dest='kmp_blocktime')\n arg_parser.add_argument('-r', \"--accuracy_only\",\n help='For accuracy measurement only.',\n dest='accuracy_only', action='store_true')\n arg_parser.add_argument(\"--config\", default=None,\n help=\"tuning config\")\n arg_parser.add_argument('--benchmark',\n dest='benchmark',\n action='store_true',\n help='run benchmark')\n arg_parser.add_argument('--tune',\n dest='tune',\n action='store_true',\n help='use lpot to tune.')\n arg_parser.add_argument(\"--warmup-steps\",\n type=int, default=50,\n help=\"number of warmup steps\")\n arg_parser.add_argument(\"--steps\",\n type=int, default=2000,\n help=\"number of iterations\")\n\n arg_parser.add_argument('--env',\n dest='env',\n help='specific Tensorflow env',\n default='mkl')\n\n\n self.args = arg_parser.parse_args()\n\n def auto_tune(self):\n \"\"\"This is lpot tuning part to generate a quantized pb\n Returns:\n graph: it will return a quantized pb\n \"\"\"\n from lpot import Quantization\n\n fp32_graph = load_graph(self.args.input_graph)\n quantizer = Quantization(self.args.config)\n if self.args.calib_data:\n calib_dataloader = Dataloader(self.args.calib_data, self.args.batch_size)\n q_model = quantizer(\n fp32_graph,\n q_dataloader=calib_dataloader,\n eval_func=self.eval_inference,\n eval_dataloader=None)\n return q_model\n else:\n print(\"Please provide calibration dataset!\")\n\n def eval_inference(self, infer_graph):\n print(\"Run inference\")\n\n data_config = tf.compat.v1.ConfigProto()\n data_config.intra_op_parallelism_threads = self.args.num_intra_threads\n data_config.inter_op_parallelism_threads = self.args.num_inter_threads\n data_config.use_per_session_threads = 1\n\n infer_config = tf.compat.v1.ConfigProto()\n if self.args.env == 'mkl':\n print(\"Set inter and intra for mkl: \")\n print(\"intra_op_parallelism_threads = \", self.args.num_intra_threads)\n 
print(\"inter_op_parallelism_threads = \", self.args.num_inter_threads)\n infer_config.intra_op_parallelism_threads = self.args.num_intra_threads\n infer_config.inter_op_parallelism_threads = self.args.num_inter_threads\n infer_config.use_per_session_threads = 1\n\n total_test_samples = sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(self.args.eval_data))\n total_batches = math.ceil(float(total_test_samples)/self.args.batch_size)\n placeholder_list = ['new_numeric_placeholder','new_categorical_placeholder']\n input_tensor = [infer_graph.get_tensor_by_name(name + \":0\") for name in placeholder_list]\n output_name = \"import/head/predictions/probabilities\"\n output_tensor = infer_graph.get_tensor_by_name(output_name + \":0\" )\n correctly_predicted = 0\n evaluate_duration = 0.0\n\n features_list = []\n data_graph = tf.Graph()\n with data_graph.as_default():\n res_dataset = input_fn(self.args.eval_data, 1, False, self.args.batch_size)\n iterator = tf.compat.v1.data.make_one_shot_iterator(res_dataset)\n next_element = iterator.get_next()\n with tf.compat.v1.Session(config=data_config, graph=data_graph) as data_sess:\n for i in range(int(total_batches)):\n batch = data_sess.run(next_element)\n features=batch[0:3]\n features_list.append(features)\n\n if (not self.args.accuracy_only):\n iteration = 0\n warm_up_iteration = self.args.warmup_steps\n total_run = self.args.steps\n\n if total_run > total_batches:\n total_run = total_batches\n\n with tf.compat.v1.Session(config=infer_config, graph=infer_graph) as infer_sess:\n i = 0\n for i in range(int(total_run)):\n start_time = time.time()\n logistic = infer_sess.run(output_tensor, dict(zip(input_tensor, features_list[iteration][0:2])))\n time_consume = time.time() - start_time\n\n if iteration > warm_up_iteration:\n evaluate_duration += time_consume\n\n iteration += 1\n if iteration > total_batches:\n iteration = 0\n test_batches = total_run - warm_up_iteration\n else:\n with tf.compat.v1.Session(config=infer_config, graph=infer_graph) as infer_sess:\n i = 0\n for i in range(int(total_batches)):\n start_time = time.time()\n logistic = infer_sess.run(output_tensor, dict(zip(input_tensor, features_list[i][0:2])))\n time_consume = time.time() - start_time\n evaluate_duration += time_consume\n\n predicted_labels = np.argmax(logistic,1)\n correctly_predicted=correctly_predicted+np.sum(features_list[i][2] == predicted_labels)\n\n i=i+1\n\n accuracy = float(correctly_predicted) / float(total_test_samples)\n test_batches = total_batches\n\n no_of_test_samples = test_batches * self.args.batch_size\n latency = 1000 * float(evaluate_duration) / float(test_batches)\n throughput = no_of_test_samples / evaluate_duration\n\n print('--------------------------------------------------')\n print('Total test records: %d' % no_of_test_samples)\n print('Number of batches: %d' % test_batches)\n print('Batch size = %d' % self.args.batch_size)\n print('Latency: %.3f ms' % latency)\n print('Throughput: %.3f records/sec' % throughput)\n if self.args.accuracy_only:\n print(\"Accuracy: %.5f\" % accuracy)\n print('--------------------------------------------------')\n\n if self.args.accuracy_only:\n return accuracy\n\n def run(self):\n \"\"\" This is lpot function include tuning and benchmark option \"\"\"\n\n if self.args.tune:\n q_model = evaluate_opt_graph.auto_tune()\n def save(model, path):\n from tensorflow.python.platform import gfile\n f = gfile.GFile(path, 'wb')\n f.write(model.as_graph_def().SerializeToString())\n\n save(q_model, self.args.output_graph)\n\n 
if self.args.benchmark:\n infer_graph = load_graph(self.args.input_graph)\n\n self.eval_inference(infer_graph)\n\n\n\nclass Dataloader(object):\n def __init__(self, data_location, batch_size):\n \"\"\"dataloader generator\n\n Args:\n data_location (str): tf recorder local path\n batch_size (int): dataloader batch size\n \"\"\"\n self.batch_size = batch_size\n self.data_file = data_location\n self.total_samples = sum(1 for _ in tf.compat.v1.python_io.tf_record_iterator(data_location))\n self.n = math.ceil(float(self.total_samples) / batch_size)\n print(\"batch size is \" + str(self.batch_size) + \",\" + str(self.n) + \" iteration\")\n\n def __iter__(self):\n data_graph = tf.Graph()\n with data_graph.as_default():\n self.dataset = input_fn(self.data_file, 1, False, self.batch_size)\n self.dataset_iterator = tf.compat.v1.data.make_one_shot_iterator(self.dataset)\n next_element = self.dataset_iterator.get_next()\n\n with tf.compat.v1.Session(graph=data_graph) as sess:\n for i in range(self.n):\n batch = sess.run(next_element)\n yield (batch[0:2], batch[2])\n\n\nif __name__ == \"__main__\":\n evaluate_opt_graph = eval_classifier_optimized_graph()\n evaluate_opt_graph.run()\n\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom .policy import policy_registry, PrunePolicy\nfrom ..utils import logger\n\n\n@policy_registry\nclass MagnitudePrunePolicy(PrunePolicy):\n def __init__(self, model, local_config, global_config, adaptor):\n super(MagnitudePrunePolicy, self).__init__(model, local_config, global_config, adaptor)\n\n def on_epoch_begin(self, epoch):\n logger.debug(\"start pruning in epoch {}\".format(str(epoch)))\n self.sparsity = self.update_sparsity(epoch)\n self.is_last_epoch = epoch == self.end_epoch\n logger.debug(\"epoch {} sparsity = {}\".format(str(epoch), str(self.sparsity)))\n if epoch >= self.start_epoch and epoch <= self.end_epoch:\n self.compute_mask()\n\n def on_batch_begin(self, batch_id):\n for weight in self.weights:\n if weight in self.masks:\n new_weight = self.masks[weight] * \\\n np.array(self.adaptor.get_weight(self.model, weight))\n new_weight_zeros = (new_weight == 0).sum()\n self.adaptor.update_weights(self.model, weight, new_weight)\n\n def compute_mask(self):\n \"\"\"compute masks according to absolute values\"\"\"\n for weight in self.weights:\n tensor = np.array(self.adaptor.get_weight(self.model, weight))\n if len(tensor.shape) in self.tensor_dims:\n if self.method == \"per_channel\":\n tensor_flat = tensor.copy().reshape([tensor.shape[0], tensor.shape[1], -1])\n tensor_flat.sort(axis=-1)\n threshold = tensor_flat[:, :, int(self.sparsity * tensor_flat.shape[-1])]\n threshold = np.expand_dims(np.expand_dims(threshold, -1), -1)\n threshold = np.repeat(threshold, tensor.shape[-1], axis=-1)\n threshold = np.repeat(threshold, tensor.shape[-2], axis=-2)\n self.masks[weight] = threshold < tensor\n else:\n tensor_flat = sorted(np.abs(tensor.flatten()))\n threshold = float(tensor_flat[int(len(tensor_flat) * self.sparsity)])\n self.masks[weight] = threshold < np.abs(tensor)\n\n def on_epoch_end(self):\n if self.is_last_epoch:\n for weight in self.weights:\n if weight in self.masks:\n logger.info(\n \"{} with mask sparsity {} {} {}\".format(\n weight, str(\n self.masks[weight].size), str(\n self.masks[weight].sum()), str(\n 1 - self.masks[weight].sum() / self.masks[weight].size)))\n new_weight = self.masks[weight] * \\\n np.array(self.adaptor.get_weight(self.model, weight))\n self.adaptor.update_weights(self.model, weight, new_weight)\n\n def on_batch_end(self):\n for weight in self.weights:\n if weight in self.masks:\n new_weight = self.masks[weight] * \\\n np.array(self.adaptor.get_weight(self.model, weight))\n self.adaptor.update_weights(self.model, weight, new_weight)\n",
"\"\"\"Tests for quantization\"\"\"\r\nimport numpy as np\r\nimport unittest\r\nimport os\r\nimport yaml\r\nimport tensorflow as tf\r\nfrom tensorflow.core.framework import attr_value_pb2\r\nfrom tensorflow.core.framework import graph_pb2\r\nfrom tensorflow.core.framework import node_def_pb2\r\nfrom tensorflow.python.framework import tensor_util\r\nfrom tensorflow.python.framework import dtypes\r\n\r\ndef build_fake_yaml():\r\n fake_yaml = '''\r\n model:\r\n name: fake_yaml\r\n framework: tensorflow\r\n inputs: x\r\n outputs: op_to_store\r\n device: cpu\r\n evaluation:\r\n accuracy:\r\n metric:\r\n topk: 1\r\n tuning:\r\n strategy:\r\n name: bayesian\r\n exit_policy:\r\n max_trials: 1\r\n accuracy_criterion:\r\n relative: 0.01\r\n workspace:\r\n path: saved\r\n '''\r\n y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)\r\n with open('fake_yaml.yaml',\"w\",encoding=\"utf-8\") as f:\r\n yaml.dump(y,f)\r\n f.close()\r\n\r\ndef build_fake_yaml2():\r\n fake_yaml = '''\r\n model:\r\n name: fake_yaml\r\n framework: tensorflow\r\n inputs: input\r\n outputs: conv3\r\n device: cpu\r\n evaluation:\r\n accuracy:\r\n metric:\r\n topk: 1\r\n tuning:\r\n strategy:\r\n name: bayesian\r\n exit_policy:\r\n max_trials: 10\r\n accuracy_criterion:\r\n relative: -0.01\r\n workspace:\r\n path: saved\r\n '''\r\n y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)\r\n with open('fake_yaml2.yaml',\"w\",encoding=\"utf-8\") as f:\r\n yaml.dump(y,f)\r\n f.close()\r\n\r\ndef build_fake_model():\r\n try:\r\n graph = tf.Graph()\r\n graph_def = tf.GraphDef()\r\n with tf.Session() as sess:\r\n x = tf.placeholder(tf.float64, shape=(1,3,3,1), name='x')\r\n y = tf.constant(np.random.random((2,2,1,1)), name='y')\r\n op = tf.nn.conv2d(input=x, filter=y, strides=[1,1,1,1], padding='VALID', name='op_to_store')\r\n\r\n sess.run(tf.global_variables_initializer())\r\n constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ['op_to_store'])\r\n\r\n graph_def.ParseFromString(constant_graph.SerializeToString())\r\n with graph.as_default():\r\n tf.import_graph_def(graph_def, name='')\r\n except:\r\n graph = tf.Graph()\r\n graph_def = tf.compat.v1.GraphDef()\r\n with tf.compat.v1.Session() as sess:\r\n x = tf.compat.v1.placeholder(tf.float64, shape=(1,3,3,1), name='x')\r\n y = tf.compat.v1.constant(np.random.random((2,2,1,1)), name='y')\r\n op = tf.nn.conv2d(input=x, filters=y, strides=[1,1,1,1], padding='VALID', name='op_to_store')\r\n\r\n sess.run(tf.compat.v1.global_variables_initializer())\r\n constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, ['op_to_store'])\r\n\r\n graph_def.ParseFromString(constant_graph.SerializeToString())\r\n with graph.as_default():\r\n tf.import_graph_def(graph_def, name='')\r\n return graph\r\n\r\ndef create_test_graph():\r\n input_node = node_def_pb2.NodeDef()\r\n input_node.name = \"input\"\r\n input_node.op = \"Placeholder\"\r\n input_node.attr[\"dtype\"].CopyFrom(attr_value_pb2.AttrValue(\r\n type=dtypes.float32.as_datatype_enum))\r\n\r\n conv1_weight_node = node_def_pb2.NodeDef()\r\n conv1_weight_node.name = \"conv1_weights\"\r\n conv1_weight_node.op = \"Const\"\r\n conv1_weight_value = np.float32(np.abs(np.random.randn(3,3,3,32)))\r\n conv1_weight_node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\r\n conv1_weight_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(\r\n tensor=tensor_util.make_tensor_proto(\r\n conv1_weight_value, conv1_weight_value.dtype.type, 
conv1_weight_value.shape)))\r\n\r\n conv1_node = node_def_pb2.NodeDef()\r\n conv1_node.name = \"conv1\"\r\n conv1_node.op = \"Conv2D\"\r\n conv1_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(\r\n type=dtypes.float32.as_datatype_enum))\r\n conv1_node.input.extend([input_node.name, conv1_weight_node.name])\r\n conv1_node.attr['strides'].CopyFrom(attr_value_pb2.AttrValue(\r\n list=attr_value_pb2.AttrValue.ListValue(i=[1,1,1,1])))\r\n conv1_node.attr['dilations'].CopyFrom(attr_value_pb2.AttrValue(\r\n list=attr_value_pb2.AttrValue.ListValue(i=[1,1,1,1])))\r\n conv1_node.attr['padding'].CopyFrom(attr_value_pb2.AttrValue(s=b'SAME'))\r\n conv1_node.attr['data_format'].CopyFrom(attr_value_pb2.AttrValue(s=b'NHWC'))\r\n\r\n bias_node = node_def_pb2.NodeDef()\r\n bias_node.name = \"conv1_bias\"\r\n bias_node.op = \"Const\"\r\n bias_value = np.float32(np.abs(np.random.randn(32)))\r\n bias_node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\r\n bias_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(\r\n bias_value, bias_value.dtype.type, bias_value.shape)))\r\n\r\n bias_add_node = node_def_pb2.NodeDef()\r\n bias_add_node.name = \"conv1_bias_add\"\r\n bias_add_node.op = \"BiasAdd\"\r\n bias_add_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\r\n bias_add_node.input.extend([conv1_node.name, bias_node.name])\r\n bias_add_node.attr['data_format'].CopyFrom(attr_value_pb2.AttrValue(s=b'NHWC'))\r\n\r\n relu_node = node_def_pb2.NodeDef()\r\n relu_node.op = \"Relu\"\r\n relu_node.name = \"relu\"\r\n relu_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\r\n relu_node.input.extend([bias_add_node.name])\r\n\r\n conv2_weight_node = node_def_pb2.NodeDef()\r\n conv2_weight_node.name = \"conv2_weights\"\r\n conv2_weight_node.op = \"Const\"\r\n conv2_weight_value = np.float32(np.abs(np.random.randn(3,3,32,32)))\r\n conv2_weight_node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\r\n conv2_weight_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(\r\n tensor=tensor_util.make_tensor_proto(\r\n conv2_weight_value, conv2_weight_value.dtype.type, conv2_weight_value.shape)))\r\n\r\n conv2_node = node_def_pb2.NodeDef()\r\n conv2_node.name = \"conv2\"\r\n conv2_node.op = \"Conv2D\"\r\n conv2_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(\r\n type=dtypes.float32.as_datatype_enum))\r\n conv2_node.input.extend([relu_node.name, conv2_weight_node.name])\r\n conv2_node.attr['strides'].CopyFrom(attr_value_pb2.AttrValue(\r\n list=attr_value_pb2.AttrValue.ListValue(i=[1,1,1,1])))\r\n conv2_node.attr['dilations'].CopyFrom(attr_value_pb2.AttrValue(\r\n list=attr_value_pb2.AttrValue.ListValue(i=[1,1,1,1])))\r\n conv2_node.attr['padding'].CopyFrom(attr_value_pb2.AttrValue(s=b'SAME'))\r\n conv2_node.attr['data_format'].CopyFrom(attr_value_pb2.AttrValue(s=b'NHWC'))\r\n\r\n bias_node2 = node_def_pb2.NodeDef()\r\n bias_node2.name = \"conv2_bias\"\r\n bias_node2.op = \"Const\"\r\n bias_value2 = np.float32(np.abs(np.random.randn(32)))\r\n bias_node2.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\r\n bias_node2.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(\r\n bias_value2, bias_value2.dtype.type, bias_value2.shape)))\r\n\r\n bias_add_node2 = node_def_pb2.NodeDef()\r\n bias_add_node2.name = \"conv2_bias_add\"\r\n bias_add_node2.op = \"BiasAdd\"\r\n 
bias_add_node2.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\r\n bias_add_node2.input.extend([conv2_node.name, bias_node2.name])\r\n bias_add_node2.attr['data_format'].CopyFrom(attr_value_pb2.AttrValue(s=b'NHWC'))\r\n\r\n relu_node2 = node_def_pb2.NodeDef()\r\n relu_node2.op = \"Relu\"\r\n relu_node2.name = \"relu2\"\r\n relu_node2.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\r\n relu_node2.input.extend([bias_add_node2.name])\r\n\r\n conv3_weight_node = node_def_pb2.NodeDef()\r\n conv3_weight_node.name = \"conv3_weights\"\r\n conv3_weight_node.op = \"Const\"\r\n conv3_weight_value = np.float32(np.abs(np.random.randn(3,3,32,32)))\r\n conv3_weight_node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.float32.as_datatype_enum))\r\n conv3_weight_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(\r\n tensor=tensor_util.make_tensor_proto(\r\n conv3_weight_value, conv3_weight_value.dtype.type, conv3_weight_value.shape)))\r\n\r\n conv3_node = node_def_pb2.NodeDef()\r\n conv3_node.name = \"conv3\"\r\n conv3_node.op = \"Conv2D\"\r\n conv3_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(\r\n type=dtypes.float32.as_datatype_enum))\r\n conv3_node.input.extend([relu_node2.name, conv3_weight_node.name])\r\n conv3_node.attr['strides'].CopyFrom(attr_value_pb2.AttrValue(\r\n list=attr_value_pb2.AttrValue.ListValue(i=[1,1,1,1])))\r\n conv3_node.attr['dilations'].CopyFrom(attr_value_pb2.AttrValue(\r\n list=attr_value_pb2.AttrValue.ListValue(i=[1,1,1,1])))\r\n conv3_node.attr['padding'].CopyFrom(attr_value_pb2.AttrValue(s=b'SAME'))\r\n conv3_node.attr['data_format'].CopyFrom(attr_value_pb2.AttrValue(s=b'NHWC'))\r\n\r\n test_graph = graph_pb2.GraphDef()\r\n\r\n test_graph.node.extend([input_node,\r\n conv1_weight_node,\r\n conv1_node,\r\n bias_node,\r\n bias_add_node,\r\n relu_node,\r\n conv2_weight_node,\r\n conv2_node,\r\n bias_node2,\r\n bias_add_node2,\r\n relu_node2,\r\n conv3_weight_node,\r\n conv3_node,\r\n ])\r\n return test_graph\r\n\r\n\r\nclass TestQuantization(unittest.TestCase):\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n self.constant_graph = build_fake_model()\r\n self.test_graph = create_test_graph()\r\n build_fake_yaml()\r\n build_fake_yaml2()\r\n\r\n @classmethod\r\n def tearDownClass(self):\r\n os.remove('fake_yaml.yaml')\r\n os.remove('fake_yaml2.yaml')\r\n os.remove('saved/history.snapshot')\r\n os.remove('saved/deploy.yaml')\r\n os.rmdir('saved')\r\n\r\n def test_run_bayesian_one_trial(self):\r\n from lpot import Quantization\r\n\r\n quantizer = Quantization('fake_yaml.yaml')\r\n dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)\r\n dataloader = quantizer.dataloader(dataset)\r\n quantizer(\r\n self.constant_graph,\r\n q_dataloader=dataloader,\r\n eval_dataloader=dataloader\r\n )\r\n\r\n def test_run_bayesian_max_trials(self):\r\n from lpot import Quantization\r\n\r\n quantizer = Quantization('fake_yaml2.yaml')\r\n dataset = quantizer.dataset('dummy', (1, 224, 224, 3), label=True)\r\n dataloader = quantizer.dataloader(dataset)\r\n quantizer(\r\n self.test_graph,\r\n q_dataloader=dataloader,\r\n eval_dataloader=dataloader\r\n )\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()\r\n"
] |
[
[
"tensorflow.compat.v1.saved_model.signature_def_utils.predict_signature_def",
"tensorflow.keras.models.load_model",
"tensorflow.Graph",
"tensorflow.compat.v1.train.import_meta_graph",
"tensorflow.import_graph_def",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.python.platform.gfile.GFile",
"tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2",
"tensorflow.compat.v1.saved_model.loader.load",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.compat.v1.saved_model.builder.SavedModelBuilder",
"tensorflow.python.framework.graph_util.convert_variables_to_constants",
"tensorflow.compat.v1.GraphDef",
"tensorflow.python.tools.freeze_graph.freeze_graph_with_def_protos",
"tensorflow.core.framework.graph_pb2.GraphDef",
"tensorflow.python.platform.gfile.Open"
],
[
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.data.make_one_shot_iterator",
"numpy.sum",
"tensorflow.data.TFRecordDataset",
"tensorflow.python.platform.gfile.GFile",
"tensorflow.reshape",
"tensorflow.io.FixedLenSequenceFeature",
"tensorflow.compat.v1.Session",
"numpy.argmax",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.python_io.tf_record_iterator",
"tensorflow.io.parse_example",
"tensorflow.core.framework.graph_pb2.GraphDef"
],
[
"numpy.repeat",
"numpy.expand_dims",
"numpy.abs"
],
[
"numpy.random.randn",
"tensorflow.nn.conv2d",
"tensorflow.Graph",
"tensorflow.graph_util.convert_variables_to_constants",
"tensorflow.import_graph_def",
"tensorflow.Session",
"tensorflow.compat.v1.GraphDef",
"tensorflow.compat.v1.graph_util.convert_variables_to_constants",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.python.framework.tensor_util.make_tensor_proto",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"numpy.random.random",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.core.framework.node_def_pb2.NodeDef",
"tensorflow.compat.v1.placeholder",
"tensorflow.core.framework.attr_value_pb2.AttrValue.ListValue",
"tensorflow.GraphDef",
"tensorflow.core.framework.graph_pb2.GraphDef"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
meawoppl/numba
|
[
"bb8df0aee99133c6d52465ae9f9df2a7996339f3",
"bb8df0aee99133c6d52465ae9f9df2a7996339f3",
"bb8df0aee99133c6d52465ae9f9df2a7996339f3"
] |
[
"examples/example.py",
"oldnumba/tests/prange/test_prange.py",
"numba/tests/test_unpack_sequence.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\n\nfrom scipy.misc import lena\nfrom numpy import ones\nimport numpy\n\nfrom numba.decorators import jit\nfrom numba import int32, int64\n\n# Original approach will be slower for now due to the object mode failback\n# for numpy.zero_like\n#\n# @jit(argtypes=[int32[:,:], int32[:,:]], restype=int32[:,:])\n# def filter2d(image, filt):\n# M, N = image.shape\n# Mf, Nf = filt.shape\n# Mf2 = Mf // 2\n# Nf2 = Nf // 2\n# result = numpy.zeros_like(image)\n# for i in range(Mf2, M - Mf2):\n# for j in range(Nf2, N - Nf2):\n# num = 0.0\n# for ii in range(Mf):\n# for jj in range(Nf):\n# num += (filt[Mf-1-ii, Nf-1-jj] * image[i-Mf2+ii, j-Nf2+jj])\n# result[i, j] = num\n# return result\n\n\n@jit((int64[:,::1], int32[:,::1], int64[:,::1]), nopython=True)\ndef filter2d_core(image, filt, result):\n M, N = image.shape\n Mf, Nf = filt.shape\n Mf2 = Mf // 2\n Nf2 = Nf // 2\n for i in range(Mf2, M - Mf2):\n for j in range(Nf2, N - Nf2):\n num = 0\n for ii in range(Mf):\n for jj in range(Nf):\n num += (filt[Mf-1-ii, Nf-1-jj] * image[i-Mf2+ii,j-Nf2+jj])\n result[i, j] = num\n\n@jit\ndef filter2d(image, filt):\n result = numpy.zeros_like(image)\n filter2d_core(image, filt, result)\n return result\n\n\nimage = lena()\nfilter = ones((7,7), dtype='int32')\n\nresult = filter2d(image, filter) # warm up\n\nimport time\nstart = time.time()\nresult = filter2d(image, filter)\nduration = time.time() - start\n\nfrom scipy.ndimage import convolve\nstart = time.time()\nresult2 = convolve(image, filter)\nduration2 = time.time() - start\n\nprint(\"Time for LLVM code = %f\\nTime for convolve = %f\" % (duration, duration2))\n\nfrom pylab import subplot, imshow, show, title, gray\nsubplot(1,2,1)\nimshow(image)\ntitle('Original Image')\ngray()\nsubplot(1,2,2)\nimshow(result)\ntitle('Filtered Image')\ngray()\nshow()\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\n\nimport numba\nfrom numba import autojit, double\n\nimport unittest\n# from unittest import FunctionTestCase as testcase\n\nimport numpy as np\n\ntests = []\ndef testcase(f):\n tests.append(f)\n return f\n\n#----------------------------------------------------------------------------\n# Simple isolated tests\n#----------------------------------------------------------------------------\n\n@testcase\ndef test_simple_prange_shared():\n @autojit(warn=False)\n def simple_prange_shared():\n \"\"\"\n >>> simple_prange_shared()\n 20L\n \"\"\"\n result = np.empty(1, dtype=np.int64)\n shared = 20\n\n for i in numba.prange(1):\n result[0] = shared\n return result[0]\n assert simple_prange_shared() == 20\n\n@testcase\ndef test_simple_prange_private():\n @autojit(warn=False)\n def simple_prange_private():\n \"\"\"\n >>> simple_prange_private()\n 10L\n \"\"\"\n result = np.empty(1, dtype=np.int64)\n var = 20\n for i in numba.prange(1):\n var = 10\n result[0] = var\n return result[0]\n assert simple_prange_private() == 10\n\n@testcase\ndef test_simple_prange_lastprivate():\n @autojit(warn=False)\n def simple_prange_lastprivate():\n \"\"\"\n >>> simple_prange_lastprivate()\n 10\n \"\"\"\n var = 20\n for i in numba.prange(1):\n var = 10\n return var\n assert simple_prange_lastprivate() == 10\n \n\n@testcase\ndef test_simple_prange_reduction():\n @autojit(warn=False)\n def simple_prange_reduction():\n \"\"\"\n >>> simple_prange_reduction()\n 15\n \"\"\"\n var = 10\n for i in numba.prange(1):\n var += 5\n return var\n assert simple_prange_reduction() == 15\n\n#----------------------------------------------------------------------------\n# Error Tests\n#----------------------------------------------------------------------------\n\n@autojit(warn=False)\ndef prange_reduction_error():\n \"\"\"\n DISABLED.\n\n >> prange_reduction_error()\n Traceback (most recent call last):\n ...\n NumbaError: 32:8: Local variable 'sum' is not bound yet\n \"\"\"\n for i in numba.prange(10):\n sum += i\n sum = 0.0\n return sum\n\n#----------------------------------------------------------------------------\n# Advanced Tests\n#----------------------------------------------------------------------------\n\n@testcase\ndef test_prange_reduction2():\n @autojit(warn=False)\n def prange_reduction2():\n \"\"\"\n >>> prange_reduction2()\n 49999995000000.0\n \"\"\"\n sum = 0.0\n for i in numba.prange(10000000):\n sum += i\n return sum\n assert prange_reduction2() == 49999995000000.0\n\n@testcase\ndef test_prange_reduction_and_privates():\n @autojit(warn=False)\n def prange_reduction_and_privates():\n \"\"\"\n >>> prange_reduction_and_privates()\n 100.0\n \"\"\"\n sum = 10.0\n for i in numba.prange(10):\n j = i * 2\n sum += j\n return sum\n assert prange_reduction_and_privates() == 100.0\n\n@testcase\ndef test_prange_lastprivate():\n @autojit(warn=False)\n def prange_lastprivate():\n \"\"\"\n >>> prange_lastprivate()\n 100.0\n 18\n \"\"\"\n sum = 10.0\n for i in numba.prange(10):\n j = i * 2\n sum += j\n print(sum)\n return j\n assert prange_lastprivate() == 18\n\n@testcase\ndef test_prange_shared_privates_reductions():\n @autojit(warn=False)\n def prange_shared_privates_reductions(shared):\n \"\"\"\n >>> prange_shared_privates_reductions(2.0)\n 100.0\n \"\"\"\n sum = 10.0\n\n for i in numba.prange(10):\n j = i * shared\n sum += j\n shared = 3.0\n return sum\n assert prange_shared_privates_reductions(2.0) == 100.0\n\n@testcase\ndef 
test_test_sum2d():\n @autojit(warn=False)\n def test_sum2d(A):\n \"\"\"\n >>> a = np.arange(100).reshape(10, 10)\n >>> test_sum2d(a)\n 4950.0\n >>> test_sum2d(a.astype(np.complex128))\n (4950+0j)\n >>> np.sum(a)\n 4950\n \"\"\"\n sum = 0.0\n for i in numba.prange(A.shape[0]):\n for j in range(A.shape[1]):\n # print(i, j)\n sum += A[i, j]\n return sum\n\n a = np.arange(100).reshape(10, 10)\n assert test_sum2d(a) == 4950.0\n assert test_sum2d(a.astype(np.complex128)) == 4950+0j\n assert np.sum(a) == 4950\n\n@testcase\ndef test_test_prange_in_closure():\n @autojit(warn=False)\n def test_prange_in_closure(x):\n \"\"\"\n >>> test_prange_in_closure(2.0)()\n 1000.0\n \"\"\"\n sum = 10.0\n N = 10\n\n @double()\n def inner():\n sum = 100.0\n for i in numba.prange(N):\n for j in range(N):\n sum += i * x\n return sum\n return inner\n assert test_prange_in_closure(2.0)() == 1000.0\n\n@testcase\ndef test_test_prange_in_closure2():\n @autojit(warn=False)\n def test_prange_in_closure2(x):\n \"\"\"\n >>> test_prange_in_closure2(2.0)()\n 10000.0\n \"\"\"\n sum = 10.0\n N = 10\n\n @double()\n def inner():\n sum = 100.0\n for i in numba.prange(N):\n for j in range(N):\n sum += (i * N + j) * x\n return sum\n return inner\n\n assert test_prange_in_closure2(2.0)() == 10000.0\n\nif __name__ == '__main__':\n # unittest.main()\n for test in tests:\n print(\"running\", test.__name__)\n test()",
"from __future__ import print_function\n\nimport numpy\n\nimport numba.unittest_support as unittest\nfrom numba.compiler import compile_isolated, Flags\nfrom numba import types\nfrom .support import TestCase\n\nenable_pyobj_flags = Flags()\nenable_pyobj_flags.set(\"enable_pyobject\")\n\nforce_pyobj_flags = Flags()\nforce_pyobj_flags.set(\"force_pyobject\")\n\nno_pyobj_flags = Flags()\n\n\ndef unpack_list(l):\n a, b, c = l\n return (a, b, c)\n\n\ndef unpack_shape(a):\n x, y, z = a.shape\n return x + y + z\n\n\ndef unpack_range():\n a, b, c = range(3)\n return a + b + c\n\n\ndef unpack_range_too_small():\n a, b, c = range(2)\n return a + b + c\n\n\ndef unpack_range_too_large():\n a, b, c = range(4)\n return a + b + c\n\n\ndef unpack_tuple():\n a, b, c = (1, 2, 3)\n return a + b + c\n\n\ndef unpack_tuple_too_small():\n a, b, c = (1, 2)\n return a + b + c\n\n\ndef unpack_tuple_too_large():\n a, b, c = (1, 2, 3, 4)\n return a + b + c\n\n\ndef unpack_heterogenous_tuple():\n a, b, c = (1, 2.5, 3j)\n return a + b + c\n\n\ndef unpack_nested_heterogenous_tuple():\n a, (b, c) = (1, (2.5, 3j))\n return a + b + c\n\n\ndef chained_unpack_assign1(x, y):\n # Used to fail in object mode (issue #580)\n a = (b, c) = (x, y)\n (d, e) = a\n return d + e + b + c\n\n\nclass TestUnpack(TestCase):\n\n def test_unpack_list(self):\n pyfunc = unpack_list\n cr = compile_isolated(pyfunc, (), flags=enable_pyobj_flags)\n cfunc = cr.entry_point\n l = [1, 2, 3]\n self.assertEqual(cfunc(l), pyfunc(l))\n\n def test_unpack_shape(self, flags=force_pyobj_flags):\n pyfunc = unpack_shape\n cr = compile_isolated(pyfunc, [types.Array(dtype=types.int32,\n ndim=3,\n layout='C')],\n flags=flags)\n cfunc = cr.entry_point\n a = numpy.zeros(shape=(1, 2, 3))\n self.assertPreciseEqual(cfunc(a), pyfunc(a))\n\n def test_unpack_shape_npm(self):\n self.test_unpack_shape(flags=no_pyobj_flags)\n\n def test_unpack_range(self, flags=force_pyobj_flags):\n self.run_nullary_func(unpack_range, flags)\n\n def test_unpack_range_npm(self):\n self.test_unpack_range(flags=no_pyobj_flags)\n\n def test_unpack_tuple(self, flags=force_pyobj_flags):\n self.run_nullary_func(unpack_tuple, flags)\n\n def test_unpack_tuple_npm(self):\n self.test_unpack_tuple(flags=no_pyobj_flags)\n\n def test_unpack_heterogenous_tuple(self, flags=force_pyobj_flags):\n self.run_nullary_func(unpack_heterogenous_tuple, flags)\n\n def test_unpack_heterogenous_tuple_npm(self):\n self.test_unpack_heterogenous_tuple(flags=no_pyobj_flags)\n\n def test_unpack_nested_heterogenous_tuple(self, flags=force_pyobj_flags):\n self.run_nullary_func(unpack_nested_heterogenous_tuple, flags)\n\n def test_unpack_nested_heterogenous_tuple_npm(self):\n self.test_unpack_nested_heterogenous_tuple(flags=no_pyobj_flags)\n\n def test_chained_unpack_assign(self, flags=force_pyobj_flags):\n pyfunc = chained_unpack_assign1\n cr = compile_isolated(pyfunc, [types.int32, types.int32],\n flags=flags)\n cfunc = cr.entry_point\n args = (4, 5)\n self.assertPreciseEqual(cfunc(*args), pyfunc(*args))\n\n def test_chained_unpack_assign_npm(self):\n self.test_chained_unpack_assign(flags=no_pyobj_flags)\n\n def check_unpack_error(self, pyfunc, flags=force_pyobj_flags):\n cr = compile_isolated(pyfunc, (), flags=flags)\n cfunc = cr.entry_point\n with self.assertRaises(ValueError):\n cfunc()\n\n def test_unpack_tuple_too_small(self):\n self.check_unpack_error(unpack_tuple_too_small)\n\n def test_unpack_tuple_too_small_npm(self):\n self.check_unpack_error(unpack_tuple_too_small, no_pyobj_flags)\n\n def 
test_unpack_tuple_too_large(self):\n self.check_unpack_error(unpack_tuple_too_large)\n\n def test_unpack_tuple_too_large_npm(self):\n self.check_unpack_error(unpack_tuple_too_large, no_pyobj_flags)\n\n def test_unpack_range_too_small(self):\n self.check_unpack_error(unpack_range_too_small)\n\n def test_unpack_range_too_small_npm(self):\n self.check_unpack_error(unpack_range_too_small, no_pyobj_flags)\n\n def test_unpack_range_too_large(self):\n self.check_unpack_error(unpack_range_too_large)\n\n def test_unpack_range_too_large_npm(self):\n self.check_unpack_error(unpack_range_too_large, no_pyobj_flags)\n\n\nif __name__ == '__main__':\n unittest.main(buffer=True)\n\n"
] |
[
[
"scipy.misc.lena",
"numpy.zeros_like",
"scipy.ndimage.convolve",
"numpy.ones"
],
[
"numpy.arange",
"numpy.sum",
"numpy.empty"
],
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.19",
"0.18",
"0.12",
"0.17",
"0.16"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hwang595/Draco
|
[
"8472912cce82e6d74087a402fd417e7a837517ab"
] |
[
"src/setup.py"
] |
[
"from distutils.core import setup\nfrom Cython.Build import cythonize\nimport numpy\n\nsetup(\n name = 'cyclic decoding',\n ext_modules = cythonize(\"decoding.pyx\"),\n include_dirs=[numpy.get_include()]\n)"
] |
[
[
"numpy.get_include"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wbm06/RfPy
|
[
"3ea75add7cab6d73d81ca87372defdab71dc7c42"
] |
[
"rfpy/harmonics.py"
] |
[
"# Copyright 2019 Pascal Audet\n#\n# This file is part of RfPy.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nHarmonic decomposition module.\n\n\"\"\"\n\n# Import modules and functions\nimport numpy as np\nfrom obspy.core import Stream, Trace\nimport matplotlib.pyplot as plt\n\n\nclass Harmonics(object):\n \"\"\"\n A Harmonics object contains attributes and methods to decompose\n radial and transverse component receiver functions into\n back-azimuth harmonics. The object is initialized with two\n :class:`~obspy.core.Stream` objects containing observed (or synthetised)\n radial and transverse receiver functions. The methods available \n can decompose the receiver functions along a fixed azimuth, or\n search for the optimal azimuth within a time range by minimizing\n one component. 
\n\n Note\n ----\n The object is initialized with the ``rfV1`` field only, and\n other attributes are added to the object as the analysis proceeds.\n A second ``rfV2`` can be included, which is typically a copy of ``rfV1``\n filtered at different corner frequencies and is used to stack along the\n Pps and Pss moveout curves.\n\n Parameters\n ----------\n radialRF : :class:`~obspy.core.Stream`\n Stream object containing the radial-component receiver function\n seismograms \n transvRF : :class:`~obspy.core.Stream`\n Stream object containing the transverse-component receiver function\n seismograms \n azim : float\n Direction (azimuth) along which the B1 component of the stream\n is minimized (between ``xmin`` and ``xmax``)\n xmin : float\n Minimum x axis value over which to calculate ``azim``\n xmax : float\n Maximum x axis value over which to calculate ``azim``\n\n Other Parameters\n ----------------\n hstream : :class:`~obspy.core.Stream`\n Stream containing the 5 harmonics, oriented in direction ``azim``\n radial_forward : :class:`~obspy.core.Stream`, optional\n Stream containing the radial receiver functions\n transv_forward : :class:`~obspy.core.Stream`, optional\n Stream containing the transverse receiver functions\n\n \"\"\"\n\n def __init__(self, radialRF, transvRF=None, azim=0, xmin=0., xmax=10.):\n\n # Load example data if initializing empty object\n if radialRF == 'demo' or radialRF == 'Demo':\n print(\"Uploading demo data - station NY.MMPY\")\n import os\n import pickle\n file = open(os.path.join(\n os.path.dirname(__file__),\n \"examples/data\", \"demo_streams.pkl\"), 'rb')\n radialRF = pickle.load(file)\n transvRF = pickle.load(file)\n file.close()\n\n if not transvRF:\n raise TypeError(\"__init__() missing 1 required positional argument: 'transvRF'\")\n\n # fftshift if the time axis starts at negative lags \n if radialRF[0].stats.taxis[0]<0.:\n for tr in radialRF:\n tr.data = np.fft.fftshift(tr.data)\n for tr in transvRF:\n tr.data = np.fft.fftshift(tr.data)\n\n self.radialRF = radialRF\n self.transvRF = transvRF\n self.azim = azim\n self.xmin = xmin\n self.xmax = xmax\n\n def dcomp_find_azim(self, xmin=None, xmax=None):\n \"\"\"\n Method to decompose radial and transverse receiver function \n streams into back-azimuth harmonics and determine the main \n orientation ``azim``, obtained by minimizing the B1 component\n between ``xmin`` and ``xmax`` (i.e., time or depth).\n\n Parameters\n ----------\n xmin : float\n Minimum x axis value over which to calculate ``azim``\n xmax : float\n Maximum x axis value over which to calculate ``azim``\n\n Attributes\n ----------\n hstream : :class:`~obspy.core.Stream`\n Stream containing the 5 harmonics, oriented in direction ``azim``\n azim : float\n Direction (azimuth) along which the B1 component of the stream\n is minimized (between ``xmin`` and ``xmax``)\n var : :class:`~numpy.ndarray`\n Variance of the 5 harmonics between ``xmin`` and ``xmax``\n\n \"\"\"\n\n if not xmin:\n xmin = self.xmin\n if not xmax:\n xmax = self.xmax\n\n print()\n print('Decomposing receiver functions into baz harmonics')\n\n # Some integers\n nbin = len(self.radialRF)\n nz = len(self.radialRF[0].data)\n naz = 180\n daz = np.float(360/naz)\n deg2rad = np.pi/180.\n\n # Define depth range over which to calculate azimuth\n indmin = int(xmin/self.radialRF[0].stats.delta)\n indmax = int(xmax/self.radialRF[0].stats.delta)\n\n # Copy stream stats\n str_stats = self.radialRF[0].stats\n\n # Initialize work arrays\n C0 = np.zeros((nz, naz))\n C1 = np.zeros((nz, naz))\n C2 = 
np.zeros((nz, naz))\n C3 = np.zeros((nz, naz))\n C4 = np.zeros((nz, naz))\n\n # Loop over each depth step\n for iz in range(nz):\n\n # Build matrices OBS and H for each azimuth\n for iaz in range(naz):\n\n # Initialize work arrays\n OBS = np.zeros(2*nbin)\n H = np.zeros((2*nbin, 5))\n\n azim = iaz*daz\n\n # Radial component\n for irow, trace in enumerate(self.radialRF):\n\n baz = trace.stats.baz\n OBS[irow] = trace.data[iz]\n H[irow, 0] = 1.0\n H[irow, 1] = np.cos(deg2rad*(baz-azim))\n H[irow, 2] = np.sin(deg2rad*(baz-azim))\n H[irow, 3] = np.cos(2.*deg2rad*(baz-azim))\n H[irow, 4] = np.sin(2.*deg2rad*(baz-azim))\n\n shift = 90.\n\n # Transverse component\n for irow, trace in enumerate(self.transvRF):\n\n baz = trace.stats.baz\n OBS[irow+nbin] = trace.data[iz]\n H[irow+nbin, 0] = 0.0\n H[irow+nbin, 1] = np.cos(deg2rad*(baz+shift-azim))\n H[irow+nbin, 2] = np.sin(deg2rad*(baz+shift-azim))\n H[irow+nbin, 3] = np.cos(2.*deg2rad*(baz+shift/2.0-azim))\n H[irow+nbin, 4] = np.sin(2.*deg2rad*(baz+shift/2.0-azim))\n\n # Solve system of equations with truncated SVD\n u, s, v = np.linalg.svd(H)\n s[s < 0.001] = 0.\n CC = np.linalg.solve(s[:, None] * v, u.T.dot(OBS)[:5])\n\n # Fill up arrays\n C0[iz, iaz] = np.float(CC[0])\n C1[iz, iaz] = np.float(CC[1])\n C2[iz, iaz] = np.float(CC[2])\n C3[iz, iaz] = np.float(CC[3])\n C4[iz, iaz] = np.float(CC[4])\n\n # Minimize variance of third component over specific depth range to\n # find azim\n C1var = np.zeros(naz)\n for iaz in range(naz):\n C1var[iaz] = np.sqrt(np.mean(np.square(C1[indmin:indmax, iaz])))\n indaz = np.argmin(C1var)\n\n C0var = np.sqrt(np.mean(np.square(C0[indmin:indmax, indaz])))\n C1var = np.sqrt(np.mean(np.square(C1[indmin:indmax, indaz])))\n C2var = np.sqrt(np.mean(np.square(C2[indmin:indmax, indaz])))\n C3var = np.sqrt(np.mean(np.square(C3[indmin:indmax, indaz])))\n C4var = np.sqrt(np.mean(np.square(C4[indmin:indmax, indaz])))\n\n # Put back into traces\n A = Trace(data=C0[:, indaz], header=str_stats)\n B1 = Trace(data=C1[:, indaz], header=str_stats)\n B2 = Trace(data=C2[:, indaz], header=str_stats)\n C1 = Trace(data=C3[:, indaz], header=str_stats)\n C2 = Trace(data=C4[:, indaz], header=str_stats)\n\n # Put all treaces into stream\n self.hstream = Stream(traces=[A, B1, B2, C1, C2])\n self.azim = indaz*daz\n self.var = [C0var, C1var, C2var, C3var, C4var]\n\n def dcomp_fix_azim(self, azim=None):\n \"\"\"\n Method to decompose radial and transverse receiver function \n streams into back-azimuth harmonics along direction ``azim``.\n\n Parameters\n ----------\n azim : float\n Direction (azimuth) along which the B1 component of the stream\n is minimized (between ``xmin`` and ``xmax``)\n\n Attributes\n ----------\n hstream : :class:`~obspy.core.Stream`\n Stream containing the 5 harmonics, oriented in direction ``azim``\n\n \"\"\"\n\n if azim is None:\n azim = self.azim\n else:\n self.azim = azim\n\n print('Decomposing receiver functions into baz harmonics for azimuth = ',\n azim)\n\n # Some integers\n nbin = len(self.radialRF)\n nz = len(self.radialRF[0].data)\n deg2rad = np.pi/180.\n\n # Copy stream stats\n str_stats = self.radialRF[0].stats\n\n # Initialize work arrays\n C0 = np.zeros(nz)\n C1 = np.zeros(nz)\n C2 = np.zeros(nz)\n C3 = np.zeros(nz)\n C4 = np.zeros(nz)\n\n # Loop over each depth step\n for iz in range(nz):\n\n # Initialize working arrays\n OBS = np.zeros(2*nbin)\n H = np.zeros((2*nbin, 5))\n\n # Radial component\n for irow, trace in enumerate(self.radialRF):\n\n baz = trace.stats.baz\n OBS[irow] = trace.data[iz]\n H[irow, 0] = 
1.0\n H[irow, 1] = np.cos(deg2rad*(baz-azim))\n H[irow, 2] = np.sin(deg2rad*(baz-azim))\n H[irow, 3] = np.cos(2.*deg2rad*(baz-azim))\n H[irow, 4] = np.sin(2.*deg2rad*(baz-azim))\n\n shift = 90.\n\n # Transverse component\n for irow, trace in enumerate(self.transvRF):\n\n baz = trace.stats.baz\n OBS[irow+nbin] = trace.data[iz]\n H[irow+nbin, 0] = 0.0\n H[irow+nbin, 1] = np.cos(deg2rad*(baz+shift-azim))\n H[irow+nbin, 2] = np.sin(deg2rad*(baz+shift-azim))\n H[irow+nbin, 3] = np.cos(2.*deg2rad*(baz+shift/2.0-azim))\n H[irow+nbin, 4] = np.sin(2.*deg2rad*(baz+shift/2.0-azim))\n\n # Solve system of equations with truncated SVD\n u, s, v = np.linalg.svd(H)\n s[s < 0.001] = 0.\n CC = np.linalg.solve(s[:, None] * v, u.T.dot(OBS)[:5])\n\n # Fill up arrays\n C0[iz] = np.float(CC[0])\n C1[iz] = np.float(CC[1])\n C2[iz] = np.float(CC[2])\n C3[iz] = np.float(CC[3])\n C4[iz] = np.float(CC[4])\n\n # Put back into traces\n A = Trace(data=C0, header=str_stats)\n B1 = Trace(data=C1, header=str_stats)\n B2 = Trace(data=C2, header=str_stats)\n C1 = Trace(data=C3, header=str_stats)\n C2 = Trace(data=C4, header=str_stats)\n\n # Put all traces into stream\n self.hstream = Stream(traces=[A, B1, B2, C1, C2])\n\n def forward(self, baz_list=None):\n \"\"\"\n Method to forward calculate radial and transverse component\n receiver functions given the 5 pre-determined harmonics and \n a list of back-azimuth values. The receiver function signal \n parameters (length, sampling rate, etc.) will be identical \n to those in the stream of harmonic components.\n\n Parameters\n ----------\n baz_list : list\n List of back-azimuth directions over which to calculate\n the receiver functions. If no list is specified, the method\n will use the same back-azimuths as those in the original\n receiver function streams\n\n Attributes\n ----------\n radial_forward : :class:`~obspy.core.Stream`\n Stream containing the radial receiver functions\n transv_forward : :class:`~obspy.core.Stream`\n Stream containing the transverse receiver functions\n\n\n \"\"\"\n\n if not hasattr(self, 'hstream'):\n raise(Exception(\"Decomposition has not been performed yet\"))\n\n if not baz_list:\n print(\"Warning: no BAZ specified - using all baz from \" +\n \"stored streams\")\n baz_list = [tr.stats.baz for tr in self.radialRF]\n if not isinstance(baz_list, list):\n baz_list = [baz_list]\n\n # Some constants\n nz = len(self.hstream[0].data)\n deg2rad = np.pi/180.\n\n # Copy traces\n self.radial_forward = Stream()\n self.transv_forward = Stream()\n\n for baz in baz_list:\n trR = Trace(header=self.hstream[0].stats)\n trT = Trace(header=self.hstream[0].stats)\n\n # Loop over each time/depth step\n for iz in range(nz):\n\n # Initialize working arrays\n X = np.zeros(5)\n H = np.zeros((2, 5))\n\n # Fill up X array\n X[0] = hstream[0].data[iz]\n X[1] = hstream[1].data[iz]\n X[2] = hstream[2].data[iz]\n X[3] = hstream[3].data[iz]\n X[4] = hstream[4].data[iz]\n\n # Fill up H arrays (for V and H)\n H[0, 0] = 1.0\n H[0, 1] = np.cos(deg2rad*(baz-self.azim))\n H[0, 2] = np.sin(deg2rad*(baz-self.azim))\n H[0, 3] = np.cos(2.*deg2rad*(baz-self.azim))\n H[0, 4] = np.sin(2.*deg2rad*(baz-self.azim))\n\n shift = 90.\n\n H[1, 0] = 0.0\n H[1, 1] = np.cos(deg2rad*(baz+shift-self.azim))\n H[1, 2] = np.sin(deg2rad*(baz+shift-self.azim))\n H[1, 3] = np.cos(2.*deg2rad*(baz+shift/2.0-self.azim))\n H[1, 4] = np.sin(2.*deg2rad*(baz+shift/2.0-self.azim))\n\n # Calculate dot product B = H*X\n B = np.dot(H, X)\n\n # Extract receiver functions\n trR.data[iz] = B[0]\n trT.data[iz] = -B[1]\n\n 
self.radial_forward.append(trR)\n self.transv_forward.append(trT)\n\n\n def plot(self, ymax=30., scale=10., save=False, title=None, form='png'):\n \"\"\"\n Method to plot the 5 harmonic components.\n\n Parameters\n ----------\n ymax : float\n Maximum y axis value (time or depth) over which to \n plot the harmonic components\n scale : float\n Scaling factor for the amplitudes (typically > 1)\n save : bool\n Whether or not to save the plot\n title : str\n Title of plot, to be used in the Figure and the \n file name (if ``save==True``)\n\n \"\"\"\n\n # Y axis\n y = np.arange(self.hstream[0].stats.npts) /\\\n self.hstream[0].stats.sampling_rate\n\n # Station name\n sta = self.hstream[0].stats.station\n\n # Initialize figure\n fig = plt.figure()\n plt.clf()\n\n # Get more control on subplots\n # ax1 = fig.add_axes([0.1, 0.1, 0.7, 0.5])\n ax1 = fig.add_subplot(111)\n\n for i, trace in enumerate(self.hstream):\n # i += 1\n ax1.fill_betweenx(\n y, i+1, i+1+trace.data*scale,\n where=trace.data+1e-6 <= 0.,\n facecolor='blue',\n linewidth=0)\n ax1.fill_betweenx(\n y, i+1, i+1+trace.data*scale,\n where=trace.data+1e-6 >= 0.,\n facecolor='red',\n linewidth=0)\n\n ax1.set_ylim(ymax, 0)\n ax1.set_xlabel('Harmonic components')\n if title:\n ax1.set_title(title)\n\n labels = [item.get_text() for item in ax1.get_xticklabels()]\n labels[1] = '$A$'\n labels[2] = '$B_1$'\n labels[3] = '$B_2$'\n labels[4] = '$C_1$'\n labels[5] = '$C_2$'\n ax1.set_xticklabels(labels)\n off = ax1.xaxis.get_offset_text()\n ax1.tick_params(axis=u'x', pad=10)\n ax1.grid()\n\n if save:\n plt.savefig('FIGURES/'+sta+'.'+title+'.'+form, dpi=300,\n bbox_inches='tight', format=form)\n plt.show()\n\n def save(self, file):\n \"\"\"\n Saves harmonics object to file\n\n Parameters\n ----------\n file : str\n File name for Harmonics object\n\n \"\"\"\n\n import pickle\n output = open(file, 'wb')\n pickle.dump(self, output)\n output.close()\n"
] |
[
[
"numpy.square",
"numpy.dot",
"numpy.linalg.svd",
"numpy.arange",
"numpy.cos",
"matplotlib.pyplot.savefig",
"numpy.fft.fftshift",
"numpy.sin",
"matplotlib.pyplot.clf",
"numpy.argmin",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.float",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
scaactk/daal4py
|
[
"08c5fa53b6eab0bc05aa2338727cb5d2c129171b"
] |
[
"examples/sycl/svm_batch.py"
] |
[
"#*******************************************************************************\r\n# Copyright 2020 Intel Corporation\r\n# All Rights Reserved.\r\n#\r\n# This software is licensed under the Apache License, Version 2.0 (the\r\n# \"License\"), the following terms apply:\r\n#\r\n# You may not use this file except in compliance with the License. You may\r\n# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\r\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n#\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#*******************************************************************************\r\n\r\n# daal4py SVM example for shared memory systems\r\n\r\nimport daal4py as d4p\r\nimport numpy as np\r\nimport os\r\nfrom daal4py.oneapi import sycl_context, sycl_buffer\r\n\r\n# let's try to use pandas' fast csv reader\r\ntry:\r\n import pandas\r\n read_csv = lambda f, c, t=np.float64: pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)\r\nexcept:\r\n # fall back to numpy loadtxt\r\n read_csv = lambda f, c, t=np.float64: np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)\r\n\r\ntry:\r\n with sycl_context('gpu'):\r\n gpu_available=True\r\nexcept:\r\n gpu_available=False\r\n\r\n# Commone code for both CPU and GPU computations\r\ndef compute(train_indep_data, train_dep_data, test_indep_data, method='defaultDense'):\r\n # Configure a SVM object to use linear kernel\r\n kernel_function = d4p.kernel_function_linear(method='defaultDense', k=1.0, b=0.0)\r\n train_algo = d4p.svm_training(method=method, kernel=kernel_function, C=1.0, accuracyThreshold=1e-3, tau=1e-8, cacheSize=600000000)\r\n\r\n train_result = train_algo.compute(train_indep_data, train_dep_data)\r\n\r\n # Create an algorithm object and call compute\r\n predict_algo = d4p.svm_prediction(kernel=kernel_function)\r\n predict_result = predict_algo.compute(test_indep_data, train_result.model)\r\n decision_result = predict_result.prediction\r\n predict_labels = np.where(decision_result >=0, 1, -1)\r\n return predict_labels, decision_result\r\n\r\n# At this moment with sycl we are working only with numpy arrays\r\ndef to_numpy(data):\r\n try:\r\n from pandas import DataFrame\r\n if isinstance(data, DataFrame):\r\n return np.ascontiguousarray(data.values)\r\n except:\r\n pass\r\n try:\r\n from scipy.sparse import csr_matrix\r\n if isinstance(data, csr_matrix):\r\n return data.toarray()\r\n except:\r\n pass\r\n return data\r\n\r\n\r\ndef main(readcsv=read_csv):\r\n # input data file\r\n train_file = os.path.join('..', 'data', 'batch', 'svm_two_class_train_dense.csv')\r\n predict_file = os.path.join('..', 'data', 'batch', 'svm_two_class_test_dense.csv')\r\n\r\n nFeatures = 20\r\n train_data = readcsv(train_file, range(nFeatures))\r\n train_labels = readcsv(train_file, range(nFeatures, nFeatures + 1))\r\n predict_data = readcsv(predict_file, range(nFeatures))\r\n predict_labels = readcsv(predict_file, range(nFeatures, nFeatures + 1))\r\n\r\n predict_result_classic, decision_function_classic = compute(train_data, train_labels, predict_data, 'boser')\r\n\r\n train_data = to_numpy(train_data)\r\n train_labels = to_numpy(train_labels)\r\n predict_data = to_numpy(predict_data)\r\n\r\n # It is possible to specify to make the computations on GPU\r\n if gpu_available:\r\n with 
sycl_context('gpu'):\r\n sycl_train_data = sycl_buffer(train_data)\r\n sycl_train_labels = sycl_buffer(train_labels)\r\n sycl_predict_data = sycl_buffer(predict_data)\r\n\r\n predict_result_gpu, decision_function_gpu = compute(sycl_train_data, sycl_train_labels, sycl_predict_data, 'thunder')\r\n assert np.allclose(predict_result_gpu, predict_result_classic)\r\n\r\n return predict_labels, predict_result_classic, decision_function_classic\r\n\r\n\r\nif __name__ == \"__main__\":\r\n predict_labels, predict_result, decision_function = main()\r\n np.set_printoptions(precision=0)\r\n print(\"\\nSVM classification decision function (first 10 observations):\\n\", decision_function[0:10])\r\n print(\"\\nSVM classification predict result (first 10 observations):\\n\", predict_result[0:10])\r\n print(\"\\nGround truth (first 10 observations):\\n\", predict_labels[0:10])\r\n print('All looks good!')\r\n"
] |
[
[
"pandas.read_csv",
"numpy.allclose",
"numpy.ascontiguousarray",
"numpy.set_printoptions",
"numpy.where",
"numpy.loadtxt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Mastour-mouhcine/rodProject
|
[
"3c4066ce78272752a286f57245fb5969c6340fda"
] |
[
".history/serverSide/verificationVF_20211104125303.py"
] |
[
"import xlsxwriter\nimport mysql.connector\nfrom selenium.webdriver.chrome.options import Options \nfrom selenium import webdriver\nimport time\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport pandas as pd\nimport xlrd\nfrom selenium import webdriver\nimport requests, time\nimport re\nimport os\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium import webdriver\nimport os\n\nmydb = mysql.connector.connect(\n host=\"135.148.9.103\",\n user=\"admin\",\n password=\"rod2021\",\n database=\"rod_input\",\n port = '3306'\n)\n\nmycursor1 = mydb.cursor()\n\nmycursor1.execute(\"SELECT mail_direct from `data_input` WHERE 1\")\nliste1=[]\n\nmyresult1 = mycursor1.fetchall()\n#mydb.commit()\nfor x in myresult1:\n liste1.append(x)\ndata=pd.DataFrame(liste1,columns=['mail_direct'])\n\noptions = Options()\noptions.add_argument(\"--headless\") # Runs Chrome in headless mode.\noptions.add_argument('--no-sandbox') # Bypass OS security model\noptions.add_argument('--disable-gpu') # applicable to windows os only\noptions.add_argument('start-maximized') # \noptions.add_argument('disable-infobars')\noptions.add_argument(\"--enable-extensions\")\ndriver = webdriver.Chrome(chrome_options=options, executable_path=ChromeDriverManager().install())\n#driver.get(\"http://google.com/\")\n#url=\"https://www.zerobounce.net/members/login/\"\n#driver = webdriver.Chrome(r\"C:\\Users\\infodos\\.wdm\\drivers\\chromedriver\\win32\\93.0.4577.63\\chromedriver.exe\")\n#driver.get(url)\n#url=\"https://www.zerobounce.net/members/login/\"\n#ChromeDriverManager().install()\n#r\"C:\\\\Users\\\\infodos\\.wdm\\drivers\\\\chromedriver\\\\win32\\\\93.0.4577.63\\\\chromedriver.exe\"\n#driver = webdriver.Chrome(ChromeDriverManager().install())\n#driver.get(url)\ndata=data.dropna()\nliste1=[]\n \nusername=\"[email protected]\"\npassword=\"YuR9YrKB\"\nurl=\"https://www.zerobounce.net/members/login/\"\ndriver.get(url)\ndriver.find_element_by_name(\"fe_UserName\").send_keys(username)\ndriver.find_element_by_name(\"fe_Password\").send_keys(password)\nfor row in data['mail_direct']:\n driver.find_element_by_css_selector(\"input[type=\\\"submit\\\" i]\").click()\n driver.get(\"https://www.zerobounce.net/members/singleemailvalidator/\")\n driver.find_element_by_name(\"ctl00$MainContent$fe_email_address\").send_keys(row)\n time.sleep(2)\n driver.find_element_by_name(\"ctl00$MainContent$btnValidate\").click()\n a=driver.find_element_by_class_name(\"item-status\").text\n b=driver.find_element_by_id(\"MainContent_apiResults1\").text \n liste1.append([row,b])\n time.sleep(3)\n\ndf=pd.DataFrame(liste1,columns=['Email','Status'])\n\n#import os\n#os.remove(\"data_input.xlsx\")\n#os.remove(\"Verification.xlsx\")\nwriter = pd.ExcelWriter(\"Verification.xlsx\")\ndf.to_excel(writer, 'data')\nwriter.save()\nprint(\"Verification complete\")"
] |
[
[
"pandas.DataFrame",
"pandas.ExcelWriter"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
sadielbartholomew/cf-python
|
[
"98541d8e55c703eca9bfba4168fb3d42755267da"
] |
[
"cf/cfdatetime.py"
] |
[
"import datetime\nfrom functools import partial\n\nimport cftime\nimport numpy as np\n\nfrom .functions import _DEPRECATION_ERROR_CLASS\n\n_default_calendar = \"gregorian\"\n\n# --------------------------------------------------------------------\n# Mapping of CF calendars to cftime date-time objects\n# --------------------------------------------------------------------\n_datetime_object = {\n (\"\",): partial(cftime.datetime, calendar=\"\"),\n (None, \"gregorian\", \"standard\", \"none\"): cftime.DatetimeGregorian,\n (\"proleptic_gregorian\",): cftime.DatetimeProlepticGregorian,\n (\"360_day\",): cftime.Datetime360Day,\n (\"noleap\", \"365_day\"): cftime.DatetimeNoLeap,\n (\"all_leap\", \"366_day\"): cftime.DatetimeAllLeap,\n (\"julian\",): cftime.DatetimeJulian,\n}\n\n_calendar_map = {None: \"gregorian\"}\n\n\nclass Datetime(cftime.datetime):\n \"\"\"A date-time object which supports CF calendars.\n\n Deprecated at version 3.0.0. Use function 'cf.dt' to create date-\n time objects instead.\n\n \"\"\"\n\n def __init__(\n self,\n year,\n month=1,\n day=1,\n hour=0,\n minute=0,\n second=0,\n microsecond=0,\n dayofwk=-1,\n dayofyr=1,\n calendar=None,\n ):\n \"\"\"**Initialization**\"\"\"\n _DEPRECATION_ERROR_CLASS(\n \"Datetime\",\n \"Use function 'cf.dt' to create date-time objects instead.\",\n ) # pragma: no cover\n\n\ndef elements(x):\n return x.timetuple()[:6]\n\n\ndef dt(\n arg, month=1, day=1, hour=0, minute=0, second=0, microsecond=0, calendar=\"\"\n):\n \"\"\"Return a date-time object for a date and time according to a\n calendar.\n\n .. seealso:: `cf.dt_vector`\n\n :Parameters:\n\n arg:\n A multi-purpose argument that is one of:\n\n * An `int` specifying the calendar year, used in\n conjunction with the *month*, *day*, *hour*, *minute*,\n *second* and *microsecond* parameters.\n\n * A `str` specifying an ISO 8601-like date-time string (in\n which non-Gregorian calendar dates are allowed).\n\n * `datetime.datetime or `cftime.datetime`. A new date-time\n object is returned for the given date-time.\n\n\n calendar: `str`, optional\n The calendar for the date-time. 
By default the Gregorian\n calendar is used.\n\n *Parameter example:*\n ``calendar='360_day'``\n\n :Returns:\n\n `cftime.datetime`\n The new date-time object.\n\n **Examples:**\n\n >>> d = cf.dt(2003)\n >>> d\n cftime.DatetimeGregorian(2003-01-01 00:00:00)\n >>> print(d)\n 2003-01-01 00:00:00\n\n >>> d = cf.dt(2003, 2, 30, calendar='360_day')\n >>> d = cf.dt(2003, 2, 30, 0, 0, 0, calendar='360_day')\n >>> d = cf.dt('2003-2-30', calendar='360_day')\n >>> d = cf.dt('2003-2-30 0:0:0', calendar='360_day')\n >>> d\n cftime.Datetime360Day(2003:02:30 00:00:00)\n >>> print(d)\n 2003-02-30 00:00:00\n\n >>> d = cf.dt(2003, 4, 5, 12, 30, 15)\n >>> d = cf.dt(year=2003, month=4, day=5, hour=12, minute=30, second=15)\n >>> d = cf.dt('2003-04-05 12:30:15')\n >>> d.year, d.month, d.day, d.hour, d.minute, d.second\n (2003, 4, 5, 12, 30, 15)\n\n \"\"\"\n if isinstance(arg, str):\n (year, month, day, hour, minute, second, microsecond) = st2elements(\n arg\n )\n\n elif isinstance(arg, cftime.datetime):\n (year, month, day, hour, minute, second, microsecond) = (\n arg.year,\n arg.month,\n arg.day,\n arg.hour,\n arg.minute,\n arg.second,\n arg.microsecond,\n )\n if calendar == \"\":\n calendar = arg.calendar\n\n elif isinstance(arg, datetime.datetime):\n (year, month, day, hour, minute, second) = arg.timetuple()[:6]\n microsecond = arg.microsecond\n if calendar == \"\":\n calendar = _default_calendar\n\n else:\n year = arg\n\n # calendar=_calendar_map.get(calendar, calendar)\n #\n # return cftime.datetime(year, month, day, hour, minute, second,\n # microsecond, calendar=calendar)\n\n for calendars, datetime_cls in _datetime_object.items():\n if calendar in calendars:\n return datetime_cls(\n year, month, day, hour, minute, second, microsecond\n )\n\n raise ValueError(\n f\"Can't create date-time object with unknown calendar {calendar!r}\"\n )\n\n\ndef dt_vector(\n arg, month=1, day=1, hour=0, minute=0, second=0, microsecond=0, calendar=\"\"\n):\n \"\"\"Return a 1-d array of date-time objects.\n\n .. seealso:: `cf.dt`\n\n :Parameters:\n\n arg:\n A multi-purpose argument that is one of:\n\n * An `int`, or sequence of `int`, specifying the calendar\n years, used in conjunction with the *month*, *day*,\n *hour*, *minute*, *second* and *microsecond* parameters.\n\n * A `str`, or sequence of `str`, specifying ISO 8601-like\n date-time strings (in which non-Gregorian calendar dates\n are allowed).\n\n * A two dimensional array of `int`. There may be up to 7\n columns, each one specifying the years, months, days,\n hours minutes, seconds and microseconds respectively. If\n fewer than 7 trailing dimensions are provided then the\n default value for the missing components are used\n\n calendar: `str`, optional\n The calendar for the date-times. By default the Gregorian\n calendar is used.\n\n *Parameter example:*\n ``calendar='360_day'``\n\n :Returns:\n\n `numpy.ndarray`\n 1-d array of date-time objects.\n\n **Examples:**\n\n TODO\n\n \"\"\"\n arg = np.array(arg)\n month = np.array(month)\n day = np.array(day)\n hour = np.array(hour)\n minute = np.array(minute)\n second = np.array(second)\n microsecond = np.array(microsecond)\n\n ndim = max(map(np.ndim, (month, day, hour, minute, second, microsecond)))\n\n if ndim > 1:\n raise ValueError(\n \"If set, the 'month', 'day', 'hour', 'minute', 'second', \"\n \"'microsecond' parameters must be scalar or 1-d\"\n )\n\n if arg.ndim > 2:\n raise ValueError(\n \"The 'arg' parameter must be scalar, 1-d or 2-d. 
\" f\"Got: {arg!r}\"\n )\n\n sizes = set(\n map(np.size, (arg, month, day, hour, minute, second, microsecond))\n )\n\n if len(sizes) == 1 and 1 in sizes:\n # All arguments are scalars or size 1\n out = dt(\n arg.item(),\n month.item(),\n day.item(),\n hour.item(),\n minute.item(),\n second.item(),\n microsecond.item(),\n calendar=calendar,\n )\n if ndim >= 1:\n out = [out]\n\n out = np.array(out)\n\n if not out.ndim:\n out = np.expand_dims(out, 0)\n\n return out\n\n # Still here?\n if arg.ndim == 2 and arg.shape[1] > 7:\n raise ValueError(\n \"The size of the second dimension of 'arg' must be less than 8. \"\n f\"Got: {arg.shape[1]!r}\"\n )\n\n if arg.ndim == 1:\n if arg.dtype.kind in \"UOS\":\n out = [dt(a, calendar=calendar) for a in arg]\n else:\n if len(sizes) > 2:\n raise ValueError(\n \"The 'arg', 'month', 'day', 'hour', 'minute', 'second', \"\n \"'microsecond' parameters have incompatible sizes.\"\n \"At least two of them have different sizes greater than 1\"\n )\n\n if len(sizes) == 2 and 1 not in sizes:\n raise ValueError(\n \"The 'arg', 'month', 'day', 'hour', 'minute', 'second', \"\n \"'microsecond' parameters have incompatible sizes. \"\n \"At least two of them have different sizes greater than 1\"\n )\n\n x = np.empty((max(sizes), 7), dtype=int)\n x[:, 0] = arg\n x[:, 1] = month\n x[:, 2] = day\n x[:, 3] = hour\n x[:, 4] = minute\n x[:, 5] = second\n x[:, 6] = microsecond\n arg = x\n\n out = [dt(*args, calendar=calendar) for args in arg]\n else:\n out = [dt(*args, calendar=calendar) for args in arg]\n\n out = np.array(out)\n\n if not out.ndim:\n out = np.expand_dims(out, 0)\n\n return out\n\n\ndef st2dt(array, units_in=None, dummy0=None, dummy1=None):\n \"\"\"The returned array is always independent.\n\n :Parameters:\n\n array: numpy array-like\n\n units_in: `Units`, optional\n\n dummy0: optional\n Ignored.\n\n dummy1: optional\n Ignored.\n\n :Returns:\n\n `numpy.ndarray`\n An array of `cftime.datetime` objects with the same shape\n as *array*.\n\n **Examples:**\n\n \"\"\"\n func = partial(st2datetime, calendar=units_in._calendar)\n return np.vectorize(func, otypes=[object])(array)\n\n\ndef st2datetime(date_string, calendar=None):\n \"\"\"Parse an ISO 8601 date-time string into a `cftime` object.\n\n :Parameters:\n\n date_string: `str`\n\n :Returns:\n\n `cftime.datetime`\n\n \"\"\"\n if date_string.count(\"-\") != 2:\n raise ValueError(\n \"Input date-time string must contain at least a year, a month \"\n \"and a day\"\n )\n\n x = cftime._parse_date(date_string)\n if len(x) == 7:\n year, month, day, hour, minute, second, utc_offset = x\n microsecond = 0\n else:\n year, month, day, hour, minute, second, microsecond, utc_offset = x\n\n if utc_offset:\n raise ValueError(\"Can't specify a time offset from UTC\")\n\n # return Datetime(year, month, day, hour, minute, second)\n return dt(\n year, month, day, hour, minute, second, microsecond, calendar=calendar\n )\n\n\ndef st2elements(date_string):\n \"\"\"Parse an ISO 8601 date-time string into a `cftime` object.\n\n :Parameters:\n\n date_string: `str`\n\n :Returns:\n\n `tuple`\n\n \"\"\"\n if date_string.count(\"-\") != 2:\n raise ValueError(\n \"Input date-time string must contain at least a year, a month \"\n \"and a day\"\n )\n\n x = cftime._parse_date(date_string)\n if len(x) == 7:\n year, month, day, hour, minute, second, utc_offset = x\n microsecond = 0\n else:\n year, month, day, hour, minute, second, microsecond, utc_offset = x\n\n if utc_offset:\n raise ValueError(\"Can't specify a time offset from UTC\")\n\n return (\n 
year,\n month,\n day,\n hour,\n minute,\n second,\n microsecond,\n )\n\n\ndef rt2dt(array, units_in, units_out=None, dummy1=None):\n \"\"\"Convert reference times to date-time objects.\n\n The returned array is always independent.\n\n :Parameters:\n\n array: numpy array-like\n\n units_in: `Units`\n\n units_out: *optional*\n Ignored.\n\n dummy1:\n Ignored.\n\n :Returns:\n\n `numpy.ndarray`\n An array of `cftime.datetime` objects with the same shape\n as *array*.\n\n \"\"\"\n ndim = np.ndim(array)\n if not ndim and np.ma.is_masked(array):\n # num2date has issues with scalar masked arrays with a True\n # mask\n return np.ma.masked_all((), dtype=object)\n\n units = units_in.units\n calendar = getattr(units_in, \"calendar\", \"standard\")\n\n array = cftime.num2date(\n array, units, calendar, only_use_cftime_datetimes=True\n )\n\n return array\n\n\ndef dt2Dt(x, calendar=None):\n \"\"\"Convert a datetime.datetime object to a cf.Datetime object.\"\"\"\n if not x:\n return False\n\n return dt(x, calendar=calendar)\n\n\ndef dt2rt(array, units_in, units_out, dummy1=None):\n \"\"\"Round to the nearest millisecond. This is only necessary whilst\n netCDF4 time functions have an accuracy of no better than 1\n millisecond (which is still the case at version 1.2.2).\n\n The returned array is always independent.\n\n :Parameters:\n\n array: numpy array-like of date-time objects\n\n units_in:\n Ignored.\n\n units_out: `Units`\n\n dummy1:\n Ignored.\n\n :Returns:\n\n `numpy.ndarray`\n An array of numbers with the same shape as *array*.\n\n \"\"\"\n ndim = np.ndim(array)\n\n # array = units_out._utime.date2num(array)\n array = cftime.date2num(\n array, units=units_out.units, calendar=units_out._utime.calendar\n )\n\n if not ndim:\n array = np.asanyarray(array)\n\n return array\n\n\ndef st2rt(array, units_in, units_out, dummy1=None):\n \"\"\"The returned array is always independent.\n\n :Parameters:\n\n array: numpy array-like of ISO 8601 date-time strings\n\n units_in: `Units` or `None`\n\n units_out: `Units`\n\n dummy1:\n Ignored.\n\n :Returns:\n\n `numpy.ndarray`\n An array of floats with the same shape as *array*.\n\n \"\"\"\n array = st2dt(array, units_in)\n # array = units_out._utime.date2num(array)\n array = cftime.date2num(\n array, units=units_out.units, calendar=units_out._utime.calendar\n )\n\n if not np.ndim(array):\n array = np.asanyarray(array)\n\n return array\n"
] |
[
[
"numpy.expand_dims",
"numpy.ndim",
"numpy.vectorize",
"numpy.asanyarray",
"numpy.ma.masked_all",
"numpy.array",
"numpy.ma.is_masked"
]
] |
[
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dabi0614/GeoData
|
[
"1699e9d7eb2c23dfee1e14ac5857507834909914",
"1699e9d7eb2c23dfee1e14ac5857507834909914",
"1699e9d7eb2c23dfee1e14ac5857507834909914",
"1699e9d7eb2c23dfee1e14ac5857507834909914"
] |
[
"geodata/sketchG1-1.py",
"geodata/sketch7.py",
"geodata/sketchE.py",
"geodata/sketchJ2.py"
] |
[
"\nfrom dask.distributed import Client\n\nimport numpy as np\n\nimport matplotlib as mpl\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nimport matplotlib.tri as tri\nimport cartopy.crs as ccrs\n\nimport geodata as gd\nimport pystare as ps\nimport h5py as h5\nfrom pyhdf.SD import SD, SDC\n\nimport yaml\n\nfrom sortedcontainers import SortedDict, SortedList\n\nfrom stopwatch import sw_timer\n\n###########################################################################\n#\n\ndef npi64(i):\n return np.array(i,dtype=np.int64)\n\ndef npf64(i):\n return np.array(i,dtype=np.double)\n\nclass sid_geometry(object):\n def __init__(self,sids=None):\n self.triangles = SortedDict()\n self.tri_triang = None\n if sids is not None:\n self.add(sids)\n return\n\n def add(self,sids):\n for sid in sids:\n if sid not in self.triangles.keys():\n self.tri_triang = None\n self.triangles[sid] = ps.triangulate_indices(np.array([sid],dtype=np.int64)) ## LLI\n return\n\n def triang(self):\n if self.tri_triang is None:\n k=0\n n = len(self.triangles.keys())\n lats = np.zeros([3*n],dtype=np.double)\n lons = np.zeros([3*n],dtype=np.double)\n intmat = []\n for sid in self.triangles:\n lats[k:k+3] = self.triangles[sid][0][:]\n lons[k:k+3] = self.triangles[sid][1][:]\n intmat.append([i+k for i in self.triangles[sid][2][0]])\n k=k+3\n self.tri_triang = tri.Triangulation(lats,lons,intmat)\n return self.tri_triang\n\n def get_sids_np(self):\n return np.array(self.triangles.keys(),dtype=np.int64)\n\nclass data_entry(object):\n def __init__(self,sid,datum):\n self.sid = sid\n self.datum = datum\n return\n\n def as_tuple(self):\n return (self.sid,self.datum)\n\nclass catalog_entry(object):\n def __init__(self,sid):\n self.data = {}\n self.sid = sid\n self.geometry = sid_geometry([sid])\n return\n\n def add(self,key,datum):\n if key not in self.data.keys():\n self.data[key] = []\n self.data[key].append(datum) # datum is a data_entry\n return\n\nclass catalog(object):\n def __init__(self,resolution=None,sids=None):\n self.resolution = resolution\n self.result_size_limit = 4096\n self.sdict = SortedDict()\n if sids is not None:\n for s in sids:\n self.open_entry(s)\n return\n\n def add(self,key,sid,datum,resolution=None):\n if resolution is not None:\n sidtest = gd.spatial_clear_to_resolution(gd.spatial_coerce_resolution(sid,resolution))\n elif self.resolution is not None:\n sidtest = gd.spatial_clear_to_resolution(gd.spatial_coerce_resolution(sid,self.resolution))\n else:\n sidtest = sid\n if sidtest not in self.sdict.keys():\n self.sdict[sidtest] = catalog_entry(sidtest) # construct with relevant resolution\n self.sdict[sidtest].add(key,data_entry(sid,datum))\n return\n\n def open_entry(self,sid):\n \"Open a catalog entry, if it's not there. 
Expand sid, if needed.\"\n sidl=[sid]\n if self.resolution is not None:\n sidl = ps.expand_intervals(sidl,self.resolution,self.result_size_limit)\n for s in sidl:\n if s not in self.sdict.keys():\n self.sdict[s] = catalog_entry(s) # construct with appropriate resolution\n return\n\n def add_to_entry(self,key,sid,datum):\n \"Add data to entry if it's there.\"\n if self.resolution is not None:\n sid_test = gd.spatial_clear_to_resolution(gd.spatial_coerce_resolution(sid,self.resolution))\n else:\n sid_test = sid\n # print('testing ',hex(sid_test), hex(sid))\n entry_open = sid_test in self.sdict.keys()\n if entry_open:\n # print(key,' adding ',data,' to ',hex(sid))\n self.add(key,sid,datum)\n return entry_open\n\n def get_all_data(self,key,interpolate=False):\n ret = []\n for s in self.sdict.keys():\n try:\n if len(self.sdict[s].data[key]) > 0:\n ret = ret + self.sdict[s].data[key]\n except KeyError:\n continue\n return ret\n\n def get_data(self,key,sid):\n return self.sdict[sid].data[key]\n\n###########################################################################\n# https://stackoverflow.com/questions/41596386/tripcolor-using-rgb-values-for-each-vertex\n#\ndef colors_to_cmap(colors):\n '''\n colors_to_cmap(nx3_or_nx4_rgba_array) yields a matplotlib colormap object that, when\n that will reproduce the colors in the given array when passed a list of n evenly\n spaced numbers between 0 and 1 (inclusive), where n is the length of the argument.\n\n Example:\n cmap = colors_to_cmap(colors)\n zs = np.asarray(range(len(colors)), dtype=np.float) / (len(colors)-1)\n # cmap(zs) should reproduce colors; cmap[zs[i]] == colors[i]\n '''\n colors = np.asarray(colors)\n if colors.shape[1] == 3:\n colors = np.hstack((colors, np.ones((len(colors),1))))\n steps = (0.5 + np.asarray(range(len(colors)-1), dtype=np.float))/(len(colors) - 1)\n return mpl.colors.LinearSegmentedColormap(\n 'auto_cmap',\n {clrname: ([(0, col[0], col[0])] + \n [(step, c0, c1) for (step,c0,c1) in zip(steps, col[:-1], col[1:])] + \n [(1, col[-1], col[-1])])\n for (clridx,clrname) in enumerate(['red', 'green', 'blue', 'alpha'])\n for col in [colors[:,clridx]]},\n N=len(colors))\n\n###########################################################################\n# Helper functions\n\ndef with_hdf_get(h,var):\n sds = hdf.select(var)\n ret = sds.get()\n sds.endaccess()\n return ret\n\ndef slam(client,action,data,partition_factor=1.5):\n np = sum(client.nthreads().values())\n print('slam: np = %i'%np)\n shard_bounds = [int(i*len(data)/(1.0*partition_factor*np)) for i in range(int(partition_factor*np))] \n if shard_bounds[-1] != len(data):\n shard_bounds = shard_bounds + [-1]\n data_shards = [data[shard_bounds[i]:shard_bounds[i+1]] for i in range(len(shard_bounds)-1)]\n print('ds len: ',len(data_shards))\n print('ds item len: ',len(data_shards[0]))\n print('ds type: ',type(data_shards[0]))\n print('ds dtype: ',data_shards[0].dtype)\n big_future = client.scatter(data_shards)\n results = client.map(action,big_future)\n return results\n \n\ndef main():\n ###########################################################################\n # Data source\n dataPath=\"/home/mrilee/data/\"\n \n ###########################################################################\n # MODIS\n\n modis_base = \"MOD05_L2.\"\n \n # modis_item = \"A2005349.2120.061.2017294065852\"\n # modis_time_start = \"2005-12-15T21:20:00\"\n \n modis_item = \"A2005349.2125.061.2017294065400\"\n modis_time_start = \"2005-12-15T21:25:00\"\n \n modis_suffix = \".hdf\"\n modis_filename = 
modis_base+modis_item+modis_suffix\n\n # hdf = SD(dataPath+modis_filename,SDC.READ)\n # ds_wv_nir = hdf.select('Water_Vapor_Near_Infrared')\n \n fmt_suffix = \".h5\"\n workFileName = \"sketchG.\"+modis_base+modis_item+fmt_suffix\n print('loading ',workFileName)\n workFile = h5.File(workFileName,'r')\n sids = workFile['/image']['stare_spatial']\n lat = workFile['/image']['Latitude']\n lon = workFile['/image']['Longitude']\n data = workFile['/image']['Water_Vapor_Near_Infrared']\n workFile.close()\n\n modis_min = np.amin(data)\n modis_max = np.amax(data)\n sids = sids-1\n\n ###########################################################################\n # GOES\n \n goes_file='sketch9.2005.349.213015.h5'\n workFileName = goes_file\n workFile = h5.File(workFileName,'r')\n goes_sids = workFile['/image']['stare_spatial']\n goes_data = workFile['/image']['goes_b3']\n workFile.close()\n print('goes mnmx: ',np.amin(goes_data),np.amax(goes_data))\n goes_min = np.amin(goes_data)\n goes_max = np.amax(goes_data)\n goes_sids = goes_sids-1\n\n\n ###########################################################################\n # Plotting\n\n nrows = 2\n ncols = 3\n\n nrows = 1\n ncols = 1\n\n proj = ccrs.PlateCarree()\n # proj = ccrs.Mollweide()\n # proj = ccrs.Mollweide(central_longitude=-160.0)\n transf = ccrs.Geodetic()\n\n# https://stackoverflow.com/questions/33942233/how-do-i-change-matplotlibs-subplot-projection-of-an-existing-axis\n # plt.figure()\n fig,axs = plt.subplots(nrows=nrows,ncols=ncols,subplot_kw={'projection': proj})\n\n # axs.set_facecolor('k')\n # axs.patch.set_facecolor('black')\n # axs.set_facecolor('black')\n\n if nrows*ncols == 1:\n fig = [fig]\n axs = [axs]\n\n goes_line = [False,False,False]\n modis_line = [False,False,False]\n cover_plot = [True, True, True ]\n goes_plot_1 = [True, False,True ]\n goes_plot_1_points = [True, False,True ]\n modis_plot_1 = [False,True, True ]\n plt_show_1 = [False,False,True ]\n\n goes_line = [False,False,False ,True ,False ,True ]\n modis_line = [False,False,False ,False ,True ,True ]\n cover_plot = [False,False,False ,False ,False ,False ]\n goes_plot_1 = [True, False,True ,True ,False ,True ]\n goes_plot_1_points = [False,False,False ,True ,False ,True ]\n modis_plot_1 = [False,True, True ,False ,True ,True ]\n modis_plot_1_points = [False,False,False ,False ,True ,True ] \n plt_show_1 = [False,False,True ,False ,False ,True ]\n \n irow = [0,0,0,1,1,1]\n icol = [0,1,2,0,1,2]\n\n coastline_color = 'black'\n coastline_color = 'black'\n\n # blend\n blend_tripcolor_1 = True\n blend_tripcolor_1_res = 10\n # blend_tripcolor_1_res = 9 # FFE\n # blend_tripcolor_1_res = 6 # Test\n blend_tripcolor_1_cmap = None\n blend_tripcolor_1_alpha = 1\n blend_tripcolor_1_gamma_g = 0.65\n blend_tripcolor_1_gamma_m = 0.65\n if blend_tripcolor_1:\n goes_plot_1 = [False]*6\n modis_plot_1 = [False]*6\n # coastline_color = 'white'\n coastline_color = 'black'\n\n # 2020-0125 pix 1\n # goes_plot_1_res = 9\n # modis_plot_1_res = 9\n #\n # goes_plot_1_res = 6\n # modis_plot_1_res = 6\n #\n # plot_1_res = 9 # FFE\n plot_1_res = 6\n goes_plot_1_res = plot_1_res\n modis_plot_1_res = plot_1_res\n\n # Colors\n goes_plot_1_tripcolor = 'Reds'\n modis_plot_1_tripcolor = 'Blues'\n #\n common_alpha = 0.7\n goes_plot_1_alpha = common_alpha\n modis_plot_1_alpha = common_alpha\n\n # recalculate=[True,False,False,True,False,False]\n recalculate=[True,False,True,True,False,True]\n cover_rads =[2.0,0,2, 0.125,0,0.125]\n # cover_rads =[2.0,0,0, 0.125,0,0]\n\n circle_plot =[ False ,False ,False 
,False ,False ,False ]\n circle_color=[ 'White' ,'lightgrey' ,'White' ,'navajowhite' ,'khaki' ,'White' ]\n modis_scatter_color=['darkcyan','darkcyan','darkcyan','darkcyan','cyan','cyan']\n\n nodes_cover=[1,2,1,1,2,1] # 1 == goes, 2 == modis, 0 == None\n # nodes_cover=[0,0,0,0,0,0]\n\n subplot_title = [\n \"ROI+GOES\"\n ,\"ROI+MODIS\"\n ,\"ROI+GOES+MODIS\"\n ,None\n ,None\n ,None\n ]\n \n # for iter in range(6):\n # for iter in [2,5]:\n if True:\n iter = 2\n\n ###########################################################################\n if recalculate[iter]:\n print('recalculating iter = ',iter)\n\n ###########################################################################\n cover_resolution = 11\n # cover_resolution = 12\n cover_type = 'circular'\n # cover_resolution = 6\n #+ cover_resolution = 5\n #+ cover_type = 'bounding_box'\n\n if cover_type == 'circular':\n ###########################################################################\n # HI 28.5N 177W\n \n # Near the Big Island\n cover_lat = 19.5-0.375\n cover_lon = -155.5+0.375\n \n # Midway Island\n # cover_lat = 28.2\n # cover_lon = -177.35\n \n # Ni'ihau\n # cover_lat = 21.9\n # cover_lon = -160.17\n \n cover_rad = cover_rads[iter]\n \n cover = ps.to_circular_cover(\n cover_lat\n ,cover_lon\n ,cover_rad\n ,cover_resolution)\n # ,range_size_limit=2000)\n \n elif cover_type == 'bounding_box':\n # Set cover to \"bounding box.\"\n cover_lat = np.array([15,15,38,38],dtype=np.float)\n cover_lon = np.array([-174,-145,-145,-174],dtype=np.float)\n cover = ps.to_hull_range_from_latlon(\n cover_lat\n ,cover_lon\n ,cover_resolution\n )\n\n cover_cat = catalog(resolution=cover_resolution,sids=cover)\n cover_sids_min = np.amin(cover)\n cover_sids_max = np.amax(cover) # Need to convert to terminator\n cover_sids_max = gd.spatial_terminator(cover_sids_max)\n \n # for k in list(cover_cat.sdict.keys()):\n # print('cc: ',hex(k))\n\n ###########################################################################\n #\n gm_cat_resolution = 5\n gm_catalog = catalog(resolution=gm_cat_resolution)\n k=0\n for i in range(10):\n while(goes_sids[k]<0):\n k=k+1\n # print('adding: ','0x%016x'%goes_sids[k],k)\n gm_catalog.add('goes',goes_sids[k],goes_data[k])\n k=k+1\n \n for i in range(10):\n # print('adding: ','0x%016x'%sids[i])\n gm_catalog.add('modis',sids[i],data[i])\n \n k = 0\n # for i in range(10):\n idx = np.arange(goes_sids.size)[np.where( (goes_sids > cover_sids_min) & (goes_sids < cover_sids_max))]\n for k in range(len(idx)):\n # while(goes_sids[k]<0):\n # k=k+1\n if goes_sids[idx[k]] > 0:\n cover_cat.add_to_entry('goes',goes_sids[idx[k]],goes_data[idx[k]])\n # k=k+1\n \n idx = np.arange(sids.size)[np.where( (sids > cover_sids_min) & (sids < cover_sids_max))]\n for k in range(len(idx)):\n if sids[idx[k]] > 0:\n cover_cat.add_to_entry('modis',sids[idx[k]],data[idx[k]])\n \n \n # print(yaml.dump(gm_catalog))\n # exit()\n #\n ###########################################################################\n\n print('plotting iter ',iter)\n \n if nrows*ncols == 1:\n ax = axs[0]\n else:\n ax = axs[irow[iter],icol[iter]]\n \n if subplot_title[iter] is not None:\n ax.set_title(subplot_title[iter])\n if False:\n ax.set_global()\n if True:\n ax.coastlines(color=coastline_color)\n \n\n if iter == 0:\n x0 = 0.05\n y0 = 0.025; dy = 0.025\n plt.figtext(x0,y0+0*dy\n ,\"MODIS: \"+\"sketchG.\"+modis_base+modis_item+fmt_suffix+', Water_Vapor_Near_Infrared, resolution = %i'%(sids[10000]&31)\n ,fontsize=10)\n k=0;\n while goes_sids[k]<0:\n k=k+1\n plt.figtext(x0,y0+1*dy\n 
,\"GOES: \"+goes_file+' BAND_3 (6.7mu), resolution = %i'%(goes_sids[k]&31)\n ,fontsize=10)\n\n if cover_type == 'circular':\n plt.figtext(x0,y0+2*dy\n ,\"ROI Cover: resolution = %d, radius = %0.2f (upper) %0.3f (lower) degrees, center = 0x%016x\"%(cover_resolution,cover_rads[0],cover_rads[3],ps.from_latlon(npf64([cover_lat]),npf64([cover_lon]),cover_resolution)[0])\n ,fontsize=10)\n\n # plt.show()\n # exit()\n\n if False:\n lli = ps.triangulate_indices(cover)\n ax.triplot(tri.Triangulation(lli[0],lli[1],lli[2])\n ,'g-',transform=transf,lw=1,markersize=3)\n \n if True:\n if goes_plot_1[iter]:\n cc_data = cover_cat.get_all_data('goes')\n csids,sdat = zip(*[cd.as_tuple() for cd in cc_data])\n if goes_plot_1_points[iter]:\n glat,glon = ps.to_latlon(csids)\n\n # csids_at_res = list(map(gd.spatial_clear_to_resolution,csids))\n # cc_data_accum = dict()\n # for cs in csids_at_res:\n # cc_data_accum[cs] = []\n # for ics in range(len(csids_at_res)):\n # cc_data_accum[csids_at_res[ics]].append(sdat[ics])\n # for cs in cc_data_accum.keys():\n # if len(cc_data_accum[cs]) > 1:\n # cc_data_accum[cs] = [sum(cc_data_accum[cs])/(1.0*len(cc_data_accum[cs]))]\n # tmp_values = np.array(list(cc_data_accum.values()))\n # vmin = np.amin(tmp_values)\n # vmax = np.amax(np.array(tmp_values))\n\n cc_data_accum,vmin,vmax = gd.simple_collect(csids,sdat,force_resolution=goes_plot_1_res)\n\n # print('a100: ',cc_data)\n # print('cc_data type: ',type(cc_data))\n # print('cc_data[0] type: ',type(cc_data[0]))\n \n for cs in cc_data_accum.keys():\n # print('item: ',hex(cs),cc_data_accum[cs])\n lli = ps.triangulate_indices([cs])\n triang = tri.Triangulation(lli[0],lli[1],lli[2])\n cd_plt = np.array(cc_data_accum[cs])\n # print('cd_plt type ',type(cd_plt))\n # print('cd_plt shape ',cd_plt.shape)\n # print('cd_plt type ',type(cd_plt[0]))\n if goes_line[iter]:\n ax.triplot(triang,'r-',transform=transf,lw=1.5,markersize=3,alpha=0.5)\n # ax.tripcolor(triang,facecolors=cd_plt,vmin=goes_min,vmax=goes_max,cmap='Reds',alpha=0.4)\n ax.tripcolor(triang\n ,facecolors=cd_plt\n ,edgecolors='k',lw=0\n ,shading='flat'\n ,vmin=vmin,vmax=vmax,cmap=goes_plot_1_tripcolor,alpha=goes_plot_1_alpha)\n \n # for cd in cc_data:\n # lli = ps.triangulate_indices([cd.sid])\n # triang = tri.Triangulation(lli[0],lli[1],lli[2])\n # cd_plt = np.array([cd.datum])\n # if goes_line[iter]:\n # ax.triplot(triang,'r-',transform=transf,lw=3,markersize=3,alpha=0.5)\n # ax.tripcolor(triang,facecolors=cd_plt,vmin=goes_min,vmax=goes_max,cmap='Reds',alpha=0.4)\n \n if modis_plot_1[iter]:\n cc_data_m = cover_cat.get_all_data('modis')\n csids,sdat = zip(*[cd.as_tuple() for cd in cc_data_m])\n # mlat,mlon = ps.to_latlon(csids)\n\n cc_data_m_accum,vmin,vmax = gd.simple_collect(csids,sdat,force_resolution=modis_plot_1_res)\n\n for cs in cc_data_m_accum.keys():\n lli = ps.triangulate_indices([cs])\n triang = tri.Triangulation(lli[0],lli[1],lli[2])\n cd_plt = np.array(cc_data_m_accum[cs])\n # print('lli[0] len ',len(lli[0]))\n # print('cd_plt len ', len(cd_plt))\n # print('cd_plt type ',type(cd_plt))\n # print('cd_plt shape ',cd_plt.shape)\n # print('cd_plt type ',type(cd_plt[0]))\n if modis_line[iter]:\n ax.triplot(triang,'b-',transform=transf,lw=1.5,markersize=3,alpha=0.5)\n # ax.tripcolor(triang,facecolors=cd_plt,vmin=goes_min,vmax=goes_max,cmap='Blues',alpha=0.4)\n ax.tripcolor(triang\n ,facecolors=cd_plt\n ,edgecolors='k',lw=0\n ,shading='flat'\n ,vmin=vmin,vmax=vmax,cmap=modis_plot_1_tripcolor,alpha=modis_plot_1_alpha)\n\n # for cd in cc_data_m:\n # lli = 
ps.triangulate_indices([cd.sid])\n # triang = tri.Triangulation(lli[0],lli[1],lli[2])\n # cd_plt = np.array([cd.datum])\n # if modis_line[iter]:\n # ax.triplot(triang,'b-',transform=transf,lw=1,markersize=3,alpha=0.5)\n # ax.tripcolor(triang,facecolors=cd_plt,vmin=modis_min,vmax=modis_max,cmap='Blues',alpha=0.4)\n if modis_plot_1_points[iter]:\n mlat,mlon = ps.to_latlon(csids)\n ax.scatter(mlon,mlat,s=8,c=modis_scatter_color[iter])\n # ax.scatter(mlon,mlat,s=8,c='cyan')\n # ax.scatter(mlon,mlat,s=8,c='darkcyan')\n\n # blend_tripcolor_1 = False\n # blend_tripcolor_res_1 = 6\n # blend_tripcolor_1_cmap = None\n # blend_tripcolor_1_alpha = 1\n if blend_tripcolor_1:\n cc_data = cover_cat.get_all_data('goes')\n csids,sdat = zip(*[cd.as_tuple() for cd in cc_data])\n cc_data_accum,vmin,vmax = gd.simple_collect(csids,sdat,force_resolution=blend_tripcolor_1_res)\n\n cc_data_m = cover_cat.get_all_data('modis')\n csids_m,sdat_m = zip(*[cd.as_tuple() for cd in cc_data_m])\n cc_data_m_accum,vmin_m,vmax_m = gd.simple_collect(csids_m,sdat_m,force_resolution=blend_tripcolor_1_res)\n\n data_accum_keys = set()\n for cs in cc_data_accum.keys():\n data_accum_keys.add(cs)\n for cs in cc_data_m_accum.keys():\n data_accum_keys.add(cs)\n for cs in data_accum_keys:\n # print('item: ',hex(cs),cc_data_accum[cs])\n lli = ps.triangulate_indices([cs])\n triang = tri.Triangulation(lli[0],lli[1],lli[2])\n try:\n cd_plt_g = (np.array(cc_data_accum[cs])-vmin)/(vmax-vmin)\n cd_plt_g = cd_plt_g ** blend_tripcolor_1_gamma_g\n except:\n cd_plt_g = np.array([0])\n try:\n cd_plt_m = (np.array(cc_data_m_accum[cs])-vmin_m)/(vmax_m-vmin_m)\n cd_plt_m = cd_plt_m ** blend_tripcolor_1_gamma_m\n except:\n cd_plt_m = np.array([0])\n ######\n # blend 1 & 2\n # cd_plt = np.array([[cd_plt_g,0,cd_plt_m]])\n ######\n # blend 3\n # print('len: ',cd_plt_g.shape,cd_plt_m.shape)\n cd_plt = np.array([[cd_plt_g[0],0.5*(cd_plt_g+cd_plt_m)[0],cd_plt_m[0]]])\n cd_cmp = colors_to_cmap(cd_plt)\n zs = np.asarray(range(3),dtype=np.float)/2.0\n\n ax.tripcolor(triang\n ,zs\n ,cmap=cd_cmp\n # ,facecolors=cd_plt\n ,edgecolors='k',lw=0\n ,shading='gouraud'\n # ,shading='flat'\n # ,vmin=vmin,vmax=vmax\n # ,cmap=blend_tripcolor_1_cmap\n ,alpha=blend_tripcolor_1_alpha)\n # ,vmin=vmin,vmax=vmax,cmap=blend_tripcolor_1_cmap,alpha=blend_tripcolor_1_alpha)\n \n if goes_plot_1[iter]:\n if goes_plot_1_points[iter]:\n ax.scatter(glon,glat,s=8,c='black')\n\n if nodes_cover[iter] > 0:\n if nodes_cover[iter] == 1:\n cc_data_ = cover_cat.get_all_data('goes')\n else:\n cc_data_ = cover_cat.get_all_data('modis')\n sids_,dat_ = zip(*[cd.as_tuple() for cd in cc_data_])\n # print('sids_ len: ',len(sids_))\n sids_test = gd.spatial_clear_to_resolution(npi64([gd.spatial_coerce_resolution(s,gm_cat_resolution) for s in sids_]))\n # print('sids_tlen: ',len(sids_test))\n if cover_type == 'circular':\n print('cover: 0x%016x'%ps.from_latlon(npf64([cover_lat]),npf64([cover_lon]),cover_resolution)[0])\n geom_test = sid_geometry(sids_test)\n for s in geom_test.triangles.keys():\n print(iter,' 0x%016x'%s)\n triang_test = geom_test.triang()\n # ax.triplot(triang_test,'g-',transform=transf,lw=1.0,markersize=3,alpha=0.75)\n ax.triplot(triang_test,'k-',transform=transf,lw=1.0,markersize=3,alpha=0.5)\n \n if False:\n for i in range(0,10):\n k = cover_cat.sdict.peekitem(i)[0]\n triang = cover_cat.sdict[k].geometry.triang()\n ax.triplot(triang,'b-',transform=transf,lw=1,markersize=3,alpha=0.5)\n\n if cover_plot[iter]:\n # lli = 
ps.triangulate_indices(ps.expand_intervals(cover,9,result_size_limit=2048))\n lli = ps.triangulate_indices(cover)\n ax.triplot(tri.Triangulation(lli[0],lli[1],lli[2])\n ,'k-',transform=transf,lw=1,markersize=3,alpha=0.5)\n # ,'g-',transform=transf,lw=1,markersize=3)\n\n if False:\n # k = gm_catalog.sdict.keys()[0]\n # for k in gm_catalog.sdict.keys():\n for i in range(0,3):\n k = gm_catalog.sdict.peekitem(i)[0]\n triang = gm_catalog.sdict[k].geometry.triang()\n ax.triplot(triang,'r-',transform=transf,lw=1,markersize=3)\n\n if circle_plot[iter]:\n phi=np.linspace(0,2*np.pi,64)\n # rad=cover_rad\n rad=0.125\n ax.plot(cover_lon+rad*np.cos(phi),cover_lat+rad*np.sin(phi),transform=transf,color=circle_color[iter])\n\n # ax.set_facecolor('k')\n\n if plt_show_1[iter]:\n plt.show()\n \n###########################################################################\n#\n# if False:\n# sw_timer.stamp('triangulating')\n# print('triangulating')\n# client = Client()\n# for lli_ in slam(client,ps.triangulate_indices,sids):\n# sw_timer.stamp('slam iteration')\n# print('lli_ type: ',type(lli_))\n# lli = lli_.result()\n# sw_timer.stamp('slam result')\n# print('lli type: ',type(lli))\n# triang = tri.Triangulation(lli[0],lli[1],lli[2])\n# sw_timer.stamp('slam triang')\n# plt.triplot(triang,'r-',transform=transf,lw=1.5,markersize=3,alpha=0.5)\n# sw_timer.stamp('slam triplot')\n#\n# sw_timer.stamp('plt show')\n# # lons,lats,intmat=ps.triangulate_indices(sids)\n# # triang = tri.Triangulation(lons,lats,intmat)\n# # plt.triplot(triang,'r-',transform=transf,lw=1.5,markersize=3)\n#\n# plt.show()\n\n# client.close()\n\n print(sw_timer.report_all())\n\nif __name__ == \"__main__\":\n\n main()\n\n",
"\n# Look for intersection between MERRA and GOES files\n# Ignoring the file-name connection...\n\nimport geodata as gd\nfrom netCDF4 import Dataset\nimport numpy as np\nimport pystare as ps\n\nimport os, fnmatch\n\ndataPath = \"/home/mrilee/data/\"\n\nlistOfFiles = os.listdir(dataPath)\npatterns = [\"*.nc\",\"*.nc4\"]\nfor pattern in patterns:\n for entry in listOfFiles:\n if fnmatch.fnmatch(entry,pattern):\n print(entry)\n\nprint('')\npatterns = [\"MERRA*.nc4\",\"goes*.nc\"]\nfor pattern in patterns:\n for entry in listOfFiles:\n if fnmatch.fnmatch(entry,pattern):\n tid = gd.temporal_id_from_file(dataPath,entry)\n print(entry,hex(tid),gd.datetime_from_stare(tid))\n\n\n# HDF vs. ADM... Nice way to mock...\n\nm2_pattern=\"MERRA*.nc4\"\ngoes_pattern=\"goes*.nc\"\n\n\nm2_tid_index = {}\n\nif False:\n for entry in listOfFiles:\n if fnmatch.fnmatch(entry,m2_pattern):\n tid = gd.temporal_id_from_file(dataPath,entry)\n if tid not in m2_tid_index.keys():\n m2_tid_index[tid] = [entry]\n else:\n m2_tid_index[tid].append(entry)\n\nm2_files = []\nfor entry in listOfFiles:\n if fnmatch.fnmatch(entry,m2_pattern):\n m2_files.append(entry)\nm2_tid_index = gd.temporal_id_centered_filename_index(m2_files)\n\nprint('m2 tid keys:', m2_tid_index)\nprint('m2 tid keys:', list(m2_tid_index.keys()))\n\nif False:\n for entry in listOfFiles:\n if fnmatch.fnmatch(entry,goes_pattern):\n gtid = gd.temporal_id_from_file(dataPath,entry)\n # print(entry,gtid)\n # print('gtid: ',hex(gtid),gd.datetime_from_stare(gtid))\n gm2_match = ps.cmp_temporal(np.array([gtid],dtype=np.int64),list(m2_tid_index.keys()))\n match_fnames = []\n for i in range(gm2_match.size):\n if gm2_match[i] == 1:\n # fine_match = ps.cmp_temporal(np.array([gtid],dtype=np.int64),gd.merra2_stare_time(Dataset(dataPath+m2_tid_index[list(m2_tid_index.keys())[i]][0])))\n fine_match = gd.temporal_match_to_merra2_ds(gtid,Dataset(dataPath+m2_tid_index[list(m2_tid_index.keys())[i]][0]))\n # print('fine_match: ',fine_match)\n #print('m2tid: ',gd.datetime_from_stare(list(m2_tid_index.keys())[i]))\n if 1 in fine_match:\n match_fnames.append(m2_tid_index[list(m2_tid_index.keys())[i]][0])\n else:\n match_fnames.append(None)\n else:\n match_fnames.append(None)\n # print(entry, ' entry,matches: ',gm2_match,match_fnames)\n match_fnames_trimmed = []\n for i in match_fnames:\n if i is not None:\n match_fnames_trimmed.append(i)\n # print(entry,match_fnames_trimmed)\n if(len(match_fnames_trimmed) > 1):\n print('*** WARNING: more than one MERRA-2 file for the GOES file!!')\n matched_pair = (entry,match_fnames_trimmed[0])\n print('matched_pair: ',matched_pair)\n\nfor entry in listOfFiles:\n if fnmatch.fnmatch(entry,goes_pattern):\n gtid = gd.temporal_id_centered_from_filename(entry)\n print('matched pair: 0x%016x % 40s % 40s'%(gtid,entry,gd.temporal_match_to_merra2(gtid,m2_tid_index,dataPath=dataPath)[0]))\n",
"\n# Read h5 file and try CCL.\n\nimport geodata as gd\nimport h5py as h5\nfrom netCDF4 import Dataset\nimport numpy as np\nimport pystare as ps\n\nimport matplotlib as mpl\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nimport matplotlib.tri as tri\nimport cartopy.crs as ccrs\n\nimport cv2\nfrom ccl_marker_stack import ccl_marker_stack\n\n# workFileName = \"work.h5\"\nworkFileName = \"sketchF.h5\"\n#workFile = h5.File(workPath+workFileName,'r')\nworkFile = h5.File(workFileName,'r')\n\ntpw_scale = workFile['/merra2_description']['tpw_scale']\ntpw_offset = workFile['/merra2_description']['tpw_offset']\nprint('tpw scale offset: ',tpw_scale,tpw_offset)\n\nb5_img = workFile['/image']['goes_b5']\n# b5_img = workFile['/image']['goes_b4']\n# b5_img = workFile['/image']['goes_b3']\nprint('b5 mnmx: ',np.amin(b5_img),np.amax(b5_img))\n\n# m2_img = workFile['/image']['merra2_tpw']\nm2_img = tpw_offset + tpw_scale*workFile['/image']['merra2_tpw']\nprint('m2 mnmx: ',np.amin(m2_img),np.amax(m2_img))\n\n# b5_lo,b5_threshold = (7500.0,15000.0)\n# b5_lo,b5_threshold = (7500.0,12500.0)\nb5_lo,b5_threshold = (0.0,8000.0) # b5\n# b5_lo,b5_threshold = (0.0,5000.0) # b3\n# b5_lo,b5_threshold = (1000.0,8000.0)\n\nb5_img_ge2_idx = np.where((b5_img <= b5_threshold) & (b5_img>b5_lo)) # This is where TPW is high and b5 is low.\n\nb5_img_lt2_idx = np.where((b5_img > b5_threshold) | (b5_img < b5_lo )) # Reverse.\n\nnx = workFile['/image_description']['nx']\nny = workFile['/image_description']['ny']\n\nb5_thresh=[b5_lo,b5_threshold]\n\n# Copy the following from ccl2d.\n\nif False:\n cv2.imshow('b5_img',np.array(255*b5_img.reshape([nx,ny])/np.amax(b5_img),dtype=np.uint8)); cv2.waitKey(0); cv2.destroyAllWindows()\n\nmx = np.nanmax(b5_img)\nif mx == 0:\n mx = 1.0\ndata = np.array(255.0*b5_img/mx,dtype=np.uint8).reshape([nx,ny])\nd_trigger = int(255.0*b5_threshold/mx)\nd_out = int(255)\n\n# Why does the limb show up in the thresh, but not the labels?\n\n# Eliminate the sky.\ndata[np.where(data < (255*3000/mx))] = 255\n\nprint('d type: ',type(data))\nprint('d trigger,out: ',d_trigger,d_out)\nprint('d mnmx: ',np.amin(data),np.amax(data))\n\nif False:\n cv2.imshow('data',data); cv2.waitKey(0); cv2.destroyAllWindows()\n\n# This works\nif True:\n # Pass in external threshold\n marker_stack = ccl_marker_stack() \n m0_new,m1_new,m0_eol,translation01\\\n = marker_stack.make_slice_from(\n data\n ,(d_trigger,d_out)\n ,graph=False\n ,thresh_inverse=True\n ,global_latlon_grid=False\n ,norm_data=False\n ,perform_threshold=True)\n markers=m1_new\n print('markers type,len ',type(markers),len(markers))\n # print('markers ',markers)\n\nthresh = None\n\n# The following two also work\nif False:\n ret,thresh = cv2.threshold(data,d_trigger,d_out,cv2.THRESH_BINARY_INV) # less than, for b5\n print('thresh ret: ',type(ret),ret)\n print('thresh type: ',type(thresh),thresh.shape,np.amin(thresh),np.amax(thresh))\n # Pass in data, ask for threshold\n marker_stack = ccl_marker_stack() \n m0_new,m1_new,m0_eol,translation01\\\n = marker_stack.make_slice_from(\n thresh\n ,(d_trigger,d_out)\n ,graph=False\n ,thresh_inverse=True\n ,global_latlon_grid=False\n ,norm_data=False\n ,perform_threshold=False)\n markers=m1_new\n print('markers type,len ',type(markers),len(markers))\n # print('markers ',markers)\n\nif False:\n cv2.imshow('thresh',thresh); cv2.waitKey(0); cv2.destroyAllWindows()\n\nif False:\n ret,markers = cv2.connectedComponents(thresh)\n\nmarkers_mx = np.amax(markers)\nprint('markers_mx: ',markers_mx)\n\nif markers_mx == 0:\n 
markers_mx = 1\n\ndata1=markers.astype(np.float)/markers_mx\nprint('markers',type(markers),type(data1))\n\nif False:\n cv2.imshow('markers',data1); cv2.waitKey(0); cv2.destroyAllWindows()\n\n# https://stackoverflow.com/questions/46441893/connected-component-labeling-in-python\n# For fun viz.\ndef imshow_components(labels):\n # Map component labels to hue val\n label_hue = np.uint8(179*labels/np.max(labels))\n blank_ch = 255*np.ones_like(label_hue)\n labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])\n\n # cvt to BGR for display\n labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)\n\n # set bg label to black\n labeled_img[label_hue==0] = 0\n\n # cv2.imshow('labeled.png', labeled_img); cv2.waitKey()\n return labeled_img\n\nnrows = 5\nfig,axs = plt.subplots(nrows=nrows)\n\nfor row in range(nrows):\n axs[row].get_xaxis().set_visible(False)\n axs[row].get_yaxis().set_visible(False)\n\naxs[0].imshow(b5_img.reshape(nx,ny))\naxs[1].imshow(data)\nif thresh is not None:\n axs[2].imshow(thresh)\naxs[3].imshow(markers)\naxs[4].imshow(imshow_components(markers))\nplt.show()\n\n\n\n\n",
"#!/usr/bin/env python\n\n###########################################################################\n# Read a vds made by sketchJ1.py and display.\n###########################################################################\n\nimport os, fnmatch\n\nimport matplotlib as mpl\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nimport matplotlib.tri as tri\nimport cartopy.crs as ccrs\n\nimport numpy as np\n\nimport geodata as gd\nimport h5py as h5\nfrom pyhdf.SD import SD, SDC\n\nfrom stopwatch import sw_timer as timer\n\nimport pystare as ps\n\nfrom sortedcontainers import SortedDict\n\n###########################################################################\n#\ndef safe_shape(x):\n try:\n ret = x.shape\n except:\n ret = None\n pass\n return ret\n#\n###########################################################################\n#\n\ndef main():\n fname = 'MOD05_L2.A2005349.2120.061.2017294065852.wv_nir.sketchJ0.vds.h5'\n with h5.File(fname,'r') as h:\n shape = (h['metadata']['shape1'][0],h['metadata']['shape0'][0],)\n img = h['wv_nir']['Water_Vapor_Near_Infrared']\n print('img ',type(img),img.dtype,img.shape,shape)\n img = img.reshape(shape)\n plt.imshow(img,origin='lower',aspect=1)\n plt.show()\n return\n\nif __name__ == \"__main__\":\n main()\n\n\n## from matplotlib import pyplot as plt\n## plt.imshow(data, interpolation='nearest')\n## plt.show()\n"
] |
[
[
"matplotlib.pyplot.figtext",
"matplotlib.tri.Triangulation",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
],
[
"numpy.array"
],
[
"numpy.nanmax",
"numpy.amax",
"numpy.ones_like",
"numpy.amin",
"matplotlib.pyplot.subplots",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.where"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MoonmanBye/TorchInference_RRAM
|
[
"4c7948299f608658dcc42602c43f05b008e0fef2"
] |
[
"models/quant/quant_modules.py"
] |
[
"\"\"\"\nQuantization modules\n\"\"\"\n\nimport torch \nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .utee import wage_quantizer\n\ndef dorefa_quant(x, nbit, dequantize=True):\n x = torch.tanh(x)\n scale = 2**nbit - 1\n \n x = x / 2 / x.abs().max() + 1/2\n xq = torch.round(x * scale)\n \n if dequantize:\n xq = xq.div(scale)\n xq = 2 * xq - 1\n return xq\n\nclass RoundQ(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, wbit):\n input_q = dorefa_quant(input, wbit)\n ctx.save_for_backward(input)\n return input_q\n \n @staticmethod\n def backward(ctx, grad_output):\n grad_input = grad_output.clone()\n return grad_input, None\n\nclass RoundUQ(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, scale): \n input_div = input.mul(scale)\n input_q = input_div.round().div(scale)\n return input_q\n\n @staticmethod\n def backward(ctx, grad_output):\n grad_input = grad_output.clone()\n return grad_input, None\n\nclass WQ(nn.Module):\n \"\"\"\n DoreFa quantizer\n \"\"\"\n def __init__(self, wbit):\n super(WQ, self).__init__()\n self.wbit = wbit\n \n def forward(self, x):\n weight_q = RoundQ.apply(x, self.wbit)\n return weight_q\n\nclass AQ(nn.Module):\n def __init__(self, abit, act_alpha):\n super(AQ, self).__init__()\n self.abit = abit\n self.register_parameter('act_alpha', nn.Parameter(torch.tensor(act_alpha)))\n\n def forward(self, input):\n input = torch.where(input < self.act_alpha, input, self.act_alpha)\n \n with torch.no_grad():\n scale = (2**self.abit - 1) / self.act_alpha \n\n input_q = RoundUQ.apply(input, scale)\n return input_q\n\nclass QConv2d(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=False, wbit=8, abit=8, alpha_init=10.0):\n super(QConv2d, self).__init__(in_channels, out_channels, kernel_size,\n stride=stride, padding=padding, dilation=dilation, groups=groups,\n bias=bias)\n self.weight_quant = WQ(wbit=wbit)\n self.act_quant = AQ(abit, act_alpha=torch.tensor(alpha_init))\n\n self.wbit = wbit\n self.abit = abit\n\n def forward(self, input):\n if self.abit == 32:\n input_q = input\n else:\n input_q = self.act_quant(input)\n \n weight_q = self.weight_quant(self.weight)\n \n out = F.conv2d(input_q, weight_q, self.bias, self.stride, self.padding, self.dilation, self.groups)\n return out\n\n def extra_repr(self):\n return super(QConv2d, self).extra_repr() + \", wbit={}, abit={}\".format(self.wbit, self.abit)\n\n\nclass QLinear(nn.Linear):\n def __init__(self, in_channels, out_channels, bias=True, wbit=8, abit=8, alpha_init=10.0):\n super(QLinear, self).__init__(in_features=in_channels, out_features=out_channels, bias=bias)\n self.weight_quant = WQ(wbit=wbit)\n self.act_quant = AQ(abit, act_alpha=torch.tensor(alpha_init))\n\n self.wbit = wbit\n self.abit = abit\n\n def forward(self, input):\n weight_q = self.weight_quant(self.weight)\n input_q = self.act_quant(input)\n out = F.linear(input_q, weight_q, self.bias)\n return out\n\n def extra_repr(self):\n return super(QLinear, self).extra_repr() + \", wbit={}, abit={}\".format(self.wbit, self.abit)\n"
] |
[
[
"torch.round",
"torch.nn.functional.conv2d",
"torch.tensor",
"torch.tanh",
"torch.no_grad",
"torch.where",
"torch.nn.functional.linear"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
guberti/tvm
|
[
"bd88ee28bb1844a15c507a516eb823c90e8fbd75"
] |
[
"tests/python/frontend/onnx/test_forward.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport os\nimport re\n\nimport numpy as np\nimport pytest\nimport scipy\nimport torch\nimport torchvision\nimport tvm\nimport tvm.testing\nimport tvm.topi.testing\nfrom tvm import relay\nfrom tvm.contrib import graph_executor\n\nimport onnx\nfrom onnx import TensorProto, helper, mapping, numpy_helper\n\n\ndef get_input_data_shape_dict(graph_def, input_data):\n if isinstance(input_data, list):\n input_names = {}\n shape_dict = {}\n for i, _ in enumerate(input_data):\n input_names[i] = graph_def.graph.input[i].name\n shape_dict[input_names[i]] = input_data[i].shape\n else:\n input_names = graph_def.graph.input[0].name\n shape_dict = {input_names: input_data.shape}\n\n return input_names, shape_dict\n\n\ndef get_tvm_output_with_vm(\n graph_def, input_data, target, device, opset=None, freeze_params=False, convert_to_static=False\n):\n \"\"\"Generic function to execute and get tvm output with vm executor\"\"\"\n if not isinstance(input_data, list):\n input_data = [input_data]\n _, shape_dict = get_input_data_shape_dict(graph_def, input_data)\n\n mod, params = relay.frontend.from_onnx(\n graph_def, shape_dict, opset=opset, freeze_params=freeze_params\n )\n\n if convert_to_static:\n mod = relay.transform.DynamicToStatic()(mod)\n\n ex = relay.create_executor(\"vm\", mod=mod, device=device, target=target)\n result = ex.evaluate()(*input_data, **params)\n if isinstance(result, tvm.runtime.NDArray):\n return result.numpy()\n return [r.numpy() for r in result]\n\n\ndef get_tvm_output(\n graph_def,\n input_data,\n target,\n device,\n output_shape=None,\n output_dtype=\"float32\",\n opset=None,\n opt_level=1,\n):\n \"\"\"Generic function to execute and get tvm output\"\"\"\n # TODO: Resolve the issues and remove the following lines\n target = \"llvm\"\n device = tvm.cpu(0)\n\n input_names, shape_dict = get_input_data_shape_dict(graph_def, input_data)\n\n mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset)\n\n with tvm.transform.PassContext(opt_level=opt_level):\n graph, lib, params = relay.build(mod, target, params=params)\n\n m = graph_executor.create(graph, lib, device)\n # set inputs\n if isinstance(input_data, list):\n for i, e in enumerate(input_names):\n # Its possible for some onnx inputs to not be needed in the tvm\n # module, confirm its present before setting.\n try:\n m.set_input(input_names[i], tvm.nd.array(input_data[i].astype(input_data[i].dtype)))\n except:\n continue\n else:\n m.set_input(input_names, tvm.nd.array(input_data.astype(input_data.dtype)))\n\n m.set_input(**params)\n # execute\n m.run()\n # get outputs\n if isinstance(output_shape, list):\n tvm_output_list = []\n for i, _ in enumerate(output_shape):\n tvm_output = m.get_output(i)\n 
tvm_output_list.append(tvm_output.numpy())\n return tvm_output_list\n else:\n tvm_output = m.get_output(0)\n return tvm_output.numpy()\n\n\ndef get_onnxruntime_output(model, inputs):\n import onnxruntime.backend\n\n rep = onnxruntime.backend.prepare(model.SerializeToString(), \"CPU\")\n if isinstance(inputs, list) and len(inputs) == 1:\n inp = inputs[0]\n else:\n inp = inputs\n output = rep.run(inp)\n # Unpack output if there's only a single value.\n if len(output) == 1:\n output = output[0]\n return output\n\n\ndef verify_with_ort_with_inputs(\n model,\n inputs,\n out_shape=None,\n targets=None,\n use_vm=False,\n opset=None,\n freeze_params=False,\n convert_to_static=False,\n dtype=\"float32\",\n rtol=1e-5,\n atol=1e-5,\n apply_softmax=False,\n opt_level=1,\n):\n if opset is not None:\n model.opset_import[0].version = opset\n\n ort_out = get_onnxruntime_output(model, inputs)\n\n if targets is None:\n targets = [tgt for (tgt, _) in tvm.testing.enabled_targets()]\n\n for target in targets:\n dev = tvm.device(target, 0)\n if use_vm:\n tvm_out = get_tvm_output_with_vm(\n model,\n inputs,\n target,\n dev,\n opset=opset,\n freeze_params=freeze_params,\n convert_to_static=convert_to_static,\n )\n else:\n tvm_out = get_tvm_output(\n model, inputs, target, dev, out_shape, dtype, opset=opset, opt_level=opt_level\n )\n if not isinstance(tvm_out, list):\n tvm_out = [tvm_out]\n if not isinstance(ort_out, list):\n ort_out = [ort_out]\n for tvm_val, ort_val in zip(tvm_out, ort_out):\n if apply_softmax:\n ort_val = scipy.special.softmax(ort_val)\n tvm_val = scipy.special.softmax(tvm_val)\n tvm.testing.assert_allclose(ort_val, tvm_val, rtol=rtol, atol=atol)\n assert ort_val.dtype == tvm_val.dtype\n\n\ndef verify_with_ort(\n model,\n input_shapes,\n out_shape=None,\n targets=None,\n use_vm=False,\n opset=None,\n freeze_params=False,\n convert_to_static=False,\n dtype=\"float32\",\n rtol=1e-5,\n atol=1e-5,\n):\n inputs = [np.random.uniform(size=ishape).astype(dtype) for ishape in input_shapes]\n verify_with_ort_with_inputs(\n model,\n inputs,\n out_shape=out_shape,\n targets=targets,\n use_vm=use_vm,\n opset=opset,\n freeze_params=freeze_params,\n convert_to_static=convert_to_static,\n dtype=dtype,\n rtol=rtol,\n atol=atol,\n )\n\n\ndef make_constant_node(name, data_type, dims, vals):\n return helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[name],\n value=helper.make_tensor(name=name, data_type=data_type, dims=dims, vals=vals),\n )\n\n\ndef is_version_greater_than(ver):\n return \"\".join(re.findall(r\"(\\d+\\.)(\\d+\\.)(\\d)\", onnx.__version__)[0]) > \"\".join(\n re.findall(r\"(\\d+\\.)(\\d+\\.)(\\d)\", ver)[0]\n )\n\n\[email protected]_gpu\ndef test_reshape():\n in_shape = (4, 3, 3, 4)\n ref_shape = (6, 2, 4, 3)\n\n ref_array = np.array(ref_shape)\n ref_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"ref_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.INT32,\n dims=ref_array.shape,\n vals=ref_array.flatten().astype(int),\n ),\n )\n reshape_node = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out\"])\n\n graph = helper.make_graph(\n [ref_node, reshape_node],\n \"reshape_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"reshape_test\")\n\n for target, dev in tvm.testing.enabled_targets():\n x = 
np.random.uniform(size=in_shape).astype(\"int32\")\n tvm_out = get_tvm_output(model, x, target, dev, ref_shape, \"float32\")\n tvm.testing.assert_allclose(ref_shape, tvm_out.shape)\n\n\[email protected]_gpu\ndef test_double_reshape():\n in_shape = (4, 3, 3, 4)\n ref_shape = (6, 2, 4, 3)\n\n ref_array = np.array(ref_shape)\n ref_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"ref_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.INT32,\n dims=ref_array.shape,\n vals=ref_array.flatten().astype(int),\n ),\n )\n reshape_node1 = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out1\"])\n reshape_node2 = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out2\"])\n add_node = helper.make_node(\"Add\", [\"out1\", \"out2\"], [\"out\"])\n\n graph = helper.make_graph(\n [ref_node, reshape_node1, reshape_node2, add_node],\n \"reshape_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"reshape_test\")\n\n for target, dev in tvm.testing.enabled_targets():\n x = np.random.uniform(size=in_shape).astype(\"int32\")\n tvm_out = get_tvm_output(model, x, target, dev, ref_shape, \"float32\")\n tvm.testing.assert_allclose(ref_shape, tvm_out.shape)\n\n\[email protected]_gpu\ndef test_expand():\n def _test_expand(name, data, shape, ref_data, dtype=\"int32\"):\n shape_array = np.array(shape)\n if dtype == \"int32\":\n shape_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"shape\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.INT32,\n dims=shape_array.shape,\n vals=shape_array.flatten().astype(\"int32\"),\n ),\n )\n elif dtype == \"int64\":\n shape_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"shape\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.INT64,\n dims=shape_array.shape,\n vals=shape_array.flatten().astype(\"int64\"),\n ),\n )\n else:\n raise \"Invalid dtype\"\n expand_node = helper.make_node(\"Expand\", [\"in\", \"shape\"], [\"out\"])\n\n graph = helper.make_graph(\n [shape_node, expand_node],\n \"expand_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(data.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_data.shape))],\n )\n\n model = helper.make_model(graph, producer_name=name)\n\n for target, dev in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output_with_vm(model, data, target, dev, freeze_params=True)\n tvm.testing.assert_allclose(ref_data, tvm_out)\n\n in_shape = (3, 1)\n shape = (3, 4)\n data = np.random.uniform(size=in_shape).astype(np.float32)\n ref_data = np.tile(data, 4)\n _test_expand(\"expand_with_dim_unchanged_test\", data, shape, ref_data, \"int32\")\n _test_expand(\"expand_with_dim_unchanged_test\", data, shape, ref_data, \"int64\")\n\n in_shape = (3, 1)\n shape = (2, 1, 6)\n data = np.random.uniform(size=in_shape).astype(np.float32)\n ref_data = data * np.ones(shape, dtype=np.float32)\n _test_expand(\"expand_with_dim_changed_test\", data, shape, ref_data, \"int32\")\n _test_expand(\"expand_with_dim_changed_test\", data, shape, ref_data, \"int64\")\n\n\ndef verify_depth_to_space(inshape, outshape, mode, blockSize):\n node = onnx.helper.make_node(\"DepthToSpace\", inputs=[\"x\"], outputs=[\"y\"], blocksize=blockSize)\n\n 
graph = helper.make_graph(\n [node],\n \"depth_to_space_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(inshape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(outshape))],\n )\n\n model = helper.make_model(graph, producer_name=\"depth_to_space_test\")\n\n verify_with_ort(model, [inshape], [outshape])\n\n\[email protected]_gpu\ndef test_depth_to_space():\n # current onnx.checker use OpSet-1 version of DepthToSpace, which doesn't have a mode argument.\n # TO-DO, we can add mode arguement to test CRD mode and DCR mode\n # in the future when we update to a newer onnx version.\n verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode=\"CRD\", blockSize=2)\n\n\ndef verify_space_to_depth(inshape, outshape, blockSize):\n node = onnx.helper.make_node(\"SpaceToDepth\", inputs=[\"x\"], outputs=[\"y\"], blocksize=blockSize)\n\n graph = helper.make_graph(\n [node],\n \"space_to_depth_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(inshape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(outshape))],\n )\n\n model = helper.make_model(graph, producer_name=\"space_to_depth_test\")\n\n verify_with_ort(model, [inshape], [outshape])\n\n\[email protected]_gpu\ndef test_space_to_depth():\n verify_space_to_depth((1, 1, 4, 6), (1, 4, 2, 3), 2)\n\n\[email protected]_gpu\ndef test_shape():\n in_shape = (4, 3, 3, 4)\n ref_shape = (6, 2, 4, 3)\n\n ref_array = np.array(ref_shape)\n ref_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"ref_in\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=onnx.TensorProto.INT32,\n dims=ref_array.shape,\n vals=ref_array.flatten().astype(int),\n ),\n )\n reshape_node = helper.make_node(\"Reshape\", [\"in\", \"ref_in\"], [\"out\"])\n\n shape_node = helper.make_node(\"Shape\", [\"out\"], [\"final_out\"])\n\n graph = helper.make_graph(\n [ref_node, reshape_node, shape_node],\n \"shape_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"final_out\", TensorProto.FLOAT, list(ref_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"shape_test\")\n\n for target, dev in tvm.testing.enabled_targets():\n x = np.random.uniform(size=in_shape).astype(\"int32\")\n tvm_out = get_tvm_output(model, x, target, dev, ref_shape, \"int32\")\n tvm.testing.assert_allclose(ref_shape, tvm_out)\n\n\ndef _test_power_iteration(x_shape, y_shape):\n if isinstance(y_shape, int):\n y_shape = [y_shape]\n\n x = np.random.uniform(size=x_shape).astype(np.float32)\n y = np.random.uniform(size=y_shape).astype(np.float32)\n\n np_res = np.power(x, y).astype(np.float32)\n\n res = helper.make_node(\"Pow\", [\"x\", \"y\"], [\"out\"])\n\n graph = helper.make_graph(\n [res],\n \"power_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(y_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(np_res.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"power_test\")\n\n for target, dev in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, [x, y], target, dev, np_res.shape)\n tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_power():\n _test_power_iteration((1, 3), (1))\n _test_power_iteration((2, 3), (2, 3))\n _test_power_iteration((2, 3), (1, 3))\n\n\ndef 
verify_range(start, limit, delta, dtype):\n dtype_map = {\n \"float32\": TensorProto.FLOAT,\n \"int32\": TensorProto.INT32,\n \"int64\": TensorProto.INT64,\n }\n dtype_onnx = dtype_map[dtype]\n y = helper.make_node(\"Range\", [\"start\", \"limit\", \"delta\"], [\"output\"])\n graph = helper.make_graph(\n [y],\n \"range_test\",\n inputs=[\n helper.make_tensor_value_info(\"start\", dtype_onnx, []),\n helper.make_tensor_value_info(\"limit\", dtype_onnx, []),\n helper.make_tensor_value_info(\"delta\", dtype_onnx, []),\n ],\n outputs=[\n helper.make_tensor_value_info(\n \"output\", dtype_onnx, np.arange(start, limit, delta).shape\n )\n ],\n )\n model = helper.make_model(graph, producer_name=\"range_test\")\n inputs = [np.array(x).astype(dtype) for x in [start, limit, delta]]\n verify_with_ort_with_inputs(model, inputs, use_vm=True)\n\n\[email protected]_gpu\ndef test_range():\n for t in [\"float32\", \"int32\", \"int64\"]:\n verify_range(0, 10, 1, t)\n verify_range(2, 8, 2, t)\n verify_range(-3, 6, 4, t)\n verify_range(-2, -7, -1, t)\n\n\[email protected]_gpu\ndef test_squeeze():\n in_shape = (1, 3, 1, 3, 1, 1)\n out_shape = (3, 3)\n y = helper.make_node(\"Squeeze\", [\"in\"], [\"out\"], axes=[0, 2, 4, 5])\n\n graph = helper.make_graph(\n [y],\n \"squeeze_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"squeeze_test\")\n x = np.random.uniform(size=in_shape).astype(\"float32\")\n verify_with_ort_with_inputs(model, [x], [out_shape], opset=11)\n\n\[email protected]_gpu\ndef test_flatten():\n\n in_shape = (1, 3, 4, 4)\n axis = 1\n ref_shape = (1, 48)\n\n flatten_node = helper.make_node(\"Flatten\", [\"in\"], [\"out\"], axis=axis)\n\n graph = helper.make_graph(\n [flatten_node],\n \"flatten_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(ref_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"flatten_test\")\n verify_with_ort(model, [in_shape])\n\n\[email protected]_gpu\ndef test_unsqueeze():\n in_shape = (3, 3)\n axis = (0, 3, 4)\n out_shape = (1, 3, 3, 1, 1)\n y = helper.make_node(\"Unsqueeze\", [\"in\"], [\"out\"], axes=list(axis))\n\n graph = helper.make_graph(\n [y],\n \"squeeze_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"squeeze_test\")\n verify_with_ort(model, [in_shape], opset=11)\n\n\ndef verify_gather(in_shape, indices, axis, dtype):\n x = np.random.uniform(size=in_shape).astype(dtype)\n indices = np.array(indices, dtype=\"int64\")\n out_np = np.take(x, indices, axis=axis)\n\n y = helper.make_node(\"Gather\", [\"in\", \"indices\"], [\"out\"], axis=axis)\n\n graph = helper.make_graph(\n [y],\n \"gather_test\",\n inputs=[\n helper.make_tensor_value_info(\n \"in\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(in_shape)\n ),\n helper.make_tensor_value_info(\"indices\", TensorProto.INT64, list(indices.shape)),\n ],\n outputs=[\n helper.make_tensor_value_info(\n \"out\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(out_np.shape)\n )\n ],\n )\n model = helper.make_model(graph, producer_name=\"gather_test\")\n verify_with_ort_with_inputs(model, [x, indices], 
dtype=dtype)\n\n\[email protected]_gpu\ndef test_gather():\n verify_gather((4,), [1], 0, \"int32\")\n verify_gather((1, 4), [0], 0, \"int32\")\n verify_gather((4,), [[[1, 0], [0, 1]]], 0, \"float32\")\n verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, \"int32\")\n verify_gather((3, 3, 3), [[[1, 0]]], -1, \"int32\")\n verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, \"float32\")\n\n\[email protected]_gpu\ndef test_dynamic_gather():\n dtype = \"float32\"\n in_shape = [2, 2]\n indices = 1\n axis = 1\n x = np.random.uniform(size=in_shape).astype(dtype)\n indices = np.array(indices, dtype=\"int64\")\n out_np = np.take(x, indices, axis=axis)\n\n indices = helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"indices\"],\n value=onnx.helper.make_tensor(\n name=\"const_indices\",\n data_type=onnx.TensorProto.INT64,\n dims=[],\n vals=[1],\n ),\n )\n y = helper.make_node(\"Gather\", [\"in\", \"indices\"], [\"out\"], axis=axis)\n\n graph = helper.make_graph(\n [indices, y],\n \"gather_test\",\n inputs=[\n helper.make_tensor_value_info(\n \"in\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], [\"?\", \"?\"]\n ),\n ],\n outputs=[\n helper.make_tensor_value_info(\n \"out\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], [\"?\"] * len(out_np.shape)\n )\n ],\n )\n model = helper.make_model(graph, producer_name=\"dynamic_gather_test\")\n\n mod, params = relay.frontend.from_onnx(model)\n\n for target, device in tvm.testing.enabled_targets():\n ex = relay.create_executor(\"vm\", mod=mod, device=device, target=target)\n result = ex.evaluate()(x, **params)\n tvm.testing.assert_allclose(out_np, result.numpy(), rtol=1e-5, atol=1e-5)\n\n\ndef verify_gatherelements(in_shape, indices, axis):\n x = np.random.uniform(size=in_shape).astype(\"float32\")\n indices = np.array(indices, dtype=\"int32\")\n\n y = helper.make_node(\"GatherElements\", [\"data\", \"indices\"], [\"output\"], axis=axis)\n graph = helper.make_graph(\n [y],\n \"gather_elements_test\",\n inputs=[\n helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"indices\", TensorProto.INT32, list(indices.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(in_shape))],\n )\n model = helper.make_model(graph, producer_name=\"gather_elements_test\")\n\n verify_with_ort_with_inputs(model, [x, indices])\n\n\[email protected]_gpu\ndef test_gatherelements():\n verify_gatherelements((4,), [3, 0, 2, 1], 0)\n verify_gatherelements((2, 2), [[1, 0], [0, 1]], 0)\n verify_gatherelements((2, 2), [[0, 0], [1, 0]], 1)\n verify_gatherelements((2, 2), [[1, 0], [0, 1]], 1)\n\n indices = [\n [[1, 0, 0], [1, 0, 1], [0, 1, 1]],\n [[1, 1, 1], [1, 2, 1], [1, 0, 1]],\n [[1, 2, 1], [1, 2, 1], [1, 2, 1]],\n ]\n\n verify_gatherelements((3, 3, 3), indices, 2)\n\n\ndef verify_scatter(in_shape, indices, axis):\n x = np.random.uniform(size=in_shape).astype(\"float32\")\n indices = np.array(indices, dtype=\"int32\")\n updates = np.random.uniform(size=indices.shape).astype(\"float32\")\n\n y = helper.make_node(\"ScatterElements\", [\"data\", \"indices\", \"updates\"], [\"output\"], axis=axis)\n\n graph = helper.make_graph(\n [y],\n \"scatter_test\",\n inputs=[\n helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"indices\", TensorProto.INT32, list(indices.shape)),\n helper.make_tensor_value_info(\"updates\", TensorProto.FLOAT, list(indices.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, 
list(in_shape))],\n )\n model = helper.make_model(graph, producer_name=\"scatter_test\")\n verify_with_ort_with_inputs(model, [x, indices, updates])\n\n\[email protected]_gpu\ndef test_scatter():\n verify_scatter((4,), [1], 0)\n verify_scatter((1, 4), [[0]], 0)\n verify_scatter((4,), [2, 3], 0)\n verify_scatter((2, 2), [[1, 0], [0, 1]], 1)\n verify_scatter((3, 3, 3), [[[-1, -3]]], -1)\n verify_scatter((4, 3, 5, 6), [[[[2, 1, 0, 0]]]], 0)\n\n\ndef _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None):\n if axes:\n y = helper.make_node(\"Slice\", [\"in\"], [\"out\"], axes=axes, starts=starts, ends=ends)\n else:\n y = helper.make_node(\"Slice\", [\"in\"], [\"out\"], starts=starts, ends=ends)\n\n graph = helper.make_graph(\n [y],\n \"slice_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"slice_test\")\n verify_with_ort_with_inputs(model, [indata], [outdata.shape], opset=1)\n\n\ndef _test_slice_iteration_v10(indata, outdata, **attrs):\n starts = attrs[\"starts\"]\n ends = attrs[\"ends\"]\n axes = None if \"axes\" not in attrs else attrs[\"axes\"]\n steps = None if \"steps\" not in attrs else attrs[\"steps\"]\n starts = np.asarray(starts)\n ends = np.asarray(ends)\n inputs = [\n helper.make_tensor_value_info(\"data\", TensorProto.FLOAT, list(indata.shape)),\n helper.make_tensor_value_info(\"starts\", TensorProto.INT64, list(starts.shape)),\n helper.make_tensor_value_info(\"ends\", TensorProto.INT64, list(ends.shape)),\n ]\n initializer = [\n helper.make_tensor(\"starts\", TensorProto.INT64, list(starts.shape), starts),\n helper.make_tensor(\"ends\", TensorProto.INT64, list(ends.shape), ends),\n ]\n nodes = []\n\n if \"add_noop_to_input_attrs\" in attrs:\n\n def add_noop_to_input_attr(attr_name, attr):\n output_name = attr_name + \"_output\"\n\n ref_shape = list(np.array(attr).shape)\n ref_shape.insert(0, 1)\n ref_shape = tuple(ref_shape)\n ref_array = np.array(ref_shape)\n ref_node = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"ref_in_\" + attr_name],\n value=onnx.helper.make_tensor(\n name=\"const_tensor__1_\" + attr_name,\n data_type=onnx.TensorProto.INT64,\n dims=ref_array.shape,\n vals=ref_array.flatten().astype(int),\n ),\n )\n in_shape = np.array(attr).shape\n in_array = np.array(in_shape)\n ref_node2 = onnx.helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"input_shape_\" + attr_name],\n value=onnx.helper.make_tensor(\n name=\"const_tensor__2_\" + attr_name,\n data_type=onnx.TensorProto.INT64,\n dims=in_array.shape,\n vals=in_array.flatten().astype(int),\n ),\n )\n\n reshape1_node = helper.make_node(\n \"Reshape\", [attr_name, \"ref_in_\" + attr_name], [\"reshape_\" + attr_name]\n )\n reshape2_node = helper.make_node(\n \"Reshape\", [\"reshape_\" + attr_name, \"input_shape_\" + attr_name], [output_name]\n )\n return [ref_node, ref_node2, reshape1_node, reshape2_node]\n\n slice_inputs = []\n for attr_name in [\"starts\", \"ends\", \"axes\", \"steps\"]:\n if attr_name not in attrs:\n continue\n if \"add_noop_to_input_attrs\" in attrs and attr_name in attrs[\"add_noop_to_input_attrs\"]:\n nodes.extend(add_noop_to_input_attr(attr_name, attrs[attr_name]))\n slice_inputs.append(attr_name + \"_output\")\n else:\n slice_inputs.append(attr_name)\n\n if axes:\n axes = np.asarray(axes)\n inputs.append(helper.make_tensor_value_info(\"axes\", TensorProto.INT64, 
list(axes.shape)))\n initializer.append(helper.make_tensor(\"axes\", TensorProto.INT64, list(axes.shape), axes))\n\n if steps:\n assert axes is not None and len(axes) == len(steps)\n steps = np.asarray(steps)\n inputs.append(helper.make_tensor_value_info(\"steps\", TensorProto.INT64, list(axes.shape)))\n initializer.append(helper.make_tensor(\"steps\", TensorProto.INT64, list(steps.shape), steps))\n\n y = helper.make_node(\"Slice\", [\"data\", *slice_inputs], [\"out\"])\n\n nodes.append(y)\n graph = helper.make_graph(\n nodes,\n \"slice_test\",\n inputs=inputs,\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n initializer=initializer,\n )\n model = helper.make_model(graph, producer_name=\"slice_test\")\n verify_with_ort_with_inputs(model, [indata], opset=10, freeze_params=True, use_vm=True)\n\n\[email protected]_gpu\ndef test_slice():\n x = np.random.randn(20, 10, 5).astype(np.float32)\n _test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))\n _test_slice_iteration_v1(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))\n _test_slice_iteration_v1(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))\n _test_slice_iteration_v1(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))\n _test_slice_iteration_v10(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))\n _test_slice_iteration_v10(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))\n _test_slice_iteration_v10(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))\n _test_slice_iteration_v10(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))\n _test_slice_iteration_v10(\n x,\n x[0:3, 0:10],\n starts=(0, 0),\n ends=(3, 10),\n axes=(0, 1),\n add_noop_to_input_attrs=[\"starts\"],\n )\n _test_slice_iteration_v10(\n x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4), add_noop_to_input_attrs=[\"ends\"]\n )\n _test_slice_iteration_v10(\n x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,), add_noop_to_input_attrs=[\"axes\"]\n )\n _test_slice_iteration_v10(\n x,\n x[:, 0:-1],\n starts=(0,),\n ends=(-1,),\n axes=(1,),\n add_noop_to_input_attrs=[\"starts\", \"ends\"],\n )\n _test_slice_iteration_v10(\n x,\n x[0:3, 0:10],\n starts=(0, 0),\n ends=(3, 10),\n axes=(0, 1),\n add_noop_to_input_attrs=[\"ends\", \"axes\"],\n )\n _test_slice_iteration_v10(\n x,\n x[:, :, 3:4],\n starts=(0, 0, 3),\n ends=(20, 10, 4),\n add_noop_to_input_attrs=[\"starts\", \"axes\"],\n )\n _test_slice_iteration_v10(\n x,\n x[:, 1:1000],\n starts=(1,),\n ends=(1000,),\n axes=(1,),\n add_noop_to_input_attrs=[\"starts\", \"ends\", \"axes\"],\n )\n x = np.random.randn(1, 1, 1, 128).astype(np.float32)\n _test_slice_iteration_v10(\n x, x, starts=(0, 0), ends=(9223372036854775807, 9223372036854775807), axes=(0, 3)\n )\n\n x = np.random.randn(4, 4).astype(np.float32)\n _test_slice_iteration_v10(\n x, x[:, 1::2], starts=(1,), ends=(9223372036854775807,), axes=(1,), steps=(2,)\n )\n _test_slice_iteration_v10(\n x,\n x[0::1, 1::2],\n starts=(0, 1),\n ends=(4, 4),\n axes=(0, 1),\n steps=(1, 2),\n )\n\n\ndef _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs, opset=None):\n indata = np.random.uniform(-1, 1, size=inshape).astype(dtype)\n outdata = outfunc(indata, **npargs)\n\n y = helper.make_node(opname, [\"in\"], [\"out\"], **kwargs)\n\n graph = helper.make_graph(\n [y],\n opname + \"_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n )\n\n 
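# --- Illustrative sketch (not part of the original test suite) ---
# The Slice-10 cases in test_slice above map directly onto Python slicing;
# INT64_MAX (9223372036854775807) is ONNX's "slice to the end" sentinel and
# gets clamped to the dimension size:
import numpy as np

x = np.random.randn(4, 4).astype(np.float32)
int64_max = 9223372036854775807
stop = min(int64_max, x.shape[1])
assert x[:, 1:stop:2].shape == (4, 2)  # starts=(1,), ends=(INT64_MAX,), steps=(2,)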
model = helper.make_model(graph, producer_name=opname + \"_test\")\n verify_with_ort_with_inputs(model, [indata], [outdata.shape], opset=opset, dtype=dtype)\n\n\[email protected]_gpu\ndef test_floor():\n _test_onnx_op_elementwise((2, 4, 5, 6), np.floor, {}, \"float32\", \"Floor\", {})\n\n\[email protected]_gpu\ndef test_ceil():\n _test_onnx_op_elementwise((2, 4, 5, 6), np.ceil, {}, \"float32\", \"Ceil\", {})\n\n\[email protected]_gpu\ndef test_clip():\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n np.clip,\n {\"a_min\": -1.0, \"a_max\": 1.0},\n \"float32\",\n \"Clip\",\n {\"min\": -1.0, \"max\": 1.0},\n opset=6,\n )\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n np.clip,\n {\"a_min\": -np.inf, \"a_max\": 1.0},\n \"float32\",\n \"Clip\",\n {\"max\": 1.0},\n opset=6,\n )\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n np.clip,\n {\"a_min\": -1.0, \"a_max\": np.inf},\n \"float32\",\n \"Clip\",\n {\"min\": -1.0},\n opset=6,\n )\n\n\[email protected]_gpu\ndef test_clip_min_max_as_inputs():\n input_shape = (2, 4, 5, 6)\n nodes = [\n make_constant_node(\"min\", onnx.TensorProto.FLOAT, (), [0.0]),\n make_constant_node(\"max\", onnx.TensorProto.FLOAT, (), [6.0]),\n ]\n input_names = [\"in\", \"min\", \"max\"]\n nodes.append(helper.make_node(\"Clip\", inputs=input_names, outputs=[\"out\"]))\n graph = helper.make_graph(\n nodes,\n \"clip_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(input_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(input_shape))],\n )\n model = helper.make_model(graph, producer_name=\"clip_test\")\n\n verify_with_ort(model, [input_shape], out_shape=[input_shape])\n\n\[email protected]_gpu\ndef test_round():\n _test_onnx_op_elementwise((2, 4, 5, 6), np.round, {}, \"float32\", \"Round\", {})\n\n\ndef _test_finite_ops(inshape, outfunc, npargs, dtype, opname, kwargs):\n indata = np.random.choice(a=[np.nan, np.inf, -np.inf, 0.5, 1.0, 0], size=inshape).astype(dtype)\n\n outdata = outfunc(indata, **npargs)\n y = helper.make_node(opname, [\"in\"], [\"out\"], **kwargs)\n\n graph = helper.make_graph(\n [y],\n opname + \"_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.BOOL, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=opname + \"_test\")\n verify_with_ort_with_inputs(model, [indata], [outdata.shape], dtype=dtype)\n\n\[email protected]_gpu\ndef test_isinf():\n _test_finite_ops((2, 4, 5, 6), np.isinf, {}, \"float32\", \"IsInf\", {})\n\n\[email protected]_gpu\ndef test_isnan():\n _test_finite_ops((2, 4, 5, 6), np.isnan, {}, \"float32\", \"IsNaN\", {})\n\n\ndef verify_gather_nd(in_shape, indices, out_shape, dtype=\"float32\", batch_dims=0, opset=11):\n x = np.random.uniform(size=in_shape).astype(dtype)\n indices = np.array(indices, dtype=\"int64\")\n\n y = helper.make_node(\"GatherND\", [\"in\", \"indices\"], [\"out\"])\n\n if opset >= 12:\n batch_dims_attr = helper.make_attribute(\"batch_dims\", batch_dims)\n y.attribute.append(batch_dims_attr)\n\n graph = helper.make_graph(\n [y],\n \"gather_test\",\n inputs=[\n helper.make_tensor_value_info(\n \"in\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(in_shape)\n ),\n helper.make_tensor_value_info(\"indices\", TensorProto.INT64, list(indices.shape)),\n ],\n outputs=[\n helper.make_tensor_value_info(\n \"out\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(out_shape)\n )\n ],\n )\n model = helper.make_model(graph, 
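# --- Illustrative sketch (not part of the original test suite) ---
# test_clip above pairs ONNX Clip with np.clip; a missing bound is
# modelled as +/-inf on the numpy side:
import numpy as np

x = np.random.uniform(-3, 3, size=(2, 4)).astype("float32")
max_only = np.clip(x, -np.inf, 1.0)  # Clip node carrying only `max`
both = np.clip(x, -1.0, 1.0)         # Clip node carrying `min` and `max`
assert both.min() >= -1.0 and both.max() <= 1.0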
producer_name=\"gather_test\")\n verify_with_ort_with_inputs(model, [x, indices], [out_shape], opset=opset)\n\n\[email protected]_gpu\ndef test_gather_nd():\n verify_gather_nd([2, 2], [[0, 0], [1, 1]], [2], \"int32\")\n verify_gather_nd([2, 2], [[1], [0]], [2, 2])\n verify_gather_nd([2, 2, 2], [[0, 1], [1, 0]], [2, 2])\n verify_gather_nd([2, 2, 2], [[[0, 1]], [[1, 0]]], [2, 1, 2])\n\n if is_version_greater_than(\"1.6.0\"):\n verify_gather_nd([2, 2, 2], [[1], [0]], [2, 2], batch_dims=1, opset=12)\n verify_gather_nd(\n (3, 2, 2, 3, 4),\n np.random.randint(low=0, high=2, size=(3, 2, 3), dtype=\"int64\"),\n (3, 2),\n batch_dims=2,\n opset=12,\n )\n\n\[email protected]_gpu\ndef test_onehot():\n indices_shape = [10]\n indices_array = np.random.randint(low=0, high=9, size=indices_shape, dtype=\"int32\")\n depth = 10\n values = np.asarray([0, 1]).astype(\"int32\")\n out_np = np.eye(depth)[indices_array.reshape(-1)]\n\n onehot_node = helper.make_node(\"OneHot\", [\"indices\", \"depth\", \"values\"], [\"out\"])\n\n graph = helper.make_graph(\n [onehot_node],\n \"onehot_test\",\n inputs=[\n helper.make_tensor_value_info(\"indices\", TensorProto.INT32, indices_shape),\n helper.make_tensor_value_info(\"depth\", TensorProto.INT32, [1]),\n helper.make_tensor_value_info(\"values\", TensorProto.INT32, values.shape),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.INT32, out_np.shape)],\n )\n\n model = helper.make_model(graph, producer_name=\"onehot_test\")\n\n # TODO(jwfromm): Replace test against np with test against onnxrt once we update versions.\n for target, dev in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output_with_vm(\n model, [indices_array, np.array([depth]).astype(\"int32\"), values], target, dev\n )\n tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n\ndef verify_gemm(a_shape, b_shape, c_shape=None, freeze_params=False, dtype=\"float32\"):\n out_shape = [a_shape[0], b_shape[1]]\n a_array = np.random.uniform(size=a_shape).astype(dtype)\n b_array = np.random.uniform(size=b_shape).astype(dtype)\n input_names = [\"a\", \"b\"]\n ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]\n input_nodes = [\n helper.make_tensor_value_info(\"a\", ONNX_DTYPE, list(a_shape)),\n helper.make_tensor_value_info(\"b\", ONNX_DTYPE, list(b_shape)),\n ]\n input_values = [a_array, b_array]\n if c_shape is not None:\n c_array = np.random.uniform(size=c_shape).astype(dtype)\n input_names.append(\"c\")\n input_nodes.append(helper.make_tensor_value_info(\"c\", ONNX_DTYPE, list(c_shape)))\n input_values.append(c_array)\n\n gemm_node = helper.make_node(\"Gemm\", input_names, [\"out\"])\n\n graph = helper.make_graph(\n [gemm_node],\n \"gemm_test\",\n inputs=input_nodes,\n outputs=[helper.make_tensor_value_info(\"out\", ONNX_DTYPE, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"gemm_test\")\n verify_with_ort_with_inputs(model, input_values, freeze_params=freeze_params, dtype=dtype)\n\n\[email protected]_gpu\ndef test_gemm():\n verify_gemm(a_shape=(4, 3), b_shape=(3, 4))\n verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,))\n verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,), freeze_params=True)\n verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,), freeze_params=True, dtype=\"float16\")\n\n\[email protected]_gpu\ndef test_matmul():\n a_shape = (4, 3)\n b_shape = (3, 4)\n out_shape = [a_shape[0], b_shape[1]]\n\n a_array = np.random.uniform(size=a_shape).astype(\"float32\")\n b_array = 
np.random.uniform(size=b_shape).astype(\"float32\")\n\n mul_node = helper.make_node(\"MatMul\", [\"a\", \"b\"], [\"out\"])\n\n graph = helper.make_graph(\n [mul_node],\n \"matmul_test\",\n inputs=[\n helper.make_tensor_value_info(\"a\", TensorProto.FLOAT, list(a_shape)),\n helper.make_tensor_value_info(\"b\", TensorProto.FLOAT, list(b_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"matmul_test\")\n verify_with_ort_with_inputs(model, [a_array, b_array])\n\n\ndef verify_batch_matmul(a_shape, b_shape, out_shape, target, dev):\n a_array = np.random.uniform(size=a_shape).astype(\"float32\")\n b_array = np.random.uniform(size=b_shape).astype(\"float32\")\n\n mul_node = helper.make_node(\"MatMul\", [\"a\", \"b\"], [\"out\"])\n\n graph = helper.make_graph(\n [mul_node],\n \"matmul_test\",\n inputs=[\n helper.make_tensor_value_info(\"a\", TensorProto.FLOAT, list(a_shape)),\n helper.make_tensor_value_info(\"b\", TensorProto.FLOAT, list(b_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, out_shape)],\n )\n\n model = helper.make_model(graph, producer_name=\"matmul_test\")\n verify_with_ort_with_inputs(model, [a_array, b_array], use_vm=True, targets=[target])\n\n\[email protected]_gpu\ndef test_batch_matmul(target, dev):\n verify_batch_matmul((2, 3, 4, 3), (2, 3, 3, 4), (2, 3, 4, 4), target, dev)\n verify_batch_matmul((2, 4, 3), (3, 4), (2, 4, 4), target, dev)\n verify_batch_matmul((2, 3, 4, 3), (3, 4), (2, 3, 4, 4), target, dev)\n # Test implicit broadcasting.\n verify_batch_matmul((4, 3), (2, 3, 4), (2, 4, 4), target, dev)\n verify_batch_matmul((2, 4, 3), (1, 3, 4), (2, 4, 4), target, dev)\n verify_batch_matmul((1, 4, 3), (2, 3, 4), (2, 4, 4), target, dev)\n verify_batch_matmul((4, 32, 16), (16, 32), (4, 32, 32), target, dev)\n verify_batch_matmul((4, 32, 16, 32), (32, 16), (4, 32, 16, 16), target, dev)\n\n\ndef verify_simple_dynamic_model(a_shape, b_shape, target, dev):\n def verify_model(ex, a_shape, b_shape):\n a_array = np.random.uniform(size=a_shape).astype(\"float32\")\n b_array = np.random.uniform(size=b_shape).astype(\"float32\")\n # matmul\n out_np = np.matmul(a_array, b_array)\n # relu\n out_np[out_np < 0] = 0\n\n tvm_out = ex.evaluate()(a_array, b_array).numpy()\n tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)\n\n mul_node = helper.make_node(\"MatMul\", [\"a\", \"b\"], [\"out\"])\n relu_node = helper.make_node(\"Relu\", [\"out\"], [\"relu\"])\n\n a_array = np.random.uniform(size=a_shape).astype(\"float32\")\n b_array = np.random.uniform(size=b_shape).astype(\"float32\")\n # matmul\n out_np = np.matmul(a_array, b_array)\n\n graph = helper.make_graph(\n [mul_node, relu_node],\n \"matmul_test\",\n inputs=[\n helper.make_tensor_value_info(\"a\", TensorProto.FLOAT, list(a_shape)),\n helper.make_tensor_value_info(\"b\", TensorProto.FLOAT, list(b_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"relu\", TensorProto.FLOAT, list(out_np.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"matmul_test\")\n\n a_anys = [relay.Any()] * len(a_shape)\n b_anys = [relay.Any()] * len(b_shape)\n\n mod, params = relay.frontend.from_onnx(model, {\"a\": a_anys, \"b\": b_anys})\n ex = relay.create_executor(\"vm\", mod=mod, device=dev, target=target)\n verify_model(ex, a_shape, b_shape)\n verify_model(ex, [a * 2 for a in a_shape], [b * 2 for b in b_shape])\n verify_model(ex, [a * 3 for a in a_shape], [b * 3 for b in b_shape])\n\n\n# 
TODO(mbrookhart, electriclilies): Add CUDA as a target once batch matmul is fixed\n@tvm.testing.parametrize_targets(\"llvm\")\ndef test_batch_matmul_dynamic_model(target, dev):\n verify_simple_dynamic_model((2, 3, 4, 3), (2, 3, 3, 4), target, dev)\n verify_simple_dynamic_model((2, 4, 3), (3, 4), target, dev)\n verify_simple_dynamic_model((2, 3, 4, 3), (3, 4), target, dev)\n\n\ndef verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):\n in_array = np.random.uniform(size=shape).astype(dtype)\n\n if alpha is None and beta is None and bias is None:\n alpha = 0.0001\n beta = 0.75\n bias = 1.0\n node = onnx.helper.make_node(\"LRN\", inputs=[\"in\"], outputs=[\"out\"], size=nsize)\n else:\n node = onnx.helper.make_node(\n \"LRN\", inputs=[\"in\"], outputs=[\"out\"], alpha=alpha, beta=beta, bias=bias, size=nsize\n )\n\n graph = helper.make_graph(\n [node],\n \"lrn_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(shape))],\n )\n model = helper.make_model(graph, producer_name=\"lrn_test\")\n verify_with_ort_with_inputs(model, [in_array])\n\n\n@tvm.testing.uses_gpu\ndef test_lrn():\n verify_lrn((5, 5, 5, 5), 3, \"float32\")\n verify_lrn((5, 5, 5, 5), 3, \"float32\", alpha=0.0002, beta=0.5, bias=2.0)\n\n\ndef verify_instance_norm(shape, axis=1):\n x = np.random.randn(*shape).astype(np.float32)\n gamma = np.random.randn(shape[1]).astype(np.float32)\n beta = np.random.randn(shape[1]).astype(np.float32)\n epsilon = 1e-5\n\n node = onnx.helper.make_node(\n \"InstanceNormalization\",\n inputs=[\"x\", \"gamma\", \"beta\"],\n outputs=[\"y\"],\n epsilon=epsilon,\n )\n graph = helper.make_graph(\n [node],\n \"instance_norm_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(shape)),\n helper.make_tensor_value_info(\"gamma\", TensorProto.FLOAT, (shape[1],)),\n helper.make_tensor_value_info(\"beta\", TensorProto.FLOAT, (shape[1],)),\n ],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(shape))],\n )\n model = helper.make_model(graph, producer_name=\"instance_norm_test\")\n verify_with_ort_with_inputs(model, [x, gamma, beta], out_shape=[shape])\n\n\n@tvm.testing.uses_gpu\ndef test_instance_norm():\n verify_instance_norm((2, 3, 4, 5))\n verify_instance_norm((32, 64, 80, 64))\n verify_instance_norm((8, 6, 5))\n verify_instance_norm((8, 7, 6, 5, 4))\n\n\ndef verify_upsample_nearest():\n scale = 2\n in_shape = (1, 1, 3, 3)\n out_shape = (1, 1, 3 * scale, 3 * scale)\n y = helper.make_node(\"Upsample\", [\"in\"], [\"out\"], mode=\"nearest\", scales=[1.0, 1.0, 2.0, 2.0])\n\n in_array = np.random.uniform(size=in_shape).astype(np.float32)\n\n graph = helper.make_graph(\n [y],\n \"upsample_nearest_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"upsample_nearest_test\")\n verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7)\n\n\ndef verify_upsample3d_nearest():\n scale = 2\n in_shape = (1, 1, 3, 3, 3)\n out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)\n y = helper.make_node(\n \"Upsample\", [\"in\"], [\"out\"], mode=\"nearest\", scales=[1.0, 1.0, 2.0, 2.0, 2.0]\n )\n\n in_array = np.random.uniform(size=in_shape).astype(np.float32)\n\n graph = helper.make_graph(\n [y],\n \"upsample_nearest_test\",\n inputs=[helper.make_tensor_value_info(\"in\", 
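# --- Illustrative sketch (not part of the original test suite) ---
# The "implicit broadcasting" cases in test_batch_matmul above follow
# np.matmul's rules: batch axes broadcast, and a 2-D operand is promoted
# by prepending batch axes.
import numpy as np

a = np.random.uniform(size=(4, 3)).astype("float32")
b = np.random.uniform(size=(2, 3, 4)).astype("float32")
assert np.matmul(a, b).shape == (2, 4, 4)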
TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"upsample_nearest_test\")\n # Upsample is deprecated after opset 9\n verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7)\n\n\ndef verify_upsample_bilinear():\n scale = 2\n in_shape = (1, 1, 3, 3)\n out_shape = (1, 1, 3 * scale, 3 * scale)\n y = helper.make_node(\"Upsample\", [\"in\"], [\"out\"], mode=\"linear\", scales=[1.0, 1.0, 2.0, 2.0])\n\n in_array = np.random.uniform(size=in_shape).astype(np.float32)\n\n graph = helper.make_graph(\n [y],\n \"upsample_bilinear_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"upsample_bilinear_test\")\n verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7)\n\n\ndef verify_upsample3d_trilinear():\n scale = 2\n in_shape = (1, 1, 3, 3, 3)\n out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)\n y = helper.make_node(\"Upsample\", [\"in\", \"scales\"], [\"out\"], mode=\"linear\")\n scales = [1.0, 1.0, 2.0, 2.0, 2.0]\n in_array = np.random.uniform(size=in_shape).astype(np.float32)\n out_array = tvm.topi.testing.resize3d_python(\n in_array,\n (scale, scale, scale),\n \"NCDHW\",\n \"linear\",\n coordinate_transformation_mode=\"asymmetric\",\n )\n\n ref_array = np.array(scales)\n ref_node = helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"scales\"],\n value=onnx.helper.make_tensor(\n name=\"const_tensor\",\n data_type=TensorProto.FLOAT,\n dims=ref_array.shape,\n vals=ref_array.flatten().astype(float),\n ),\n )\n\n graph = helper.make_graph(\n [ref_node, y],\n \"upsample_trilinear_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(in_shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"upsample_trilinear_test\")\n # TODO(jwfromm): Trilinear upsampling not supported in 1.0.0 onnxruntime.\n # Replace topi comparison with verify_with_ort once we update.\n for target, dev in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output(model, in_array, target, dev, out_shape, \"float32\")\n tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_upsample():\n verify_upsample_nearest()\n verify_upsample_bilinear()\n verify_upsample3d_nearest()\n verify_upsample3d_trilinear()\n\n\ndef verify_softmax(inshape, axis):\n opname = \"Softmax\"\n indata = np.random.uniform(size=inshape).astype(np.float32)\n outshape = inshape\n y = helper.make_node(opname, [\"in\"], [\"out\"])\n if axis is not None:\n axis_attr = helper.make_attribute(\"axis\", axis)\n y.attribute.append(axis_attr)\n\n graph = helper.make_graph(\n [y],\n opname + \"_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outshape))],\n )\n\n model = helper.make_model(graph, producer_name=opname + \"_test\")\n verify_with_ort_with_inputs(model, [indata])\n\n\[email protected]_gpu\ndef test_softmax():\n verify_softmax((1, 10), None)\n verify_softmax((1, 10), 1)\n\n\ndef verify_min(input_dim):\n dtype = \"float32\"\n\n a_np1 = np.random.uniform(size=input_dim).astype(dtype)\n a_np2 = 
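# --- Illustrative sketch (not part of the original test suite) ---
# Upsample's `scales` (see verify_upsample3d_trilinear above) multiply the
# input dimensions element-wise: out[i] = floor(in[i] * scales[i]).
import math

in_shape = (1, 1, 3, 3, 3)
scales = [1.0, 1.0, 2.0, 2.0, 2.0]
out_shape = tuple(math.floor(d * s) for d, s in zip(in_shape, scales))
assert out_shape == (1, 1, 6, 6, 6)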
np.random.uniform(size=input_dim).astype(dtype)\n a_np3 = np.random.uniform(size=input_dim).astype(dtype)\n\n min_node = helper.make_node(\"Min\", [\"a_np1\", \"a_np2\", \"a_np3\"], [\"out\"])\n\n graph = helper.make_graph(\n [min_node],\n \"Min_test\",\n inputs=[\n helper.make_tensor_value_info(\"a_np1\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np2\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np3\", TensorProto.FLOAT, list(input_dim)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(input_dim))],\n )\n\n model = helper.make_model(graph, producer_name=\"Min_test\")\n verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])\n\n\[email protected]_gpu\ndef test_forward_min():\n verify_min((1, 3, 20, 20))\n verify_min((20, 20))\n\n\ndef verify_max(input_dim):\n dtype = \"float32\"\n\n a_np1 = np.random.uniform(size=input_dim).astype(dtype)\n a_np2 = np.random.uniform(size=input_dim).astype(dtype)\n a_np3 = np.random.uniform(size=input_dim).astype(dtype)\n\n max_node = helper.make_node(\"Max\", [\"a_np1\", \"a_np2\", \"a_np3\"], [\"out\"])\n\n graph = helper.make_graph(\n [max_node],\n \"Max_test\",\n inputs=[\n helper.make_tensor_value_info(\"a_np1\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np2\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np3\", TensorProto.FLOAT, list(input_dim)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(input_dim))],\n )\n\n model = helper.make_model(graph, producer_name=\"Max_test\")\n verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])\n\n\[email protected]_gpu\ndef test_forward_max():\n verify_max((1, 3, 20, 20))\n verify_max((20, 20))\n\n\ndef verify_mean(input_dim):\n dtype = \"float32\"\n\n a_np1 = np.random.uniform(size=input_dim).astype(dtype)\n a_np2 = np.random.uniform(size=input_dim).astype(dtype)\n a_np3 = np.random.uniform(size=input_dim).astype(dtype)\n\n mean_node = helper.make_node(\"Mean\", [\"a_np1\", \"a_np2\", \"a_np3\"], [\"out\"])\n\n graph = helper.make_graph(\n [mean_node],\n \"Mean_test\",\n inputs=[\n helper.make_tensor_value_info(\"a_np1\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np2\", TensorProto.FLOAT, list(input_dim)),\n helper.make_tensor_value_info(\"a_np3\", TensorProto.FLOAT, list(input_dim)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(input_dim))],\n )\n\n model = helper.make_model(graph, producer_name=\"Mean_test\")\n verify_with_ort_with_inputs(model, [a_np1, a_np2, a_np3])\n\n\[email protected]_gpu\ndef test_forward_mean():\n verify_mean((1, 3, 20, 20))\n verify_mean((20, 20))\n\n\ndef verify_hardsigmoid(input_dim, alpha, beta):\n dtype = \"float32\"\n\n a_np1 = np.random.uniform(size=input_dim).astype(dtype)\n\n hardsigmoid_node = helper.make_node(\"HardSigmoid\", [\"a_np1\"], [\"out\"], alpha=alpha, beta=beta)\n\n graph = helper.make_graph(\n [hardsigmoid_node],\n \"HardSigmoid_test\",\n inputs=[helper.make_tensor_value_info(\"a_np1\", TensorProto.FLOAT, list(input_dim))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(input_dim))],\n )\n\n model = helper.make_model(graph, producer_name=\"HardSigmoid_test\")\n verify_with_ort_with_inputs(model, [a_np1])\n\n\[email protected]_gpu\ndef test_forward_hardsigmoid():\n verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)\n verify_hardsigmoid((20, 20), 0.3, 0.4)\n\n\ndef verify_argreduce(input_dim, 
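# --- Illustrative sketch (not part of the original test suite) ---
# Reference formula behind verify_hardsigmoid above:
# HardSigmoid(x) = max(0, min(1, alpha * x + beta)).
import numpy as np

def ref_hardsigmoid(x, alpha, beta):
    return np.clip(alpha * x + beta, 0.0, 1.0)

x = np.random.uniform(-2, 2, size=(20, 20)).astype("float32")
y = ref_hardsigmoid(x, 0.3, 0.4)
assert float(y.min()) >= 0.0 and float(y.max()) <= 1.0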
op_name, axis=None, keepdims=None):\n a_np1 = np.random.uniform(-10, 10, input_dim).astype(np.int32)\n out_shape = list(a_np1.shape)\n def_axis = axis if axis is not None else 0\n if keepdims == 1 or keepdims is None:\n out_shape[def_axis] = 1\n else:\n out_shape.pop(def_axis)\n\n node = onnx.helper.make_node(op_name, inputs=[\"a_np1\"], outputs=[\"out\"])\n\n if keepdims is not None:\n keepdims_attr = helper.make_attribute(\"keepdims\", keepdims)\n node.attribute.append(keepdims_attr)\n if axis is not None:\n axis_attr = helper.make_attribute(\"axis\", axis)\n node.attribute.append(axis_attr)\n\n graph = helper.make_graph(\n [node],\n \"argreduce_test\",\n inputs=[helper.make_tensor_value_info(\"a_np1\", TensorProto.INT32, list(a_np1.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.INT64, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"argreduce_test\")\n verify_with_ort_with_inputs(model, [a_np1])\n\n\n# TODO (mbrookhart, electriclilies) Fix argmin on GPU and enable this test\n# @tvm.testing.uses_gpu\ndef test_forward_arg_min_max():\n \"\"\"Verify argmin and argmax\"\"\"\n verify_argreduce([3, 4, 4], \"ArgMin\")\n verify_argreduce([3, 4, 4], \"ArgMax\")\n verify_argreduce([3, 4, 4], \"ArgMin\", axis=1)\n verify_argreduce([3, 4, 4], \"ArgMax\", axis=0)\n verify_argreduce([3, 4, 4], \"ArgMin\", keepdims=0)\n verify_argreduce([3, 4, 4], \"ArgMax\", keepdims=1)\n for axis in [None, 0, 1, 2]:\n for keepdims in [None, True, False]:\n verify_argreduce([3, 4, 4], \"ArgMin\", axis, keepdims)\n verify_argreduce([3, 4, 4], \"ArgMax\", axis, keepdims)\n\n\ndef verify_constantofshape(input_dim, value, dtype):\n fill_node = helper.make_node(\n \"ConstantOfShape\",\n [\"input\"],\n [\"output\"],\n value=helper.make_tensor(\n \"value\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], (1,), (value,)\n ),\n )\n\n inputs = [helper.make_tensor_value_info(\"input\", TensorProto.INT64, [len(input_dim)])]\n\n graph = helper.make_graph(\n [fill_node],\n \"fill_test\",\n inputs,\n outputs=[\n helper.make_tensor_value_info(\n \"output\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], input_dim\n )\n ],\n )\n\n model = helper.make_model(graph, producer_name=\"fill_test\")\n input_np = np.array(input_dim).astype(\"int64\")\n verify_with_ort_with_inputs(model, [input_np], use_vm=True)\n\n\n@tvm.testing.uses_gpu\ndef test_constantofshape():\n verify_constantofshape((2, 3, 4, 5), 10, \"float32\")\n verify_constantofshape((3, 3), 0, \"int32\")\n verify_constantofshape((1, 2, 3), -1, \"float32\")\n\n\ndef verify_pad(indata, pads, mode=\"constant\", value=0.0):\n indata = np.array(indata).astype(np.float32)\n # numpy expected result\n len_dim = len(pads) // 2\n np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]\n # onnx graph\n if mode in [\"edge\", \"reflect\"]:\n outdata = np.pad(indata, pad_width=np_pads, mode=mode)\n node = helper.make_node(\n \"Pad\",\n inputs=[\"input\"],\n outputs=[\"output\"],\n mode=mode,\n pads=pads,\n )\n else:\n outdata = np.pad(indata, pad_width=np_pads, mode=\"constant\", constant_values=value)\n node = helper.make_node(\n \"Pad\", inputs=[\"input\"], outputs=[\"output\"], mode=\"constant\", pads=pads, value=value\n )\n graph = helper.make_graph(\n [node],\n \"pad_test\",\n inputs=[helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(outdata.shape))],\n )\n model = helper.make_model(graph, producer_name=\"pad_test\")\n 
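# --- Illustrative sketch (not part of the original test suite) ---
# verify_pad above converts ONNX's flat pads layout
# [x1_begin, x2_begin, ..., x1_end, x2_end, ...] into np.pad's per-axis
# (before, after) pairs; the same conversion in isolation:
import numpy as np

pads = [0, 1, 0, 0]  # begins = (0, 1), ends = (0, 0)
half = len(pads) // 2
np_pads = [(pads[i], pads[i + half]) for i in range(half)]  # [(0, 0), (1, 0)]
padded = np.pad(np.zeros((2, 2), dtype=np.float32), pad_width=np_pads)
assert padded.shape == (2, 3)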
verify_with_ort_with_inputs(model, [indata], [outdata.shape], dtype=\"float32\", opset=2)\n\n\ndef verify_pad_v11(indata, pads, mode=\"constant\", value=0.0):\n indata = np.array(indata).astype(np.float32)\n # numpy expect result\n len_dim = len(pads) // 2\n np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]\n pads = np.array(pads)\n # onnx graph\n if mode in [\"edge\", \"reflect\"]:\n inputs = [indata]\n outdata = np.pad(indata, pad_width=np_pads, mode=mode)\n node = helper.make_node(\"Pad\", inputs=[\"input\", \"pads\"], outputs=[\"output\"], mode=mode)\n graph = helper.make_graph(\n [node],\n \"pad_test\",\n inputs=[\n helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape)),\n helper.make_tensor_value_info(\"pads\", TensorProto.INT64, (len(pads),)),\n ],\n initializer=[helper.make_tensor(\"pads\", TensorProto.INT64, (len(pads),), pads)],\n outputs=[\n helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(outdata.shape))\n ],\n )\n else:\n inputs = [indata]\n outdata = np.pad(indata, pad_width=np_pads, mode=\"constant\", constant_values=value)\n node = helper.make_node(\n \"Pad\", inputs=[\"input\", \"pads\", \"constant_value\"], outputs=[\"output\"], mode=\"constant\"\n )\n graph = helper.make_graph(\n [node],\n \"pad_test\",\n inputs=[\n helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape)),\n helper.make_tensor_value_info(\"pads\", TensorProto.INT64, (len(pads),)),\n helper.make_tensor_value_info(\"constant_value\", TensorProto.FLOAT, (1,)),\n ],\n initializer=[\n helper.make_tensor(\"pads\", TensorProto.INT64, (len(pads),), pads),\n helper.make_tensor(\"constant_value\", TensorProto.FLOAT, (1,), [value]),\n ],\n outputs=[\n helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, list(outdata.shape))\n ],\n )\n model = helper.make_model(graph, producer_name=\"pad_test\")\n verify_with_ort_with_inputs(model, inputs, opset=11, use_vm=True)\n\n\[email protected]_gpu\ndef test_pad():\n verify_pad(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], \"constant\", 0.0)\n verify_pad(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], \"constant\", 0.0)\n verify_pad(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], \"constant\", 5.0)\n verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], \"edge\")\n verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], \"reflect\")\n\n verify_pad_v11(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], \"constant\", 0.0)\n verify_pad_v11(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], \"constant\", 0.0)\n verify_pad_v11(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], \"constant\", 5.0)\n verify_pad_v11(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], \"edge\")\n verify_pad_v11(\n np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], \"reflect\"\n )\n\n\ndef verify_reduce_func(func, data, axis, keepdims):\n inshape = data.shape\n outshape = np.sum(data, axis=axis, keepdims=keepdims == 1).shape\n\n if axis:\n node = onnx.helper.make_node(\n func, inputs=[\"x\"], outputs=[\"y\"], axes=axis, keepdims=keepdims\n )\n else:\n node = onnx.helper.make_node(func, inputs=[\"x\"], outputs=[\"y\"], keepdims=keepdims)\n\n graph = helper.make_graph(\n [node],\n \"reduce_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(inshape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(outshape))],\n )\n\n model 
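# --- Illustrative sketch (not part of the original test suite) ---
# numpy references for two of the reductions driven through
# verify_reduce_func here; keepdims mirrors the ONNX attribute.
import numpy as np

x = np.random.randn(3, 3, 3).astype(np.float32)
reduce_l2 = np.sqrt(np.sum(x * x, axis=1, keepdims=True))            # ReduceL2
reduce_logsumexp = np.log(np.sum(np.exp(x), axis=1, keepdims=True))  # ReduceLogSumExp
assert reduce_l2.shape == reduce_logsumexp.shape == (3, 1, 3)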
= helper.make_model(graph, producer_name=\"reduce_test\")\n\n verify_with_ort_with_inputs(model, [data], [outshape], opset=11)\n\n\[email protected]_gpu\ndef test_all_reduce_funcs():\n funcs = [\n \"ReduceMax\",\n \"ReduceMean\",\n \"ReduceMin\",\n \"ReduceProd\",\n \"ReduceSum\",\n \"ReduceSumSquare\",\n \"ReduceLogSum\",\n \"ReduceLogSumExp\",\n \"ReduceL1\",\n \"ReduceL2\",\n ]\n\n for func in funcs:\n for keepdims in [True, False]:\n verify_reduce_func(\n func, np.random.randn(3, 2, 2).astype(np.float32), axis=None, keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(3, 2, 3).astype(np.float32), axis=None, keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1, 2), keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1,), keepdims=keepdims\n )\n\n verify_reduce_func(\n func, np.random.randn(1, 3, 4, 1).astype(np.float32), axis=(1,), keepdims=keepdims\n )\n\n\ndef verify_split(indata, outdatas, split, axis=0, pass_split=True, opset=11):\n indata = np.array(indata).astype(np.float32)\n outdatas = [np.array(o).astype(np.float32) for o in outdatas]\n inputs = [helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape))]\n input_names = [\"input\"]\n initializer = []\n\n if split:\n split_index = range(len(split))\n else:\n split_index = range(len(outdatas))\n\n if pass_split:\n if opset >= 13:\n input_names.append(\"split\")\n np_split = np.array(split).astype(np.int64)\n inputs.append(\n helper.make_tensor_value_info(\"split\", TensorProto.INT64, list(np_split.shape))\n )\n indata = [indata, np_split]\n initializer.append(\n helper.make_tensor(\"split\", TensorProto.INT64, list(np_split.shape), np_split)\n )\n node = helper.make_node(\n \"Split\",\n inputs=input_names,\n outputs=[\"output_{}\".format(i) for i in range(len(split_index))],\n axis=axis,\n )\n\n if pass_split and opset < 13:\n split_attr = helper.make_attribute(\"split\", split)\n node.attribute.append(split_attr)\n\n graph = helper.make_graph(\n [node],\n \"split_test\",\n inputs=inputs,\n initializer=initializer,\n outputs=[\n helper.make_tensor_value_info(\n \"output_{}\".format(i), TensorProto.FLOAT, list(outdatas[i].shape)\n )\n for i in range(len(split_index))\n ],\n )\n model = helper.make_model(graph, producer_name=\"split_test\")\n verify_with_ort_with_inputs(model, indata, out_shape=list(range(len(split_index))), opset=opset)\n\n\[email protected]_gpu\ndef test_split():\n # 1D\n verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0)\n verify_split(\n [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0, False\n )\n verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0], [4.0, 5.0, 6.0]], [2, 1, 3], 0)\n # 2D\n verify_split(\n [[1.0, 2.0, 3.0, 4.0], [7.0, 8.0, 9.0, 10.0]],\n [[[1.0, 2.0], [7.0, 8.0]], [[3.0, 4.0], [9.0, 10.0]]],\n [2, 2],\n 1,\n )\n # Split evenly (unstack)\n verify_split([1, 2, 3], [[1], [2], [3]], False, 0, False)\n # Split a single value to a single value\n verify_split([1], [[1]], [1], pass_split=True)\n\n\[email protected]_gpu\ndef test_binary_ops():\n in_shape = (1, 2, 3, 3)\n dtype = \"float32\"\n out_shape = in_shape\n\n def verify_binary_ops(op, x, y, out_type=\"float32\"):\n z = helper.make_node(op, [\"in1\", \"in2\"], [\"out\"])\n graph = 
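# --- Illustrative sketch (not part of the original test suite) ---
# ONNX Split with an explicit `split` (see verify_split above) behaves
# like np.split at the cumulative offsets:
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=np.float32)
split = [2, 1, 3]
pieces = np.split(x, np.cumsum(split)[:-1])
assert [p.tolist() for p in pieces] == [[1.0, 2.0], [3.0], [4.0, 5.0, 6.0]]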
helper.make_graph(\n [z],\n \"_test\",\n inputs=[\n helper.make_tensor_value_info(\"in1\", TensorProto.FLOAT, x.shape),\n helper.make_tensor_value_info(\"in2\", TensorProto.FLOAT, y.shape),\n ],\n outputs=[\n helper.make_tensor_value_info(\n \"out\", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(out_type)], list(out_shape)\n )\n ],\n )\n model = helper.make_model(graph, producer_name=\"_test\")\n verify_with_ort_with_inputs(model, [x, y])\n\n x = np.random.uniform(size=in_shape).astype(dtype)\n y = np.random.uniform(size=in_shape).astype(dtype)\n z = np.random.uniform(size=(3,)).astype(dtype)\n verify_binary_ops(\"Add\", x, y)\n verify_binary_ops(\"Add\", x, z)\n verify_binary_ops(\"Sub\", x, y)\n verify_binary_ops(\"Sub\", x, z)\n verify_binary_ops(\"Mul\", x, y)\n verify_binary_ops(\"Mul\", x, z)\n verify_binary_ops(\"Div\", x, y)\n verify_binary_ops(\"Div\", x, z)\n verify_binary_ops(\"Sum\", x, y)\n verify_binary_ops(\"Sum\", x, z)\n verify_binary_ops(\"Greater\", x, y, \"bool\")\n verify_binary_ops(\"Greater\", x, z, \"bool\")\n verify_binary_ops(\"Less\", x, y, \"bool\")\n verify_binary_ops(\"Less\", x, z, \"bool\")\n verify_binary_ops(\"Equal\", x, y, \"bool\")\n verify_binary_ops(\"Equal\", x, z, \"bool\")\n\n\[email protected]_gpu\ndef test_unary_ops():\n in_shape = (1, 2, 3, 3)\n dtype = \"float32\"\n out_shape = in_shape\n\n def verify_unary_ops(op, x, rtol=1e-5, atol=1e-5, dtype=\"float32\"):\n x = x.astype(dtype)\n ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]\n z = helper.make_node(op, [\"in1\"], [\"out\"])\n graph = helper.make_graph(\n [z],\n \"_test\",\n inputs=[\n helper.make_tensor_value_info(\"in1\", ONNX_DTYPE, list(in_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", ONNX_DTYPE, list(out_shape))],\n )\n model = helper.make_model(graph, producer_name=\"_test\")\n verify_with_ort_with_inputs(model, [x], rtol=rtol, atol=atol)\n\n x = np.random.uniform(size=in_shape)\n verify_unary_ops(\"Neg\", x)\n verify_unary_ops(\"Abs\", x)\n verify_unary_ops(\"Reciprocal\", x)\n verify_unary_ops(\"Reciprocal\", x, dtype=\"float16\")\n verify_unary_ops(\"Sqrt\", x)\n verify_unary_ops(\"Relu\", x)\n verify_unary_ops(\"Exp\", x)\n verify_unary_ops(\"Log\", x)\n verify_unary_ops(\"Log\", x)\n verify_unary_ops(\"Acos\", x)\n verify_unary_ops(\"Acosh\", x)\n verify_unary_ops(\"Asin\", x)\n verify_unary_ops(\"Asinh\", x)\n verify_unary_ops(\"Atan\", x)\n verify_unary_ops(\"Atanh\", x)\n verify_unary_ops(\"Cos\", x)\n verify_unary_ops(\"Cosh\", x)\n verify_unary_ops(\"Sin\", x)\n verify_unary_ops(\"Sinh\", x)\n verify_unary_ops(\"Tan\", x)\n verify_unary_ops(\"Tanh\", x)\n verify_unary_ops(\"Sigmoid\", x)\n verify_unary_ops(\"Softsign\", x)\n\n\[email protected]_gpu\ndef test_leaky_relu():\n def leaky_relu_x(x, alpha):\n return np.where(x >= 0, x, x * alpha)\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6), leaky_relu_x, {\"alpha\": 0.25}, \"float32\", \"LeakyRelu\", {\"alpha\": 0.25}\n )\n\n\[email protected]_gpu\ndef test_elu():\n def elu_x(x, alpha):\n return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6), elu_x, {\"alpha\": 0.25}, \"float32\", \"Elu\", {\"alpha\": 0.25}\n )\n\n\[email protected]_gpu\ndef test_selu():\n def selu_x(x, alpha, gamma):\n return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0))\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n selu_x,\n {\"alpha\": 0.25, \"gamma\": 0.3},\n \"float32\",\n \"Selu\",\n {\"alpha\": 0.25, \"gamma\": 0.3},\n )\n\n\[email protected]_gpu\ndef test_prelu():\n def 
verify_prelu(x_shape, a_shape):\n node = helper.make_node(\"PRelu\", inputs=[\"X\", \"slope\"], outputs=[\"Y\"])\n\n graph = helper.make_graph(\n [node],\n \"prelu_test\",\n inputs=[\n helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"slope\", TensorProto.FLOAT, list(a_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, list(x_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"prelu_test\")\n\n verify_with_ort(\n model,\n [x_shape, a_shape],\n out_shape=[list(x_shape)],\n use_vm=True,\n convert_to_static=True,\n )\n\n verify_prelu([3, 4, 5, 6], [1, 4, 1, 1])\n verify_prelu([1, 8, 5, 6], [1, 8, 1, 1])\n verify_prelu([2, 12, 16, 16], [1, 12, 1, 1])\n verify_prelu([2, 12, 16, 16], [1]) # Test alpha broadcasting.\n verify_prelu([3, 1], [3, 1]) # Test non NCHW workload.\n\n\[email protected]_gpu\ndef test_ThresholdedRelu():\n def ThresholdedRelu_x(x, alpha):\n out_np = np.clip(x, alpha, np.inf)\n out_np[out_np == alpha] = 0\n return out_np\n\n _test_onnx_op_elementwise(\n (2, 4, 5, 6),\n ThresholdedRelu_x,\n {\"alpha\": 0.25},\n \"float32\",\n \"ThresholdedRelu\",\n {\"alpha\": 0.25},\n )\n\n\[email protected]_gpu\ndef test_LogSoftmax():\n _test_onnx_op_elementwise(\n (1, 4), tvm.topi.testing.log_softmax_python, {}, \"float32\", \"LogSoftmax\", {\"axis\": 1}\n )\n\n\ndef check_torch_conversion(model, input_size):\n dummy_input = torch.randn(*input_size)\n file_name = \"{}.onnx\".format(model.__name__)\n # Set verbose=True for more output\n torch.onnx.export(model(), dummy_input, file_name, export_params=True, verbose=False)\n onnx_model = onnx.load(file_name)\n input_data = np.random.uniform(size=input_size).astype(\"float32\")\n verify_with_ort_with_inputs(onnx_model, [input_data], apply_softmax=True)\n\n\[email protected]_gpu\ndef test_resnet():\n check_torch_conversion(torchvision.models.resnet18, (1, 3, 224, 224))\n # check_torch_conversion(torchvision.models.resnet101, (1,3,224,224))\n\n\n# def test_alexnet():\n# Torch's ONNX export does not support the adaptive pooling used by AlexNet?\n# check_torch_conversion(torchvision.models.alexnet, (1,3,224,224))\n\n# Torch's ONNX export does not support the adaptive pooling used by vgg16?\n# def test_vgg16():\n# check_torch_conversion(torchvision.models.vgg16, (1,3,224,224))\n\n# TODO(@jroesch): Update Torch + ONNX to support this import.\n# def test_squeezenet():\n# # Torch's ONNX export does not support the max pooling used by Squezenet\n# check_torch_conversion(torchvision.models.squeezenet1_0, (1,3,224,224))\n\n\[email protected]_gpu\ndef test_densenet():\n check_torch_conversion(torchvision.models.densenet161, (1, 3, 224, 224))\n\n\[email protected]_gpu\ndef test_inception():\n check_torch_conversion(torchvision.models.inception_v3, (1, 3, 224, 224))\n\n\n# TODO(@jroesch): Update Torch + ONNX to support this import.\n# def test_googlenet():\n# check_torch_conversion(torchvision.models.googlenet, (1,3,224,224))\n\n# TODO(@jroesch): Update Torch + ONNX to support this import.\n# def test_shufflenetv2():\n# check_torch_conversion(torchvision.models.shufflenetv2, (1,3,224,224))\n\n\[email protected]_gpu\ndef test_sign():\n def Sign_x(x):\n return np.sign(x)\n\n _test_onnx_op_elementwise((3, 4, 5, 6), Sign_x, {}, \"float32\", \"Sign\", {})\n\n\ndef verify_not(indata, dtype):\n x = indata.astype(dtype)\n\n node = helper.make_node(\n \"Not\",\n inputs=[\"in\"],\n outputs=[\"out\"],\n )\n\n graph = helper.make_graph(\n [node],\n \"not_test\",\n 
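# --- Illustrative sketch (not part of the original test suite) ---
# The closures in test_elu/test_selu and the PRelu cases above share one
# negative-side pattern; stand-alone references with sketch-local names:
import numpy as np

def ref_elu(x, alpha):
    return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

def ref_prelu(x, slope):
    return np.where(x >= 0, x, slope * x)  # slope broadcasts, e.g. (1, C, 1, 1)

x = np.random.randn(2, 12, 4, 4).astype(np.float32)
slope = np.full((1, 12, 1, 1), 0.1, dtype=np.float32)
assert ref_prelu(x, slope).shape == x.shape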
inputs=[helper.make_tensor_value_info(\"in\", TensorProto.BOOL, list(x.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.BOOL, list(x.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"not_test\")\n verify_with_ort_with_inputs(model, [x])\n\n\[email protected]_gpu\ndef test_not():\n # 2d\n verify_not(indata=(np.random.randn(3, 4) > 0), dtype=bool)\n # 3d\n verify_not(indata=(np.random.randn(3, 4, 5) > 0), dtype=bool)\n # 4d\n verify_not(indata=(np.random.randn(3, 4, 5, 6) > 0), dtype=bool)\n\n\ndef verify_and(indata, dtype):\n x = indata[0].astype(dtype)\n y = indata[1].astype(dtype)\n outdata = np.logical_and(x, y)\n\n node = helper.make_node(\n \"And\",\n inputs=[\"in1\", \"in2\"],\n outputs=[\"out\"],\n )\n\n graph = helper.make_graph(\n [node],\n \"and_test\",\n inputs=[\n helper.make_tensor_value_info(\"in1\", TensorProto.BOOL, list(x.shape)),\n helper.make_tensor_value_info(\"in2\", TensorProto.BOOL, list(y.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.BOOL, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"and_test\")\n verify_with_ort_with_inputs(model, [x, y], [outdata.shape])\n\n\[email protected]_gpu\ndef test_and():\n # 2d\n x = np.random.randn(3, 4) > 0\n y = np.random.randn(3, 4) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n # 3d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(3, 4, 5) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n # 4d\n x = np.random.randn(3, 4, 5, 6) > 0\n y = np.random.randn(3, 4, 5, 6) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n # 3d vs 1d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(5) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n # 3d vs 2d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(4, 5) > 0\n verify_and(indata=[x, y], dtype=bool)\n\n\ndef verify_tile_v6(indata, repeats, outdata):\n node = helper.make_node(\"Tile\", inputs=[\"input\", \"repeats\"], outputs=[\"out\"])\n graph = helper.make_graph(\n [node],\n \"tile_test\",\n inputs=[\n helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, list(indata.shape)),\n helper.make_tensor_value_info(\"repeats\", TensorProto.INT64, list(repeats.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"tile_test\")\n verify_with_ort_with_inputs(model, [indata, repeats], use_vm=True, opset=6)\n\n\[email protected]_gpu\ndef test_tile():\n x = np.random.rand(2, 3, 4, 5).astype(np.float32)\n repeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64)\n z = np.tile(x, repeats)\n verify_tile_v6(x, repeats, z)\n\n\ndef verify_erf(indata, outdata):\n node = helper.make_node(\"Erf\", inputs=[\"in\"], outputs=[\"out\"])\n graph = helper.make_graph(\n [node],\n \"erf_test\",\n inputs=[helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(outdata.shape))],\n )\n model = helper.make_model(graph, producer_name=\"erf_test\")\n verify_with_ort_with_inputs(model, [indata], [outdata.shape])\n\n\[email protected]_gpu\ndef test_erf():\n x = np.random.rand(2, 3, 4, 6).astype(np.float32)\n z = scipy.special.erf(x)\n verify_erf(x, z)\n\n\ndef verify_where(condition, x, y, dtype, outdata, dynamic=False):\n node_list = []\n where_inputs = [\"condition\", \"x\", \"y\"]\n if dynamic:\n shape_node = helper.make_node(\"Shape\", [\"x\"], [\"shape\"])\n reshape_node = 
helper.make_node(\"Reshape\", [\"x\", \"shape\"], [\"X\"])\n where_inputs[1] = \"X\"\n node_list += [shape_node, reshape_node]\n node = helper.make_node(\"Where\", inputs=where_inputs, outputs=[\"out\"])\n node_list.append(node)\n graph = helper.make_graph(\n node_list,\n \"where_test\",\n inputs=[\n helper.make_tensor_value_info(\"condition\", TensorProto.BOOL, list(condition.shape)),\n helper.make_tensor_value_info(\"x\", dtype, list(x.shape)),\n helper.make_tensor_value_info(\"y\", dtype, list(y.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", dtype, list(outdata.shape))],\n )\n model = helper.make_model(graph, producer_name=\"where_test\")\n verify_with_ort_with_inputs(model, [condition, x, y], [outdata.shape], use_vm=True)\n\n\[email protected]_gpu\ndef test_where():\n condition = np.array([[1, 0], [1, 1]], dtype=bool)\n x = np.array([[1, 2], [3, 4]], dtype=np.int64)\n y = np.array([[9, 8], [7, 6]], dtype=np.int64)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.INT64, outdata)\n\n x = np.array([[1, 2], [3, 4]], dtype=np.float32)\n y = np.array([[9, 8], [7, 6]], dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n\n x = np.array(1, dtype=np.float32)\n y = np.array([2], dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n\n x = np.array([2], dtype=np.float32)\n y = np.array(1, dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n\n condition = np.array(1, dtype=bool)\n x = np.array([[1, 2], [3, 4]], dtype=np.float32)\n y = np.array([[5, 6], [7, 8]], dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n\n x = np.array([[1, 2], [3, 4]], dtype=np.float32)\n y = np.array([[1], [7]], dtype=np.float32)\n outdata = np.where(condition, x, y)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata)\n verify_where(condition, x, y, TensorProto.FLOAT, outdata, dynamic=True)\n\n\ndef verify_or(indata, dtype):\n x = indata[0].astype(dtype)\n y = indata[1].astype(dtype)\n outdata = np.logical_or(x, y)\n\n node = helper.make_node(\n \"Or\",\n inputs=[\"in1\", \"in2\"],\n outputs=[\"out\"],\n )\n\n graph = helper.make_graph(\n [node],\n \"or_test\",\n inputs=[\n helper.make_tensor_value_info(\"in1\", TensorProto.BOOL, list(x.shape)),\n helper.make_tensor_value_info(\"in2\", TensorProto.BOOL, list(y.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.BOOL, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"or_test\")\n verify_with_ort_with_inputs(model, [x, y], [outdata.shape])\n\n\[email protected]_gpu\ndef test_or():\n # 2d\n x = np.random.randn(3, 4) > 0\n y = np.random.randn(3, 4) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n # 3d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(3, 4, 5) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n # 4d\n x = np.random.randn(3, 4, 5, 6) > 0\n y = np.random.randn(3, 4, 5, 6) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n # 3d vs 1d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(5) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n # 3d vs 2d\n x = np.random.randn(3, 4, 5) > 0\n y = np.random.randn(4, 5) > 0\n verify_or(indata=[x, y], dtype=bool)\n\n\[email protected]_gpu\ndef test_batch_norm():\n def verify_batch_norm(in_shape):\n batchnorm = onnx.helper.make_node(\n \"BatchNormalization\", 
inputs=[\"x\", \"scale\", \"B\", \"mean\", \"var\"], outputs=[\"Y\"]\n )\n\n graph = helper.make_graph(\n [batchnorm],\n \"batchnorm_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"scale\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"B\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"mean\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"var\", TensorProto.FLOAT, [in_shape[1]]),\n ],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, list(in_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"batchnorm_test\")\n # X, scale, b, mean, var\n inshapes = [in_shape, in_shape[1], in_shape[1], in_shape[1], in_shape[1]]\n verify_with_ort(model, inshapes, out_shape=[in_shape])\n\n verify_batch_norm([1, 3, 224, 224])\n verify_batch_norm([1, 3, 24, 24])\n verify_batch_norm([16, 3, 24, 24])\n verify_batch_norm([16, 16, 24, 24])\n verify_batch_norm([16, 16, 10, 10])\n\n\[email protected]_gpu\ndef test_batch_norm_dynamic_subgraph():\n def verify_batch_norm_dynamic_subgraph(in_shape, o_shape):\n\n batchnorm = onnx.helper.make_node(\n \"BatchNormalization\", inputs=[\"x\", \"scale\", \"B\", \"mean\", \"var\"], outputs=[\"Y\"]\n )\n\n shape_node = helper.make_node(\"Shape\", [\"Y\"], [\"shape\"])\n reshape_node = helper.make_node(\"Reshape\", [\"in\", \"shape\"], [\"out\"])\n graph = helper.make_graph(\n [batchnorm, shape_node, reshape_node],\n \"batchnorm_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(in_shape)),\n helper.make_tensor_value_info(\"in\", TensorProto.FLOAT, list(o_shape)),\n helper.make_tensor_value_info(\"scale\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"B\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"mean\", TensorProto.FLOAT, [in_shape[1]]),\n helper.make_tensor_value_info(\"var\", TensorProto.FLOAT, [in_shape[1]]),\n ],\n outputs=[helper.make_tensor_value_info(\"out\", TensorProto.FLOAT, list(in_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"batchnorm_test\")\n\n # X, inp, scale, b, mean, var\n inshapes = [in_shape, o_shape, in_shape[1], in_shape[1], in_shape[1], in_shape[1]]\n verify_with_ort(model, inshapes, out_shape=[in_shape], use_vm=True)\n\n verify_batch_norm_dynamic_subgraph([16, 16, 10, 10], [160, 160])\n\n\ndef verify_conv(\n x_shape,\n w_shape,\n y_shape,\n padding,\n kernel_shape,\n strides,\n dilations,\n group=1,\n auto_pad=\"NOTSET\",\n unset_pad=False,\n):\n if unset_pad:\n node = helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n # Default values for other attributes:\n strides=strides,\n dilations=dilations,\n group=group,\n )\n elif padding is None:\n ## autopadding with unset default attributes\n kwargs = {}\n if not all([s == 1 for s in strides]):\n kwargs[\"strides\"] = strides\n if not all([d == 1 for d in dilations]):\n kwargs[\"dilations\"] = dilations\n\n node = helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n # Default values for other attributes:\n auto_pad=auto_pad,\n group=group,\n **kwargs,\n )\n else:\n node = helper.make_node(\n \"Conv\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n # Default values for other attributes:\n strides=strides,\n dilations=dilations,\n group=group,\n pads=padding,\n )\n\n graph = helper.make_graph(\n [node],\n \"conv_test\",\n 
inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"W\", TensorProto.FLOAT, list(w_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(y_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"conv_test\")\n\n verify_with_ort(model, [x_shape, w_shape], [y_shape], use_vm=True, convert_to_static=True)\n\n\[email protected]_gpu\ndef test_conv():\n def repeat(N, D):\n return tuple([N for _ in range(D)])\n\n for D in [1, 2, 3]:\n # Convolution with padding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n 2 * repeat(1, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution with assymetric padding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(4, D),\n repeat(0, D) + repeat(1, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution without padding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n 2 * repeat(0, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution with autopadding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with valid autopadding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"VALID\",\n )\n # Convolution with unset padding\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n 2 * repeat(0, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n True,\n )\n # Convolution with non uniform stride\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n None,\n repeat(3, D),\n repeat(2, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with dilation\n verify_conv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n 2 * repeat(2, D),\n repeat(3, D),\n repeat(1, D),\n repeat(2, D),\n )\n\n # TODO(jwfromm): Merge with other tests once group_conv3d is supported.\n for D in [1, 2]:\n # Group Convolution\n verify_conv(\n (1, 8) + repeat(5, D),\n (8, 1) + repeat(3, D),\n (1, 8) + repeat(5, D),\n 2 * repeat(1, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n group=8,\n )\n\n\ndef verify_convtranspose_with_padding(\n x_shape,\n w_shape,\n y_shape,\n padding,\n kernel_shape,\n strides,\n dilations,\n auto_pad=\"NOTSET\",\n unset_pad=False,\n group=1,\n):\n node = helper.make_node(\n \"ConvTranspose\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n # Default values for other attributes:\n strides=strides,\n dilations=dilations,\n )\n if not unset_pad:\n if padding is None:\n pad_attr = helper.make_attribute(\"auto_pad\", auto_pad)\n else:\n pad_attr = helper.make_attribute(\"pads\", padding)\n node.attribute.append(pad_attr)\n\n if group is not None:\n group_attr = helper.make_attribute(\"group\", group)\n node.attribute.append(group_attr)\n\n graph = helper.make_graph(\n [node],\n \"convtranspose_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"W\", TensorProto.FLOAT, list(w_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(y_shape))],\n )\n\n model = helper.make_model(graph, 
producer_name=\"convtranspose_pad_test\")\n\n verify_with_ort(model, [x_shape, w_shape], [y_shape], use_vm=True, convert_to_static=True)\n\n\ndef verify_convtranspose(x_shape, w_shape, y_shape, p, group=1):\n node = onnx.helper.make_node(\n \"ConvTranspose\",\n inputs=[\"x\", \"W\"],\n outputs=[\"y\"],\n strides=[3, 2],\n kernel_shape=[3, 3],\n pads=p,\n )\n\n if group is not None:\n group_attr = helper.make_attribute(\"group\", group)\n node.attribute.append(group_attr)\n\n graph = helper.make_graph(\n [node],\n \"verify_convtranspose_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"W\", TensorProto.FLOAT, list(w_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(y_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"convtranspose_test\")\n verify_with_ort(model, [x_shape, w_shape], y_shape)\n\n\[email protected]_gpu\ndef test_convtranspose():\n # Convolution Transpose with padding\n # (1, 1, 3, 3) input tensor\n # (1, 2, 3, 3) tensor for convolution weights\n # (1, 2, 7, 3) output tensor\n # [1, 2, 1, 2] list for pads\n verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2])\n # Test undefined groups.\n verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2], group=None)\n\n def repeat(N, D):\n return tuple([N for _ in range(D)])\n\n # TODO(mbrookhart): onnxruntime in CI only supports 2D,\n # find something else to test 1D and 3D against\n for D in [2]:\n # Convolution with padding\n verify_convtranspose_with_padding(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n 2 * repeat(1, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution without padding\n verify_convtranspose_with_padding(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(7, D),\n 2 * repeat(0, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution with autopadding\n verify_convtranspose_with_padding(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with valid autopadding\n verify_convtranspose_with_padding(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(7, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"VALID\",\n )\n # Convolution with unset padding\n verify_convtranspose_with_padding(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(7, D),\n 2 * repeat(0, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n True,\n )\n # Convolution with non uniform stride\n verify_convtranspose_with_padding(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(9, D),\n None,\n repeat(3, D),\n repeat(2, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with dilation\n # TODO(mbrookhart): Relay doesn't currently support convtranspose with dilation\n # verify_convtranspose_with_padding(\n # (1, 1) + repeat(5, D),\n # (1, 1) + repeat(3, D),\n # (1, 1) + repeat(5, D),\n # 2 * repeat(2, D),\n # repeat(3, D),\n # repeat(1, D),\n # repeat(2, D),\n # )\n\n\[email protected]_gpu\ndef test_unsqueeze_constant():\n from torch.nn import Linear, Module, Sequential\n\n class Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n import tempfile\n\n with tempfile.NamedTemporaryFile() as fp:\n file_name = fp.name\n input_size = (1, 16, 32, 32)\n dummy_input 
= torch.randn(*input_size)\n layer = Sequential(Flatten(), Linear(16 * 32 * 32, 64))\n torch.onnx.export(layer, dummy_input, file_name, export_params=True)\n\n onnx_model = onnx.load(file_name)\n relay.frontend.from_onnx(onnx_model, {\"0\": input_size})\n\n\ndef verify_pooling(x_shape, kernel_shape, strides, pads, out_shape, mode, auto_pad=\"NOTSET\"):\n x_np = np.random.uniform(size=x_shape).astype(\"float32\")\n\n if mode == \"max\":\n node_type = \"MaxPool\"\n elif mode == \"average\":\n node_type = \"AveragePool\"\n else:\n raise ValueError(\"Pool method {} is not supported.\".format(mode))\n\n pool_node = helper.make_node(\n node_type, inputs=[\"x\"], outputs=[\"y\"], kernel_shape=kernel_shape, strides=strides\n )\n\n if pads is None:\n pad_attr = helper.make_attribute(\"auto_pad\", auto_pad)\n else:\n pad_attr = helper.make_attribute(\"pads\", pads)\n pool_node.attribute.append(pad_attr)\n\n if mode == \"max\":\n storage_attr = helper.make_attribute(\"storage_order\", 0)\n pool_node.attribute.append(storage_attr)\n\n graph = helper.make_graph(\n [pool_node],\n \"pooling_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"pooling_test\")\n verify_with_ort(model, [x_shape], [out_shape], use_vm=False, convert_to_static=True)\n\n\[email protected]_gpu\ndef test_pooling():\n for mode in [\"max\", \"average\"]:\n # Pool1D\n verify_pooling(\n x_shape=[1, 1, 32],\n kernel_shape=[3],\n strides=[1],\n pads=[1, 1],\n out_shape=[1, 1, 32],\n mode=mode,\n )\n # Pool2D\n verify_pooling(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n strides=[1, 1],\n pads=[1, 1, 1, 1],\n out_shape=[1, 1, 32, 32],\n mode=mode,\n )\n\n # Pool1D with stride\n verify_pooling(\n x_shape=[1, 1, 32],\n kernel_shape=[3],\n strides=[2],\n pads=[1, 1],\n out_shape=[1, 1, 16],\n mode=mode,\n )\n # Pool2D with stride\n verify_pooling(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n strides=[2, 2],\n pads=[1, 1, 1, 1],\n out_shape=[1, 1, 16, 16],\n mode=mode,\n )\n\n # Pool1D with stride and autopadding\n verify_pooling(\n x_shape=[1, 1, 32],\n kernel_shape=[3],\n strides=[2],\n pads=None,\n out_shape=[1, 1, 16],\n mode=mode,\n auto_pad=\"SAME_UPPER\",\n )\n # Pool2D with stride and autopadding\n verify_pooling(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n strides=[2, 2],\n pads=None,\n out_shape=[1, 1, 16, 16],\n mode=mode,\n auto_pad=\"SAME_UPPER\",\n )\n\n # Pool3D with stride\n verify_pooling(\n x_shape=[1, 1, 32, 32, 32],\n kernel_shape=[3, 3, 3],\n strides=[2, 2, 2],\n pads=[1, 1, 1, 1, 1, 1],\n out_shape=[1, 1, 16, 16, 16],\n mode=mode,\n )\n\n # Pool3D with stride and autopadding\n verify_pooling(\n x_shape=[1, 1, 32, 32, 32],\n kernel_shape=[3, 3, 3],\n strides=[2, 2, 2],\n pads=None,\n out_shape=[1, 1, 16, 16, 16],\n mode=mode,\n auto_pad=\"SAME_UPPER\",\n )\n\n\ndef verify_global_pooling(x_shape, mode):\n out_shape = x_shape[:2] + [1] * (len(x_shape) - 2)\n\n if mode == \"max\":\n node_type = \"GlobalMaxPool\"\n elif mode == \"average\":\n node_type = \"GlobalAveragePool\"\n else:\n raise ValueError(\"Pool method {} is not supported.\".format(mode))\n\n pool_node = helper.make_node(node_type, inputs=[\"x\"], outputs=[\"y\"])\n\n graph = helper.make_graph(\n [pool_node],\n \"global_pooling_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape))],\n outputs=[helper.make_tensor_value_info(\"y\", 
TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"global_pooling_test\")\n verify_with_ort(model, [x_shape], [out_shape], use_vm=False, convert_to_static=True)\n\n\[email protected]_gpu\ndef test_global_pooling():\n # Test each pooling mode across all N-D inputs.\n for mode in [\"average\", \"max\"]:\n # 1D Pooling (NCW)\n verify_global_pooling([1, 8, 8], mode)\n verify_global_pooling([4, 1, 4], mode)\n # 2D Pooling (NCHW)\n verify_global_pooling([1, 8, 8, 8], mode)\n verify_global_pooling([4, 1, 6, 4], mode)\n # 3D Pooling (NCDHW)\n verify_global_pooling([1, 8, 6, 8, 8], mode)\n verify_global_pooling([4, 1, 2, 6, 4], mode)\n\n\ndef verify_mod(x_shape, y_shape, fmod, out_shape, dtype=\"float32\"):\n x_np = np.random.uniform(-100.0, 100.0, x_shape).astype(dtype)\n y_np = np.random.uniform(-100.0, 100.0, y_shape).astype(dtype)\n y_np = np.where(y_np == 0, 1, y_np) # remove 0's to avoid division by zero error\n\n mod_node = helper.make_node(\"Mod\", inputs=[\"x\", \"y\"], outputs=[\"z\"], fmod=fmod)\n\n onnx_dtype = TensorProto.FLOAT if dtype == \"float32\" else TensorProto.INT32\n graph = helper.make_graph(\n [mod_node],\n \"mod_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", onnx_dtype, list(x_shape)),\n helper.make_tensor_value_info(\"y\", onnx_dtype, list(y_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"z\", onnx_dtype, list(out_shape))],\n )\n model = helper.make_model(graph, producer_name=\"mod_test\")\n verify_with_ort_with_inputs(model, [x_np, y_np], [out_shape])\n\n\[email protected]_gpu\ndef test_mod():\n # Mod\n verify_mod(\n x_shape=[1, 32, 32], y_shape=[1, 1, 32], fmod=0, out_shape=(1, 32, 32), dtype=\"int32\"\n )\n verify_mod(\n x_shape=[1, 32, 32, 32],\n y_shape=[1, 32, 32, 32],\n fmod=0,\n out_shape=(1, 32, 32, 32),\n dtype=\"int32\",\n )\n\n # fmod\n verify_mod(\n x_shape=[1, 32, 32], y_shape=[1, 32, 32], fmod=1, out_shape=(1, 32, 32), dtype=\"int32\"\n )\n verify_mod(x_shape=[1, 1, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))\n verify_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 1, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))\n verify_mod(\n x_shape=[1, 32, 32, 32],\n y_shape=[1, 32, 32, 32],\n fmod=1,\n out_shape=(1, 32, 32, 32),\n dtype=\"int32\",\n )\n verify_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))\n\n\ndef verify_xor(x_shape, y_shape):\n x_np = np.random.choice(a=[False, True], size=x_shape).astype(\"bool\")\n y_np = np.random.choice(a=[False, True], size=y_shape).astype(\"bool\")\n\n np_out = np.logical_xor(x_np, y_np)\n out_shape = np_out.shape\n\n xor_node = helper.make_node(\"Xor\", inputs=[\"x\", \"y\"], outputs=[\"z\"])\n\n onnx_dtype = TensorProto.BOOL\n graph = helper.make_graph(\n [xor_node],\n \"xor_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", onnx_dtype, list(x_shape)),\n helper.make_tensor_value_info(\"y\", onnx_dtype, list(y_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"z\", onnx_dtype, list(out_shape))],\n )\n model = helper.make_model(graph, producer_name=\"xor_test\")\n verify_with_ort_with_inputs(model, [x_np, y_np], [out_shape])\n\n\[email protected]_gpu\ndef test_xor():\n # XOR\n verify_xor(x_shape=[1, 32, 32], y_shape=[1, 32, 32])\n\n # Xor broadcast\n verify_xor(x_shape=[1, 32, 32], y_shape=[1, 1, 32])\n\n\ndef verify_max_roi_pool(x_shape, rois_shape, pooled_shape, spatial_scale, out_shape):\n if spatial_scale is None:\n pool_node = helper.make_node(\n \"MaxRoiPool\", inputs=[\"x\", \"rois\"], 
outputs=[\"y\"], pooled_shape=pooled_shape\n )\n else:\n pool_node = helper.make_node(\n \"MaxRoiPool\",\n inputs=[\"x\", \"rois\"],\n outputs=[\"y\"],\n pooled_shape=pooled_shape,\n spatial_scale=spatial_scale,\n )\n\n graph = helper.make_graph(\n [pool_node],\n \"pool_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape)),\n helper.make_tensor_value_info(\"rois\", TensorProto.FLOAT, list(rois_shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"pool_test\")\n verify_with_ort(model, [x_shape, rois_shape], [out_shape])\n\n\[email protected]_gpu\ndef test_max_roi_pool():\n verify_max_roi_pool(\n x_shape=[1, 3, 6, 6],\n rois_shape=[3, 5],\n pooled_shape=[1, 1],\n spatial_scale=None,\n out_shape=[3, 3, 1, 1],\n )\n\n verify_max_roi_pool(\n x_shape=[1, 3, 10, 10],\n rois_shape=[4, 5],\n pooled_shape=[2, 2],\n spatial_scale=2.0,\n out_shape=[4, 3, 2, 2],\n )\n\n\ndef verify_lppool(x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad=\"NOTSET\"):\n if pads is None:\n pool_node = helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n p=p,\n auto_pad=auto_pad,\n strides=strides,\n )\n else:\n pool_node = helper.make_node(\n \"LpPool\",\n inputs=[\"x\"],\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n p=p,\n pads=pads,\n strides=strides,\n )\n\n graph = helper.make_graph(\n [pool_node],\n \"lppool_test\",\n inputs=[helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x_shape))],\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, list(out_shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"lppool_test\")\n verify_with_ort(model, [x_shape], [out_shape], use_vm=True, convert_to_static=True)\n\n\[email protected]_gpu\ndef test_lppool():\n # Pool1D\n verify_lppool(\n x_shape=[1, 1, 32], kernel_shape=[3], p=2, strides=[1], pads=[1, 1], out_shape=[1, 1, 32]\n )\n\n # Pool2D\n verify_lppool(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n p=2,\n strides=[1, 1],\n pads=[1, 1, 1, 1],\n out_shape=[1, 1, 32, 32],\n )\n\n # Pool1D with stride\n verify_lppool(\n x_shape=[1, 1, 32], kernel_shape=[3], p=2, strides=[2], pads=[1, 1], out_shape=[1, 1, 16]\n )\n\n # Pool2D with stride\n verify_lppool(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n p=2,\n strides=[2, 2],\n pads=[1, 1, 1, 1],\n out_shape=[1, 1, 16, 16],\n )\n\n # Pool1D with stride and autopadding\n verify_lppool(\n x_shape=[1, 1, 32],\n kernel_shape=[3],\n p=2,\n strides=[2],\n pads=None,\n out_shape=[1, 1, 16],\n auto_pad=\"SAME_UPPER\",\n )\n\n # Pool2D with stride and autopadding\n verify_lppool(\n x_shape=[1, 1, 32, 32],\n kernel_shape=[3, 3],\n p=2,\n strides=[2, 2],\n pads=None,\n out_shape=[1, 1, 16, 16],\n auto_pad=\"SAME_UPPER\",\n )\n\n # Pool3D with stride\n verify_lppool(\n x_shape=[1, 1, 32, 32, 32],\n kernel_shape=[3, 3, 3],\n p=2,\n strides=[2, 2, 2],\n pads=[1, 1, 1, 1, 1, 1],\n out_shape=[1, 1, 16, 16, 16],\n )\n\n # Pool3D with stride and autopadding\n verify_lppool(\n x_shape=[1, 1, 32, 32, 32],\n kernel_shape=[3, 3, 3],\n p=2,\n strides=[2, 2, 2],\n pads=None,\n out_shape=[1, 1, 16, 16, 16],\n auto_pad=\"SAME_UPPER\",\n )\n\n\ndef verify_rnn(\n seq_length,\n batch_size,\n input_size,\n hidden_size,\n rnn_type=\"LSTM\",\n use_bias=False,\n activations=None,\n alphas=None,\n betas=None,\n use_initial_state=False,\n use_peep=False,\n linear_before_reset=False,\n directions=1,\n):\n if rnn_type == 
\"LSTM\":\n multiplier = 4\n elif rnn_type == \"GRU\":\n multiplier = 3\n else:\n raise NotImplementedError(f\"{rnn_type} RNNs not yet supported.\")
\n\n if directions not in [1, 2]:\n raise ValueError(\"Direction should be either 1 or 2 (for bidirectional RNNs)\")
\n\n def get_inputs():\n input_names = []\n input_values = []\n input_tensors = []
\n\n def register(np_arr, name, shape=None):\n input_values.append(np_arr)\n input_names.append(name)
\n\n # Map of numpy dtypes to the protobuf equivalent\n dtype_map = {\n \"float32\": TensorProto.FLOAT,\n \"int32\": TensorProto.INT32,\n \"int8\": TensorProto.INT8,\n }
\n\n if np_arr.dtype.name not in dtype_map:\n raise ValueError(f\"Unsupported dtype {np_arr.dtype.name}\")\n if shape is None:\n shape = list(np_arr.shape)\n proto_type = dtype_map[np_arr.dtype.name]\n input_tensors.append(helper.make_tensor_value_info(name, proto_type, shape))
\n\n x_np = np.random.uniform(size=(seq_length, batch_size, input_size)).astype(\"float32\")\n w_np = np.random.uniform(size=(directions, multiplier * hidden_size, input_size)).astype(\n \"float32\"\n )\n r_np = np.random.uniform(size=(directions, multiplier * hidden_size, hidden_size)).astype(\n \"float32\"\n )\n register(x_np, \"X\")\n register(w_np, \"W\")\n register(r_np, \"R\")
\n\n if use_bias:\n b_np = np.random.uniform(size=(directions, multiplier * 2 * hidden_size)).astype(\n \"float32\"\n )\n register(b_np, \"B\")
\n\n if use_initial_state:\n assert use_bias, \"Initial states must have bias specified.\"\n sequence_np = np.repeat(seq_length, batch_size).astype(\"int32\")\n register(sequence_np, \"sequence_lens\")
\n\n initial_h_np = np.random.uniform(size=(directions, batch_size, hidden_size)).astype(\n \"float32\"\n )\n register(initial_h_np, \"initial_h\")
\n\n if rnn_type == \"LSTM\":\n initial_c_np = np.random.uniform(size=(directions, batch_size, hidden_size)).astype(\n \"float32\"\n )\n register(initial_c_np, \"initial_c\")
\n\n if use_peep and rnn_type == \"LSTM\":\n assert use_initial_state, \"Peepholes require initial state to be specified.\"\n p_np = np.random.uniform(size=(directions, 3 * hidden_size)).astype(\"float32\")\n register(p_np, \"P\")
\n\n return input_names, input_tensors, input_values
\n\n input_names, input_tensors, input_values = get_inputs()
\n\n def get_outputs():\n output_names = []\n graph_outputs = []\n output_shapes = []
\n\n def register(name, shape, proto_type):\n output_names.append(name)\n graph_outputs.append(helper.make_tensor_value_info(name, proto_type, list(shape)))\n output_shapes.append(list(shape))
\n\n register(\"Y\", [seq_length, directions, batch_size, hidden_size], TensorProto.FLOAT)\n register(\"Y_h\", [directions, batch_size, hidden_size], TensorProto.FLOAT)
\n\n if rnn_type == \"LSTM\":\n register(\"Y_c\", [directions, batch_size, hidden_size], TensorProto.FLOAT)
\n\n return output_names, graph_outputs, output_shapes
\n\n output_names, graph_outputs, output_shapes = get_outputs()
\n\n rnn_node = helper.make_node(\n rnn_type, inputs=input_names, outputs=output_names, hidden_size=hidden_size\n )\n if activations is not None:\n activations_attr = helper.make_attribute(\"activations\", activations)\n rnn_node.attribute.append(activations_attr)\n if directions == 2:\n direction_attr = helper.make_attribute(\"direction\", \"bidirectional\")\n rnn_node.attribute.append(direction_attr)\n if alphas is not None:\n alphas_attr = helper.make_attribute(\"activation_alpha\", alphas)\n rnn_node.attribute.append(alphas_attr)\n if betas is not
None:\n betas_attr = helper.make_attribute(\"activation_beta\", betas)\n rnn_node.attribute.append(betas_attr)\n if linear_before_reset and rnn_type == \"GRU\":\n lbr_attr = helper.make_attribute(\"linear_before_reset\", 1)\n rnn_node.attribute.append(lbr_attr)\n\n graph = helper.make_graph([rnn_node], \"rnn_test\", inputs=input_tensors, outputs=graph_outputs)\n\n model = helper.make_model(graph, producer_name=\"rnn_test\")\n\n verify_with_ort_with_inputs(model, input_values, output_shapes, atol=1e-2, rtol=1e-2)\n\n\[email protected]_gpu\ndef test_lstm():\n for directions in [1, 2]:\n # No bias.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n # large batch.\n verify_rnn(\n seq_length=4,\n batch_size=8,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n # Non power of two.\n verify_rnn(\n seq_length=3,\n batch_size=3,\n input_size=16,\n hidden_size=40,\n use_bias=True,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n # Long sequence.\n verify_rnn(\n seq_length=8,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n # Large hidden.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=128,\n use_bias=True,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n # Large input.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=64,\n hidden_size=32,\n use_bias=True,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n\n # Different activation testing.\n # Default value hardsigmoid.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"Tanh\", \"Tanh\"] * directions,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n # Multiple parameterized activations.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"LeakyRelu\", \"Tanh\"] * directions,\n alphas=[2.0, 0.5, 0.0] * directions,\n betas=[0.3, 0.0, 0.0] * directions,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n # All parameterized with new Affine activation.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"LeakyRelu\", \"Affine\"] * directions,\n alphas=[2.0, 0.5, 0.8] * directions,\n betas=[0.3, 0.1, 0.0] * directions,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n\n # Testing with initial state and peepholes\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n use_initial_state=True,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n use_initial_state=True,\n use_peep=True,\n rnn_type=\"LSTM\",\n directions=directions,\n )\n\n\[email protected]_gpu\ndef test_gru():\n for directions in [1, 2]:\n # No bias.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n rnn_type=\"GRU\",\n directions=directions,\n )\n # large batch.\n verify_rnn(\n seq_length=4,\n batch_size=8,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n rnn_type=\"GRU\",\n linear_before_reset=True,\n directions=directions,\n )\n # Non power of two.\n verify_rnn(\n seq_length=3,\n batch_size=3,\n input_size=16,\n hidden_size=40,\n use_bias=True,\n rnn_type=\"GRU\",\n 
directions=directions,\n )\n # Long sequence.\n verify_rnn(\n seq_length=8,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n rnn_type=\"GRU\",\n directions=directions,\n )\n # Large hidden.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=128,\n use_bias=True,\n rnn_type=\"GRU\",\n directions=directions,\n )\n # Large input.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=64,\n hidden_size=32,\n use_bias=True,\n rnn_type=\"GRU\",\n directions=directions,\n )\n\n # Different activation testing.\n # Default value hardsigmoid.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"Softsign\"] * directions,\n rnn_type=\"GRU\",\n directions=directions,\n )\n # Multiple parameterized activations.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"LeakyRelu\"] * directions,\n alphas=[2.0, 0.5] * directions,\n betas=[0.3, 0.0] * directions,\n rnn_type=\"GRU\",\n directions=directions,\n )\n # All parameterized with new Affine activation.\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=False,\n activations=[\"HardSigmoid\", \"Affine\"] * directions,\n alphas=[2.0, 0.8] * directions,\n betas=[0.3, 0.1] * directions,\n rnn_type=\"GRU\",\n directions=directions,\n )\n\n # Testing with initial state\n verify_rnn(\n seq_length=2,\n batch_size=1,\n input_size=16,\n hidden_size=32,\n use_bias=True,\n use_initial_state=True,\n rnn_type=\"GRU\",\n directions=directions,\n )\n\n\[email protected]_gpu\ndef test_resize():\n def verify(ishape, oshape, scales, mode, coord_trans=\"asymmetric\", alpha=0.5, exclude=False):\n nodes = [\n make_constant_node(\"roi\", onnx.TensorProto.FLOAT, (0,), []),\n make_constant_node(\"scales\", onnx.TensorProto.FLOAT, (len(scales),), scales),\n ]\n input_names = [\"X\", \"roi\", \"scales\"]\n if oshape != []:\n nodes.append(\n make_constant_node(\"sizes\", onnx.TensorProto.INT64, (len(oshape),), oshape)\n )\n input_names.append(\"sizes\")\n nodes.append(\n helper.make_node(\n \"Resize\",\n inputs=input_names,\n outputs=[\"Y\"],\n mode=mode,\n coordinate_transformation_mode=coord_trans,\n cubic_coeff_a=alpha,\n exclude_outside=exclude,\n )\n )\n\n if oshape == []:\n oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]\n graph = helper.make_graph(\n nodes,\n \"resize_test\",\n inputs=[helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, ishape)],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, oshape)],\n )\n\n model = helper.make_model(graph, producer_name=\"resize_test\")\n\n verify_with_ort(model, [ishape], [oshape], use_vm=True, opset=11, freeze_params=True)\n\n for ndim in [1, 2, 3]:\n method = \"nearest\"\n for coord_trans in [\"asymmetric\", \"align_corners\", \"half_pixel\"]:\n # upsampling\n verify([1, 16] + [32] * ndim, [1, 16] + [64] * ndim, [], method, coord_trans)\n # downsampling\n verify([1, 16] + [32] * ndim, [1, 16] + [16] * ndim, [], method, coord_trans)\n # scales are specified instead of sizes\n verify([1, 16] + [32] * ndim, [], [1, 1] + [0.5] * ndim, method, coord_trans)\n verify([1, 16] + [32] * ndim, [], [1, 1] + [2] * ndim, method, coord_trans)\n\n if ndim == 2:\n ## TODO(mbrookhart): ONNX Runtime in CI only supports 2D linear resize\n ## Remove this condition when updating CI\n method = \"linear\"\n # upsampling\n verify([1, 16] + [32] * ndim, [1, 16] + [64] 
* ndim, [], method)\n # downsampling\n verify([1, 16] + [32] * ndim, [1, 16] + [16] * ndim, [], method)\n # scales are specified instead of sizes\n verify([1, 16] + [32] * ndim, [], [1, 1] + [0.5] * ndim, method)\n verify([1, 16] + [32] * ndim, [], [1, 1] + [2] * ndim, method)\n\n if ndim == 2:\n # ONNX Runtime only supports cubic interpolation for 2D images\n method = \"cubic\"\n for alpha in [0.5, 0.75]:\n for exclude in [True, False]:\n # upsampling\n verify(\n [1, 16] + [32] * ndim,\n [1, 16] + [64] * ndim,\n [],\n method,\n alpha=alpha,\n exclude=exclude,\n )\n # downsampling\n verify(\n [1, 16] + [32] * ndim,\n [1, 16] + [16] * ndim,\n [],\n method,\n alpha=alpha,\n exclude=exclude,\n )\n # scales are specified instead of sizes\n verify(\n [1, 16] + [32] * ndim,\n [],\n [1, 1] + [0.5] * ndim,\n method,\n alpha=alpha,\n exclude=exclude,\n )\n verify(\n [1, 16] + [32] * ndim,\n [],\n [1, 1] + [2] * ndim,\n method,\n alpha=alpha,\n exclude=exclude,\n )\n\n def verify_opset_10(ishape, scales, mode):\n nodes = [\n make_constant_node(\"scales\", onnx.TensorProto.FLOAT, (len(scales),), scales),\n ]\n input_names = [\"X\", \"scales\"]\n nodes.append(\n helper.make_node(\n \"Resize\",\n inputs=input_names,\n outputs=[\"Y\"],\n mode=mode,\n )\n )\n\n oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]\n graph = helper.make_graph(\n nodes,\n \"resize_test\",\n inputs=[helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, ishape)],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, oshape)],\n )\n\n model = helper.make_model(graph, producer_name=\"resize_test\")\n verify_with_ort(model, [ishape], [oshape], use_vm=True, freeze_params=True, opset=10)\n\n verify_opset_10([1, 16, 32, 32], [1, 1, 2, 2], \"nearest\")\n verify_opset_10([1, 16, 32, 32], [1, 1, 0.5, 0.5], \"linear\")\n\n\[email protected]_gpu\ndef test_nonzero():\n def verify_nonzero(indata, outdata, dtype):\n node = helper.make_node(\n \"NonZero\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n )\n\n graph = helper.make_graph(\n [node],\n \"nonzero_test\",\n inputs=[helper.make_tensor_value_info(\"X\", TensorProto.INT64, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.INT64, list(outdata.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"nonzero_test\")\n\n verify_with_ort_with_inputs(model, [indata], dtype=\"int64\", use_vm=True, opset=9)\n\n input_data = np.array([[1, 0], [1, 1]], dtype=np.int64)\n result = np.array((np.nonzero(input_data))) # expected output [[0, 1, 1], [0, 0, 1]]\n verify_nonzero(input_data, result, dtype=np.int64)\n\n input_data = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.int64)\n result = np.array((np.nonzero(input_data))) # expected output [[0, 1, 2, 2], [0, 1, 0, 1]]\n verify_nonzero(input_data, result, dtype=np.int64)\n\n\[email protected]_gpu\ndef test_topk():\n def verify_topk(input_dims, K, axis=-1):\n output_dims = list(input_dims)\n output_dims[axis] = K\n\n node = helper.make_node(\n \"TopK\", inputs=[\"X\", \"K\"], outputs=[\"Values\", \"Indicies\"], axis=axis\n )\n\n graph = helper.make_graph(\n [node],\n \"topk_test\",\n inputs=[\n helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list(input_dims)),\n helper.make_tensor_value_info(\n \"K\",\n TensorProto.INT64,\n [\n 1,\n ],\n ),\n ],\n outputs=[\n helper.make_tensor_value_info(\"Values\", TensorProto.FLOAT, output_dims),\n helper.make_tensor_value_info(\"Indicies\", TensorProto.INT64, output_dims),\n ],\n )\n\n model = helper.make_model(graph, 
producer_name=\"topk_test\")\n\n indata = np.random.uniform(-10, 10, input_dims).astype(np.float32)\n verify_with_ort_with_inputs(model, [indata, np.array([K])], use_vm=True)\n\n for n in [12, 32]:\n for shape in [[n], [n, n], [n, n, n]]:\n for k in [1, 5, 10]:\n verify_topk(shape, k)\n\n verify_topk([n, n, n], 5, 0)\n verify_topk([n, n, n], 5, 1)\n verify_topk([n, n, n], 5, 2)\n\n\[email protected]_gpu\ndef test_roi_align():\n def verify_roi_align(\n input_dims,\n num_roi,\n output_height,\n output_width,\n sampling_ratio=0,\n spatial_scale=1.0,\n mode=\"avg\",\n ):\n output_dims = [num_roi, input_dims[1], output_height, output_width]\n\n node = helper.make_node(\n \"RoiAlign\",\n inputs=[\"X\", \"rois\", \"batch_indicies\"],\n outputs=[\"Y\"],\n mode=mode,\n output_height=output_height,\n output_width=output_width,\n sampling_ratio=sampling_ratio,\n spatial_scale=spatial_scale,\n )\n\n graph = helper.make_graph(\n [node],\n \"roialign_test\",\n inputs=[\n helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list(input_dims)),\n helper.make_tensor_value_info(\"rois\", TensorProto.FLOAT, [num_roi, 4]),\n helper.make_tensor_value_info(\n \"batch_indicies\",\n TensorProto.INT64,\n [\n num_roi,\n ],\n ),\n ],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, output_dims)],\n )\n\n model = helper.make_model(graph, producer_name=\"roialign_test\")\n\n np_data = np.random.uniform(size=input_dims).astype(\"float32\")\n np_rois = np.random.uniform(size=[num_roi, 4]).astype(\"float32\") * input_dims[2]\n np_batch_indicies = np.random.randint(low=0, high=input_dims[0], size=num_roi)\n\n verify_with_ort_with_inputs(\n model, [np_data, np_rois, np_batch_indicies], out_shape=[output_dims]\n )\n\n verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((4, 4, 16, 32), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 8, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 4, 8, 8), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 4, 16, 16), 16, 5, 7, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 4, 16, 12), 8, 7, 3, sampling_ratio=0, spatial_scale=1.0)\n verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=0.5)\n verify_roi_align((3, 4, 12, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.5)\n verify_roi_align((5, 4, 16, 14), 32, 7, 7, sampling_ratio=1, spatial_scale=1.0)\n verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=2, spatial_scale=1.0)\n\n # ONNX implementation of roi_align with max mode is incorrect, so we don't compare outputs here.\n\n\[email protected]_gpu\ndef test_non_max_suppression():\n def verify_nms(\n boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims\n ):\n input_names = [\"boxes\", \"scores\", \"max_output_boxes_per_class\", \"iou_threshold\"]\n input_nodes = [\n helper.make_tensor_value_info(\"boxes\", TensorProto.FLOAT, boxes.shape),\n helper.make_tensor_value_info(\"scores\", TensorProto.FLOAT, scores.shape),\n helper.make_tensor_value_info(\n \"max_output_boxes_per_class\", TensorProto.INT64, max_output_boxes_per_class.shape\n ),\n helper.make_tensor_value_info(\"iou_threshold\", TensorProto.FLOAT, iou_threshold.shape),\n ]\n inputs = [boxes, scores, max_output_boxes_per_class, iou_threshold]\n if score_threshold is not None:\n input_names.append(\"score_threshold\")\n input_nodes.append(\n helper.make_tensor_value_info(\n \"score_threshold\", TensorProto.FLOAT, 
score_threshold.shape\n )\n )\n inputs.append(score_threshold)\n node = helper.make_node(\n \"NonMaxSuppression\",\n inputs=input_names,\n outputs=[\"Y\"],\n center_point_box=0,\n )\n\n graph = helper.make_graph(\n [node],\n \"nms_test\",\n inputs=input_nodes,\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.INT64, output_dims)],\n )\n\n model = helper.make_model(graph, producer_name=\"nms_test\")\n\n verify_with_ort_with_inputs(model, inputs, use_vm=True)\n\n boxes = np.array(\n [\n [\n [0.0, 0.0, 0.3, 0.3],\n [0.0, 0.0, 0.4, 0.4],\n [0.0, 0.0, 0.5, 0.5],\n [0.5, 0.5, 0.9, 0.9],\n [0.5, 0.5, 1.0, 1.0],\n ],\n [\n [0.0, 0.0, 0.3, 0.3],\n [0.0, 0.0, 0.4, 0.4],\n [0.5, 0.5, 0.95, 0.95],\n [0.5, 0.5, 0.96, 0.96],\n [0.5, 0.5, 1.0, 1.0],\n ],\n ]\n ).astype(\"float32\")\n\n scores = np.array(\n [\n [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],\n [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],\n ]\n ).astype(\"float32\")\n max_output_boxes_per_class = np.array(2).astype(\"int64\")\n iou_threshold = np.array(0.8).astype(\"float32\")\n output_dims = [8, 3]\n verify_nms(boxes, scores, max_output_boxes_per_class, iou_threshold, None, output_dims)\n\n boxes = np.array(\n [\n [\n [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.1, 1.0, 1.1],\n [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0],\n [0.0, 10.1, 1.0, 11.1],\n [0.0, 100.0, 1.0, 101.0],\n ]\n ]\n ).astype(np.float32)\n scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)\n max_output_boxes_per_class = np.array([3]).astype(np.int64)\n iou_threshold = np.array([0.5]).astype(np.float32)\n score_threshold = np.array([0.4]).astype(np.float32)\n output_dims = [2, 3]\n verify_nms(\n boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims\n )\n\n\ndef verify_cond_loop():\n y_in = helper.make_tensor_value_info(\"y_in\", TensorProto.FLOAT, [1])\n y_out = helper.make_tensor_value_info(\"y_out\", TensorProto.FLOAT, [1])\n scan_out = helper.make_tensor_value_info(\"scan_out\", TensorProto.FLOAT, [1])\n cond_in = helper.make_tensor_value_info(\"cond_in\", TensorProto.BOOL, [])\n cond_out = helper.make_tensor_value_info(\"cond_out\", TensorProto.BOOL, [])\n iter_count = helper.make_tensor_value_info(\"iter_count\", TensorProto.INT64, [])\n\n y = np.array([-2]).astype(np.float32)\n\n five_const_node = helper.make_node(\n \"Constant\",\n inputs=[],\n outputs=[\"five\"],\n value=helper.make_tensor(\n name=\"const_tensor_five\", data_type=TensorProto.FLOAT, dims=(), vals=[5]\n ),\n )\n\n iter_cast_node = helper.make_node(\n \"Cast\", inputs=[\"iter_count\"], outputs=[\"iter_cast\"], to=onnx.TensorProto.FLOAT\n )\n\n y_add_node = helper.make_node(\"Add\", inputs=[\"y_in\", \"iter_cast\"], outputs=[\"y_out\"])\n\n less_node = helper.make_node(\"Less\", inputs=[\"y_out\", \"five\"], outputs=[\"cond_less\"])\n\n squeeze_node = helper.make_node(\"Squeeze\", inputs=[\"cond_less\"], outputs=[\"cond_squeeze\"])\n\n cond_cast_node = helper.make_node(\n \"Cast\", inputs=[\"cond_squeeze\"], outputs=[\"cond_out\"], to=onnx.TensorProto.BOOL\n )\n\n scan_identity_node = helper.make_node(\"Identity\", inputs=[\"y_out\"], outputs=[\"scan_out\"])\n\n loop_body = helper.make_graph(\n [\n five_const_node,\n iter_cast_node,\n y_add_node,\n less_node,\n squeeze_node,\n cond_cast_node,\n scan_identity_node,\n ],\n \"loop_body\",\n [iter_count, cond_in, y_in],\n [cond_out, y_out, scan_out],\n )\n\n loop_node = helper.make_node(\n \"Loop\", inputs=[\"trip_count\", \"cond\", \"y\"], outputs=[\"res_y\", \"res_scan\"], 
body=loop_body\n )\n\n trip_count = np.array(5).astype(np.int64)\n res_y = np.array([13]).astype(np.float32)\n cond = np.array(1).astype(bool)\n loop_graph = onnx.helper.make_graph(\n [loop_node],\n \"loop_outer\",\n inputs=[\n onnx.helper.make_tensor_value_info(\"trip_count\", onnx.TensorProto.INT64, []),\n onnx.helper.make_tensor_value_info(\"cond\", onnx.TensorProto.BOOL, []),\n onnx.helper.make_tensor_value_info(\"y\", onnx.TensorProto.FLOAT, [1]),\n ],\n outputs=[\n onnx.helper.make_tensor_value_info(\"res_y\", onnx.TensorProto.FLOAT, [1]),\n onnx.helper.make_tensor_value_info(\"res_scan\", onnx.TensorProto.FLOAT, [5, 1]),\n ],\n )\n loop_model = onnx.helper.make_model(loop_graph)\n\n # Set a high trip count so that condition trips first.\n trip_count = np.array(40).astype(np.int64)\n cond = np.array(1).astype(bool)\n input_vals = [trip_count, cond, y]\n verify_with_ort_with_inputs(loop_model, input_vals, use_vm=True, freeze_params=True)\n\n\ndef verify_count_loop():\n y_in = helper.make_tensor_value_info(\"y_in\", TensorProto.FLOAT, [])\n y_out = helper.make_tensor_value_info(\"y_out\", TensorProto.FLOAT, [])\n scan_out = helper.make_tensor_value_info(\"scan_out\", TensorProto.FLOAT, [])\n cond_in = helper.make_tensor_value_info(\"cond_in\", TensorProto.BOOL, [])\n cond_out = helper.make_tensor_value_info(\"cond_out\", TensorProto.BOOL, [])\n iter_count = helper.make_tensor_value_info(\"iter_count\", TensorProto.INT64, [])\n\n y = np.array(-2).astype(np.float32)\n\n iter_cast_node = helper.make_node(\n \"Cast\", inputs=[\"iter_count\"], outputs=[\"iter_cast\"], to=onnx.TensorProto.FLOAT\n )\n\n y_add_node = helper.make_node(\"Add\", inputs=[\"y_in\", \"iter_cast\"], outputs=[\"y_out\"])\n\n identity_node = helper.make_node(\"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"])\n\n scan_identity_node = helper.make_node(\"Identity\", inputs=[\"y_out\"], outputs=[\"scan_out\"])\n\n loop_body = helper.make_graph(\n [identity_node, iter_cast_node, y_add_node, scan_identity_node],\n \"loop_body\",\n [iter_count, cond_in, y_in],\n [cond_out, y_out, scan_out],\n )\n\n loop_node = helper.make_node(\n \"Loop\", inputs=[\"trip_count\", \"cond\", \"y\"], outputs=[\"res_y\", \"res_scan\"], body=loop_body\n )\n\n trip_count = np.array(5).astype(np.int64)\n res_y = np.array([13]).astype(np.float32)\n cond = np.array(1).astype(bool)\n loop_graph = onnx.helper.make_graph(\n [loop_node],\n \"loop_outer\",\n inputs=[\n onnx.helper.make_tensor_value_info(\"trip_count\", onnx.TensorProto.INT64, []),\n onnx.helper.make_tensor_value_info(\"cond\", onnx.TensorProto.BOOL, []),\n onnx.helper.make_tensor_value_info(\"y\", onnx.TensorProto.FLOAT, []),\n ],\n outputs=[\n onnx.helper.make_tensor_value_info(\"res_y\", onnx.TensorProto.FLOAT, []),\n onnx.helper.make_tensor_value_info(\"res_scan\", onnx.TensorProto.FLOAT, [5]),\n ],\n )\n loop_model = onnx.helper.make_model(loop_graph)\n\n trip_count = np.array(5).astype(np.int64)\n cond = np.array(1).astype(bool)\n input_vals = [trip_count, cond, y]\n verify_with_ort_with_inputs(loop_model, input_vals, use_vm=True, freeze_params=True)\n\n\ndef verify_tensor_loop():\n y_in = helper.make_tensor_value_info(\"y_in\", TensorProto.FLOAT, [3, 3, 3, 3])\n y_out = helper.make_tensor_value_info(\"y_out\", TensorProto.FLOAT, [3, 3, 3, 3])\n scan_out = helper.make_tensor_value_info(\"scan_out\", TensorProto.FLOAT, [3, 3, 3, 3])\n cond_in = helper.make_tensor_value_info(\"cond_in\", TensorProto.BOOL, [])\n cond_out = helper.make_tensor_value_info(\"cond_out\", 
TensorProto.BOOL, [])\n iter_count = helper.make_tensor_value_info(\"iter_count\", TensorProto.INT64, [])\n\n y = np.random.normal(size=[3, 3, 3, 3]).astype(np.float32)\n\n iter_cast_node = helper.make_node(\n \"Cast\", inputs=[\"iter_count\"], outputs=[\"iter_cast\"], to=onnx.TensorProto.FLOAT\n )\n\n y_add_node = helper.make_node(\"Add\", inputs=[\"y_in\", \"iter_cast\"], outputs=[\"y_out\"])\n\n identity_node = helper.make_node(\"Identity\", inputs=[\"cond_in\"], outputs=[\"cond_out\"])\n\n scan_identity_node = helper.make_node(\"Identity\", inputs=[\"y_out\"], outputs=[\"scan_out\"])\n\n loop_body = helper.make_graph(\n [identity_node, iter_cast_node, y_add_node, scan_identity_node],\n \"loop_body\",\n [iter_count, cond_in, y_in],\n [cond_out, y_out, scan_out],\n )\n\n loop_node = helper.make_node(\n \"Loop\", inputs=[\"trip_count\", \"cond\", \"y\"], outputs=[\"res_y\", \"res_scan\"], body=loop_body\n )\n\n trip_count = np.array(5).astype(np.int64)\n cond = np.array(1).astype(bool)\n loop_graph = onnx.helper.make_graph(\n [loop_node],\n \"loop_outer\",\n inputs=[\n onnx.helper.make_tensor_value_info(\"trip_count\", onnx.TensorProto.INT64, []),\n onnx.helper.make_tensor_value_info(\"cond\", onnx.TensorProto.BOOL, []),\n onnx.helper.make_tensor_value_info(\"y\", onnx.TensorProto.FLOAT, [3, 3, 3, 3]),\n ],\n outputs=[\n onnx.helper.make_tensor_value_info(\"res_y\", onnx.TensorProto.FLOAT, [3, 3, 3, 3]),\n onnx.helper.make_tensor_value_info(\"res_scan\", onnx.TensorProto.FLOAT, [5, 3, 3, 3, 3]),\n ],\n )\n loop_model = onnx.helper.make_model(loop_graph)\n\n trip_count = np.array(5).astype(np.int64)\n cond = np.array(1).astype(bool)\n input_vals = [trip_count, cond, y]\n verify_with_ort_with_inputs(\n loop_model, input_vals, use_vm=True, freeze_params=True, convert_to_static=True\n )\n\n\ndef test_loop():\n # Test a loop that exits once a condition is met.\n verify_cond_loop()\n # Test a loop that exits after a fixed number of iterations with scalar outputs.\n verify_count_loop()\n # Test a loop that uses an array output.\n verify_tensor_loop()\n\n\ndef verify_if(cond_array, num_outputs):\n # Given a bool scalar input cond.\n # return constant tensor x if cond is True, otherwise return constant tensor y.\n\n def append_constant_nodes(nodes, outputs, expected, name):\n outputs.append(onnx.helper.make_tensor_value_info(name, onnx.TensorProto.FLOAT, [5]))\n\n expected.append(np.random.randn(5).astype(\"float32\"))\n\n nodes.append(\n onnx.helper.make_node(\n \"Constant\", inputs=[], outputs=[name], value=numpy_helper.from_array(expected[-1])\n )\n )\n\n if_outputs = []\n graph_outputs = []\n\n then_nodes, then_outs, then_expected = [], [], []\n else_nodes, else_outs, else_expected = [], [], []\n\n for i in range(num_outputs):\n append_constant_nodes(then_nodes, then_outs, then_expected, \"then_out{}\".format(i))\n append_constant_nodes(else_nodes, else_outs, else_expected, \"else_out{}\".format(i))\n\n if_outputs.append(\"res{}\".format(i))\n graph_outputs.append(\n onnx.helper.make_tensor_value_info(\"res{}\".format(i), onnx.TensorProto.FLOAT, [5]),\n )\n\n then_body = onnx.helper.make_graph(then_nodes, \"then_body\", [], then_outs)\n else_body = onnx.helper.make_graph(else_nodes, \"else_body\", [], else_outs)\n\n if_node = onnx.helper.make_node(\n \"If\", inputs=[\"cond\"], outputs=if_outputs, then_branch=then_body, else_branch=else_body\n )\n\n if_graph = onnx.helper.make_graph(\n [if_node],\n \"if_outer\",\n inputs=[\n onnx.helper.make_tensor_value_info(\"cond\", onnx.TensorProto.BOOL, 
[]),\n ],\n outputs=graph_outputs,\n )
\n\n if_model = onnx.helper.make_model(if_graph)\n if cond_array:\n cond = np.array([1]).astype(\"bool\")\n else:\n cond = np.array(1).astype(\"bool\")\n correct_out = then_expected if cond else else_expected
\n\n # TODO(jwfromm): Onnxruntime 1.0.0 is buggy with If statements. Replace this with\n # verify_with_ort once we update versions.\n for target, dev in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output_with_vm(if_model, [cond], target, dev, freeze_params=True)\n if not isinstance(tvm_out, list):\n tvm_out = [tvm_out]\n for i in range(len(tvm_out)):\n tvm.testing.assert_allclose(correct_out[i], tvm_out[i], rtol=1e-05, atol=1e-05)
\n\n\[email protected]_gpu\ndef test_if():\n # Confirm that if works with cond as an array or scalar.\n verify_if(cond_array=False, num_outputs=1)\n verify_if(cond_array=False, num_outputs=2)\n verify_if(cond_array=True, num_outputs=1)\n verify_if(cond_array=True, num_outputs=2)
\n\n\[email protected]_gpu\ndef test_size():\n def verify_size(indata):\n node = helper.make_node(\n \"Size\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n )
\n\n graph = helper.make_graph(\n [node],\n \"size_test\",\n inputs=[helper.make_tensor_value_info(\"X\", TensorProto.INT64, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.INT64, [])],\n )
\n\n model = helper.make_model(graph, producer_name=\"size_test\")
\n\n verify_with_ort_with_inputs(model, [indata], dtype=\"int64\", use_vm=True, opset=11)
\n\n input_data = np.array([[1, 0], [1, 1]], dtype=np.int64)\n verify_size(input_data)
\n\n input_data = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.int64)\n verify_size(input_data)
\n\n\[email protected]_gpu\ndef test_maxunpool():\n def verify_maxunpool(data, indices, kernel_shape, strides, output_shape=None, pads=None):\n input_names = [\"xT\", \"xI\"]\n input_info = [\n helper.make_tensor_value_info(\"xT\", TensorProto.FLOAT, list(data.shape)),\n helper.make_tensor_value_info(\"xI\", TensorProto.INT64, list(indices.shape)),\n ]\n input_values = [data, indices]\n if output_shape is not None:\n input_names.append(\"output_shape\")\n input_info.append(\n helper.make_tensor_value_info(\n \"output_shape\", TensorProto.INT64, list(output_shape.shape)\n )\n )\n input_values.append(output_shape)\n else:\n # Compute the expected output shape per the ONNX MaxUnpool definition:\n # out = (in - 1) * stride + kernel - (pad_begin + pad_end)\n output_shape = np.asarray(([1, 1] + list(strides))) * np.asarray(list(data.shape))\n output_shape += np.asarray(([0, 0] + list(kernel_shape))) - np.asarray(\n ([0, 0] + list(strides))\n )\n if pads is not None:\n output_shape -= np.asarray(\n [0, 0] + list(np.sum(np.reshape(list(pads), [-1, 2]), axis=-1))\n )\n output_shape = [int(i) for i in output_shape]
\n\n node = helper.make_node(\n \"MaxUnpool\", inputs=input_names, outputs=[\"y\"], kernel_shape=kernel_shape\n )
\n\n if pads is not None:\n pad_attr = helper.make_attribute(\"pads\", pads)\n node.attribute.append(pad_attr)
\n\n if strides is not None:\n strides_attr = helper.make_attribute(\"strides\", strides)\n node.attribute.append(strides_attr)
\n\n graph = helper.make_graph(\n [node],\n \"maxunpool_test\",\n inputs=input_info,\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.FLOAT, output_shape)],\n )
\n\n model = helper.make_model(graph, producer_name=\"maxunpool_test\")
\n\n verify_with_ort_with_inputs(model, input_values, use_vm=True, opset=11)
\n\n # Basic test\n xT = np.array([[[[5, 6], [7, 8]]]], dtype=np.float32)\n xI = np.array([[[[0, 7], [13, 15]]]], dtype=np.int64)\n verify_maxunpool(xT, xI, [2, 2], strides=[2, 2])
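\n # For reference: with the 2x2 input above, kernel [2, 2] and strides [2, 2], the
\n # formula gives (2 - 1) * 2 + 2 = 4, so the inferred output shape is (1, 1, 4, 4).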
\n # Small stride\n verify_maxunpool(xT, xI, [2, 2], strides=[1, 1])\n # Big kernel\n verify_maxunpool(xT, xI, [3, 3], strides=[2, 2])\n # With output shape\n output_shape = np.array((1, 1, 5, 5), dtype=np.int64)\n verify_maxunpool(xT, xI, [2, 2], strides=[2, 2], output_shape=output_shape)\n # With explicit reverse padding\n pads = np.asarray([1, 1, 1, 1]).astype(np.int64)\n verify_maxunpool(xT, xI, [2, 2], strides=[2, 2], pads=pads)
\n\n\[email protected]_gpu\ndef test_softplus():\n def verify_softplus(indata):\n node = helper.make_node(\n \"Softplus\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n )
\n\n graph = helper.make_graph(\n [node],\n \"softplus_test\",\n inputs=[helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, list(indata.shape))],\n )
\n\n model = helper.make_model(graph, producer_name=\"softplus_test\")
\n\n verify_with_ort_with_inputs(model, [indata], dtype=\"float32\", use_vm=True, opset=11)
\n\n # Simple case with all signs.\n input_data = np.array([[-1, 0, 1]], dtype=np.float32)\n verify_softplus(input_data)\n # Larger random input.\n input_data = np.random.randn(1, 32, 32, 3).astype(\"float32\")\n verify_softplus(input_data)
\n\n\[email protected]_gpu\ndef test_cumsum():\n def verify_cumsum(indata, axis, exclusive=0, reverse=0, type=\"float32\"):\n cumsum_node = onnx.helper.make_node(\n \"CumSum\",\n inputs=[\"X\", \"axis\"],\n outputs=[\"Y\"],\n )\n if exclusive != 0:\n exclusive_attr = helper.make_attribute(\"exclusive\", exclusive)\n cumsum_node.attribute.append(exclusive_attr)\n if reverse != 0:\n reverse_attr = helper.make_attribute(\"reverse\", reverse)\n cumsum_node.attribute.append(reverse_attr)\n nodes = [\n make_constant_node(\"axis\", onnx.TensorProto.INT32, [1], [axis]),\n cumsum_node,\n ]\n if type == \"float32\":\n tensor_type = TensorProto.FLOAT\n else:\n tensor_type = TensorProto.INT32\n type = \"int32\"
\n\n graph = helper.make_graph(\n nodes,\n \"cumsum_test\",\n inputs=[\n helper.make_tensor_value_info(\"X\", tensor_type, list(indata.shape)),\n ],\n outputs=[helper.make_tensor_value_info(\"Y\", tensor_type, list(indata.shape))],\n )
\n\n model = helper.make_model(graph, producer_name=\"cumsum_test\")
\n\n verify_with_ort_with_inputs(model, [indata], dtype=type, use_vm=True, opset=11)
\n\n data = (\n np.array(\n [\n 1.0,\n 2.0,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 8.0,\n 9.0,\n 10.0,\n 11.0,\n 12.0,\n ]\n )\n .astype(np.float32)\n .reshape((3, 4))\n )
\n\n # Positional arguments below are (data, axis, exclusive, reverse).\n verify_cumsum(data, 0)\n verify_cumsum(data, 1)\n verify_cumsum(data, 0, 1, 0)\n verify_cumsum(data, 1, 1, 0)\n verify_cumsum(data, 0, 0, 1)\n verify_cumsum(data, 1, 0, 1)\n verify_cumsum(data, 1, 1, 1)\n data = np.random.randn(1, 32, 32, 3).astype(\"float32\")\n verify_cumsum(data, 1)\n data = np.random.randn(1, 32, 32, 3).astype(\"int32\")\n verify_cumsum(data, 0, type=\"int32\")\n verify_cumsum(data, 1, type=\"int32\")\n verify_cumsum(data, 0, 1, 0, type=\"int32\")\n verify_cumsum(data, 1, 1, 0, type=\"int32\")\n verify_cumsum(data, 0, 0, 1, type=\"int32\")\n verify_cumsum(data, 1, 0, 1, type=\"int32\")\n verify_cumsum(data, 1, 1, 1, type=\"int32\")
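\n # For reference, ONNX CumSum over [1, 2, 3, 4] yields [1, 3, 6, 10]; exclusive=1
\n # shifts it to [0, 1, 3, 6], and reverse=1 accumulates from the end: [10, 9, 7, 4].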
\n\n\[email protected]_gpu\ndef test_eyelike():\n def verify_eyelike(indata):\n node = helper.make_node(\n \"EyeLike\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n )
\n\n graph = helper.make_graph(\n [node],\n \"eyelike_test\",\n inputs=[helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list(indata.shape))],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, list(indata.shape))],\n )
\n\n model = helper.make_model(graph, producer_name=\"eyelike_test\")
\n\n verify_with_ort_with_inputs(model, [indata], dtype=\"float32\", opset=9)
\n\n input_data = np.zeros((5, 5), dtype=np.float32)\n verify_eyelike(input_data)
\n\n\n\"\"\"\n The following parameterized tests load the tests that ONNX ships as\n serialized ONNX files, inputs, and outputs. The goal of these tests\n is to ensure the ONNX importer is in line with the ONNX specification.\n To allow these tests to run in CI before all pass, a number of tests that\n are not yet supported are skipped.\n\"\"\"
\n\nfrom onnx import numpy_helper
\n\nf = onnx.__file__\nimport glob
\n\nonnx_test_folders = sorted(glob.glob(\"/\".join(f.split(\"/\")[0:-1]) + \"/backend/test/data/node/*/\"))
\n\nunsupported_onnx_tests = [\n \"test_cast_DOUBLE_to_FLOAT16/\",\n \"test_cast_FLOAT_to_STRING/\",\n \"test_cast_STRING_to_FLOAT/\",\n \"test_compress_0/\",\n \"test_compress_1/\",\n \"test_compress_default_axis/\",\n \"test_compress_negative_axis/\",\n \"test_convtranspose_dilations/\",\n \"test_convtranspose_output_shape/\",\n \"test_cumsum_1d/\",\n \"test_cumsum_1d_exclusive/\",\n \"test_cumsum_1d_reverse/\",\n \"test_cumsum_1d_reverse_exclusive/\",\n \"test_cumsum_2d_axis_0/\",\n \"test_cumsum_2d_axis_1/\",\n \"test_cumsum_2d_negative_axis/\",\n \"test_det_2d/\",\n \"test_det_nd/\",\n \"test_matmulinteger/\",\n \"test_maxpool_2d_same_lower/\",\n \"test_maxpool_2d_same_upper/\",\n \"test_maxpool_with_argmax_2d_precomputed_pads/\",\n \"test_maxpool_with_argmax_2d_precomputed_strides/\",\n \"test_maxunpool_export_with_output_shape/\",\n \"test_mvn/\",\n \"test_qlinearmatmul_2D/\",\n \"test_qlinearmatmul_3D/\",\n \"test_resize_tf_crop_and_resize/\",\n ## For these three tests, ONNX 1.6.0 has incorrect graphs; they pass with ONNX 1.7.0\n \"test_resize_upsample_sizes_nearest_ceil_half_pixel/\",\n \"test_resize_upsample_sizes_nearest_floor_align_corners/\",\n \"test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric/\",\n \"test_rnn_seq_length/\",\n \"test_round/\",\n \"test_scan9_sum/\",\n \"test_scan_sum/\",\n \"test_simple_rnn_defaults/\",\n \"test_simple_rnn_with_initial_bias/\",\n \"test_strnormalizer_export_monday_casesensintive_lower/\",\n \"test_strnormalizer_export_monday_casesensintive_nochangecase/\",\n \"test_strnormalizer_export_monday_casesensintive_upper/\",\n \"test_strnormalizer_export_monday_empty_output/\",\n \"test_strnormalizer_export_monday_insensintive_upper_twodim/\",\n \"test_strnormalizer_nostopwords_nochangecase/\",\n \"test_tfidfvectorizer_tf_batch_onlybigrams_skip0/\",\n \"test_tfidfvectorizer_tf_batch_onlybigrams_skip5/\",\n \"test_tfidfvectorizer_tf_batch_uniandbigrams_skip5/\",\n \"test_tfidfvectorizer_tf_only_bigrams_skip0/\",\n \"test_tfidfvectorizer_tf_onlybigrams_levelempty/\",\n \"test_tfidfvectorizer_tf_onlybigrams_skip5/\",\n \"test_tfidfvectorizer_tf_uniandbigrams_skip5/\",\n \"test_unique_sorted_with_axis/\",\n \"test_unique_sorted_with_axis_3d/\",\n \"test_unique_sorted_with_negative_axis/\",\n \"test_upsample_nearest/\",\n]
\n\n\ntargets = [tgt for (tgt, _) in tvm.testing.enabled_targets()]
\n\ntarget_skips = {\n \"cuda\": [\n \"test_mod_mixed_sign_float16/\",\n \"test_qlinearconv/\",\n \"test_resize_upsample_sizes_nearest/\",\n ]\n}
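\n\n\n# Each folder under backend/test/data/node/ holds a serialized model.onnx plus one or
\n# more data set directories (typically test_data_set_0) of input_*.pb / output_*.pb
\n# protobuf tensors; test_onnx_nodes below loads each pair and checks TVM against it.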
\[email protected](\"target\", targets)\[email protected](\"test\", onnx_test_folders)\ndef test_onnx_nodes(test, target):\n if target in target_skips:\n for failure in target_skips[target]:\n if failure in test:\n pytest.skip()\n break\n for failure in unsupported_onnx_tests:\n if failure in test:\n pytest.skip()\n break\n atol = 1e-5\n rtol = 1e-5\n if \"roialign\" in test:\n # for some reason the ONNX test crops the\n # roialign results to 4 decimal places\n atol = 1e-4\n onnx_model = onnx.load(test + \"/model.onnx\")\n inputs = []\n outputs = []\n for dataset in glob.glob(test + \"/*/\"):\n tensors = sorted(glob.glob(dataset + \"/*.pb\"))\n for tensor in tensors:\n new_tensor = onnx.TensorProto()\n with open(tensor, \"rb\") as f:\n new_tensor.ParseFromString(f.read())\n if \"input\" in tensor.split(\"/\")[-1]:\n inputs.append(numpy_helper.to_array(new_tensor))\n elif \"output\" in tensor.split(\"/\")[-1]:\n outputs.append(numpy_helper.to_array(new_tensor))\n else:\n raise ImportError(str(tensor) + \" not labeled as an input or an output\")
\n\n dev = tvm.device(target, 0)\n tvm_val = get_tvm_output_with_vm(onnx_model, inputs, target, dev)\n if len(outputs) == 1:\n tvm.testing.assert_allclose(outputs[0], tvm_val, rtol=rtol, atol=atol)\n else:\n for output, val in zip(outputs, tvm_val):\n tvm.testing.assert_allclose(output, val, rtol=rtol, atol=atol)
\n\n\ndef test_wrong_input():\n node = helper.make_node(\n \"Softplus\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n )
\n\n graph = helper.make_graph(\n [node],\n \"softplus_test\",\n inputs=[helper.make_tensor_value_info(\"X\", TensorProto.FLOAT, list([5]))],\n outputs=[helper.make_tensor_value_info(\"Y\", TensorProto.FLOAT, list([5]))],\n )\n model = helper.make_model(graph, producer_name=\"softplus_test\")
\n\n # Check that the graph can import correctly with proper shape definitions.\n correct_shape_dict = {\"X\": [5]}\n relay.frontend.from_onnx(model, shape=correct_shape_dict)
\n\n # Check that an assertion is triggered when an input not in the graph is provided.\n wrong_shape_dict = {\"Z\": [5]}\n with pytest.raises(AssertionError):\n relay.frontend.from_onnx(model, shape=wrong_shape_dict)
\n\n\ndef test_aten():\n torch.set_grad_enabled(False)
\n\n def _convert_to_onnx(model, inputs):\n file_name = \"{}.onnx\".format(\"aten_model\")\n torch.onnx.export(\n model,\n inputs,\n file_name,\n export_params=True,\n verbose=False,\n opset_version=10,\n operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN,\n )\n onnx_model = onnx.load(file_name)\n assert 's: \"embedding_bag\"' in str(onnx_model)\n return onnx_model
\n\n def verify_embedding_bag(num_embedding, embedding_dim, data_shape, num_bags=None):\n dummy_data = torch.randint(0, num_embedding - 1, data_shape)\n tvm_inputs = [dummy_data.numpy()]\n model = torch.nn.EmbeddingBag(num_embedding, embedding_dim)\n onnx_model = _convert_to_onnx(model, dummy_data)\n torch_out = model(dummy_data)\n for target, ctx in tvm.testing.enabled_targets():\n tvm_out = get_tvm_output_with_vm(\n onnx_model, tvm_inputs, target, ctx, freeze_params=True, convert_to_static=True\n )\n tvm.testing.assert_allclose(torch_out.numpy(), tvm_out)
\n\n verify_embedding_bag(10, 3, [2, 10])\n verify_embedding_bag(32, 2, [3, 3])
\n\n\ndef verify_reverse_sequence(x, sequence_lens, batch_axis, time_axis):\n node = onnx.helper.make_node(\n \"ReverseSequence\",\n inputs=[\"x\", \"sequence_lens\"],\n outputs=[\"y\"],\n time_axis=time_axis,\n batch_axis=batch_axis,\n )
\n\n graph = helper.make_graph(\n [node],\n \"reverse_sequence_test\",\n inputs=[\n helper.make_tensor_value_info(\"x\", TensorProto.FLOAT, list(x.shape)),\n helper.make_tensor_value_info(\n \"sequence_lens\", TensorProto.INT64, list(sequence_lens.shape)\n ),\n ],\n outputs=[helper.make_tensor_value_info(\"y\",
TensorProto.FLOAT, list(x.shape))],\n )\n\n model = helper.make_model(graph, producer_name=\"reverse_sequence_test\")\n verify_with_ort_with_inputs(model, [x, sequence_lens], [x.shape])\n\n\[email protected]_gpu\ndef test_reverse_sequence():\n x = np.array(\n [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]],\n dtype=np.float32,\n )\n sequence_lens = np.array([1, 2, 3, 4], dtype=np.int64)\n verify_reverse_sequence(x, sequence_lens, 0, 1)\n\n sequence_lens = np.array([4, 3, 2, 1], dtype=np.int64)\n verify_reverse_sequence(x, sequence_lens, 1, 0)\n\n\ndef verify_qlinearconv(\n x_shape,\n w_shape,\n y_shape,\n padding,\n kernel_shape,\n strides,\n dilations,\n auto_pad=\"NOTSET\",\n bias=False,\n):\n\n x_array = np.random.randint(low=0, high=255, size=x_shape).astype(\"uint8\")\n w_array = np.random.uniform(low=0, high=255, size=w_shape).astype(\"uint8\")\n\n initializer = [\n helper.make_tensor(\"x_scale\", TensorProto.FLOAT, (), [np.random.rand()]),\n helper.make_tensor(\"x_zero_point\", TensorProto.UINT8, (), [np.random.randint(0, 255)]),\n helper.make_tensor(\"w_scale\", TensorProto.FLOAT, (), [np.random.rand()]),\n helper.make_tensor(\"w_zero_point\", TensorProto.UINT8, (), [np.random.randint(0, 255)]),\n helper.make_tensor(\"y_scale\", TensorProto.FLOAT, (), [np.random.rand()]),\n helper.make_tensor(\"y_zero_point\", TensorProto.UINT8, (), [np.random.randint(0, 255)]),\n ]\n\n input_nodes = [\n helper.make_tensor_value_info(\"x\", TensorProto.UINT8, list(x_shape)),\n helper.make_tensor_value_info(\"w\", TensorProto.UINT8, list(w_shape)),\n ]\n input_names = [\n \"x\",\n \"x_scale\",\n \"x_zero_point\",\n \"w\",\n \"w_scale\",\n \"w_zero_point\",\n \"y_scale\",\n \"y_zero_point\",\n ]\n input_values = [x_array, w_array]\n\n if bias is True:\n b_shape = w_shape[0:1]\n b_array = np.random.randint(low=0, high=65536, size=b_shape).astype(\"int32\")\n input_nodes.append(helper.make_tensor_value_info(\"B\", TensorProto.INT32, list(b_shape)))\n input_names.append(\"B\")\n input_values.append(b_array)\n\n if padding is None:\n ## autopadding with unset default attributes\n kwargs = {}\n if not all([s == 1 for s in strides]):\n kwargs[\"strides\"] = strides\n if not all([d == 1 for d in dilations]):\n kwargs[\"dilations\"] = dilations\n\n node = helper.make_node(\n \"QLinearConv\",\n inputs=input_names,\n outputs=[\"y\"],\n # Default values for other attributes:\n auto_pad=auto_pad,\n **kwargs,\n )\n else:\n node = helper.make_node(\n \"QLinearConv\",\n inputs=input_names,\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n # Default values for other attributes:\n strides=strides,\n dilations=dilations,\n # groups=1\n pads=padding,\n )\n\n graph = helper.make_graph(\n [node],\n \"conv_test\",\n inputs=input_nodes,\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.UINT8, list(y_shape))],\n initializer=initializer,\n )\n model = helper.make_model(graph, producer_name=\"qlinearconv_test\")\n # opt_level=1 will cause error\n verify_with_ort_with_inputs(model, input_values, opt_level=2)\n\n\ndef test_qlinearconv():\n def repeat(N, D):\n return tuple([N for _ in range(D)])\n\n # only support QLinearConv2d because only support qnn.conv2d\n D = 2\n\n # Convolution with padding\n verify_qlinearconv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n 2 * repeat(1, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n\n # Convolution with bias\n verify_qlinearconv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n 2 * repeat(1, 
D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n bias=True,\n )
\n\n # Convolution with asymmetric padding\n verify_qlinearconv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(4, D),\n repeat(0, D) + repeat(1, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution without padding\n verify_qlinearconv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n 2 * repeat(0, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution with autopadding\n verify_qlinearconv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with valid autopadding\n verify_qlinearconv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"VALID\",\n )\n # Convolution with non uniform stride\n verify_qlinearconv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n None,\n repeat(3, D),\n repeat(2, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with dilation\n verify_qlinearconv(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n 2 * repeat(2, D),\n repeat(3, D),\n repeat(1, D),\n repeat(2, D),\n )
\n\n\ndef verify_qlinearadd(a_shape, b_shape, c_shape):
\n\n a_array = np.random.random(a_shape).astype(\"float32\")\n b_array = np.random.random(b_shape).astype(\"float32\")
\n\n input_nodes = [\n helper.make_tensor_value_info(\"a\", TensorProto.FLOAT, list(a_shape)),\n helper.make_tensor_value_info(\"b\", TensorProto.FLOAT, list(b_shape)),\n ]\n input_names = [\n \"a\",\n \"b\",\n ]\n input_values = [a_array, b_array]
\n\n # Build a float32 Add model; quantize_static below rewrites it into a quantized\n # (QLinearAdd) model, which is what the TVM importer is exercised on.\n node = helper.make_node(\"Add\", [\"a\", \"b\"], [\"C\"])\n graph = helper.make_graph(\n [node],\n \"qlinearadd_test\",\n inputs=input_nodes,\n outputs=[helper.make_tensor_value_info(\"C\", TensorProto.FLOAT, list(c_shape))],\n )\n model = helper.make_model(graph, producer_name=\"qlinearadd_test\")\n from onnxruntime.quantization import quantize_static, CalibrationDataReader, QuantType
\n\n class RandomDataReader(CalibrationDataReader):\n def __init__(self, n=10):\n self.data = iter(\n [\n {\n \"a\": np.random.random(a_shape).astype(\"float32\"),\n \"b\": np.random.random(b_shape).astype(\"float32\"),\n }\n for _ in range(n)\n ]\n )
\n\n def get_next(self):\n return next(self.data, None)
\n\n d = tvm.contrib.utils.tempdir()\n model_fp32 = os.path.join(d.temp_dir, \"model.onnx\")\n onnx.save_model(model, model_fp32)\n model_quant = os.path.join(d.temp_dir, \"model.quant.onnx\")\n quantize_static(model_fp32, model_quant, RandomDataReader())\n # opt_level=1 will cause error with qnn lowering\n model = onnx.load(model_quant)\n verify_with_ort_with_inputs(model, input_values, opt_level=2)
\n\n\ndef test_qlinearadd():\n verify_qlinearadd([4, 2], [4, 2], [4, 2])\n verify_qlinearadd([4, 2], [2], [4, 2])\n verify_qlinearadd([5, 1, 7], [2, 7], [5, 2, 7])
\n\n\ndef get_random_uniform(shape, dtype=\"float32\", high=1.0, low=0.0, seed=None, target=\"llvm\"):\n ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]\n node = helper.make_node(\n \"RandomUniform\", [], [\"out\"], shape=shape, dtype=ONNX_DTYPE, high=high, low=low\n )\n if seed is not None:\n seed_attr = helper.make_attribute(\"seed\", seed)\n node.attribute.append(seed_attr)
\n\n graph =
helper.make_graph(\n [node],\n \"random_uniform_test\",\n inputs=[],\n outputs=[helper.make_tensor_value_info(\"out\", ONNX_DTYPE, shape)],\n )\n model = helper.make_model(graph, producer_name=\"random_uniform_test\")\n return get_tvm_output_with_vm(model, [], target=target, device=tvm.device(target, 0))\n\n\ndef test_random_uniform():\n targets = [tgt for (tgt, _) in tvm.testing.enabled_targets()]\n for target in targets:\n # Check that function runs and produces proper shape.\n vals = get_random_uniform([10], dtype=\"float32\", target=target)\n assert list(vals.shape) == [10]\n assert vals.dtype == \"float32\"\n\n # Test N-D tensor generation.\n vals = get_random_uniform([1, 3, 100, 100], dtype=\"float32\", target=target)\n assert list(vals.shape) == [1, 3, 100, 100]\n\n # Check that bounds aren't exceeded.\n vals = get_random_uniform(shape=[100], high=100, low=-100)\n assert list(vals.shape) == [100]\n assert all(vals >= -100) and all(vals <= 100)\n\n # Check that a fixed seed produces the same values when run twice.\n vals_1 = get_random_uniform(shape=[10], seed=1)\n vals_2 = get_random_uniform(shape=[10], seed=1)\n assert all(vals_1 == vals_2)\n\n # Test against an expected output with a fixed seed.\n real = get_random_uniform(shape=[10], seed=5)\n expected = np.asarray(\n [\n 0.8614111,\n 0.46572232,\n 0.6007328,\n 0.21619737,\n 0.6361222,\n 0.7298056,\n 0.13094282,\n 0.03556716,\n 0.32997167,\n 0.2977605,\n ]\n )\n tvm.testing.assert_allclose(real, expected, rtol=1e-5)\n\n\ndef verify_convinteger(\n x_shape,\n w_shape,\n y_shape,\n padding,\n kernel_shape,\n strides,\n dilations,\n auto_pad=\"NOTSET\",\n dtype=\"uint8\",\n):\n\n x_array = np.random.randint(low=0, high=255, size=x_shape).astype(dtype)\n w_array = np.random.uniform(low=0, high=255, size=w_shape).astype(dtype)\n x_zero_point_array = np.random.randint(0, 255, size=[]).astype(dtype)\n w_zero_point_array = np.random.randint(0, 255, size=[]).astype(dtype)\n\n ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]\n input_nodes = [\n helper.make_tensor_value_info(\"x\", ONNX_DTYPE, list(x_shape)),\n helper.make_tensor_value_info(\"w\", ONNX_DTYPE, list(w_shape)),\n helper.make_tensor_value_info(\"x_zero_point\", ONNX_DTYPE, []),\n helper.make_tensor_value_info(\"w_zero_point\", ONNX_DTYPE, []),\n ]\n input_names = [\n \"x\",\n \"w\",\n \"x_zero_point\",\n \"w_zero_point\",\n ]\n input_values = [x_array, w_array, x_zero_point_array, w_zero_point_array]\n\n if padding is None:\n ## autopadding with unset default attributes\n kwargs = {}\n if not all([s == 1 for s in strides]):\n kwargs[\"strides\"] = strides\n if not all([d == 1 for d in dilations]):\n kwargs[\"dilations\"] = dilations\n\n node = helper.make_node(\n \"ConvInteger\",\n inputs=input_names,\n outputs=[\"y\"],\n # Default values for other attributes:\n auto_pad=auto_pad,\n **kwargs,\n )\n else:\n node = helper.make_node(\n \"ConvInteger\",\n inputs=input_names,\n outputs=[\"y\"],\n kernel_shape=kernel_shape,\n # Default values for other attributes:\n strides=strides,\n dilations=dilations,\n # groups=1\n pads=padding,\n )\n\n graph = helper.make_graph(\n [node],\n \"convinteger_test\",\n inputs=input_nodes,\n outputs=[helper.make_tensor_value_info(\"y\", TensorProto.INT32, list(y_shape))],\n )\n model = helper.make_model(graph, producer_name=\"convinteger_test\")\n # opt_level=1 will cause error\n verify_with_ort_with_inputs(model, input_values, opt_level=2)\n\n\ndef test_convinteger():\n def repeat(N, D):\n return tuple([N for _ in range(D)])\n\n # only 
support 2D ConvInteger because we only support qnn.conv2d for now.\n D = 2\n\n # Convolution with padding\n verify_convinteger(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n 2 * repeat(1, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n\n # Convolution with asymmetric padding\n verify_convinteger(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(4, D),\n repeat(0, D) + repeat(1, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution without padding\n verify_convinteger(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n 2 * repeat(0, D),\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n )\n # Convolution with autopadding\n verify_convinteger(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with valid autopadding\n verify_convinteger(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n None,\n repeat(3, D),\n repeat(1, D),\n repeat(1, D),\n auto_pad=\"VALID\",\n )\n # Convolution with non uniform stride\n verify_convinteger(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(3, D),\n None,\n repeat(3, D),\n repeat(2, D),\n repeat(1, D),\n auto_pad=\"SAME_UPPER\",\n )\n # Convolution with dilation\n verify_convinteger(\n (1, 1) + repeat(5, D),\n (1, 1) + repeat(3, D),\n (1, 1) + repeat(5, D),\n 2 * repeat(2, D),\n repeat(3, D),\n repeat(1, D),\n repeat(2, D),\n )\n\n\nif __name__ == \"__main__\":\n test_flatten()\n test_reshape()\n test_shape()\n test_expand()\n test_power()\n test_squeeze()\n test_unsqueeze()\n test_slice()\n test_floor()\n test_ceil()\n test_round()\n test_isinf()\n test_isnan()\n test_clip()\n test_clip_min_max_as_inputs()\n test_onehot()\n test_gemm()\n test_matmul()\n test_gather()\n test_gatherelements()\n test_gather_nd()\n test_scatter()\n test_lrn()\n test_instance_norm()\n test_upsample()\n test_forward_min()\n test_forward_max()\n test_forward_mean()\n test_forward_hardsigmoid()\n test_forward_arg_min_max()\n test_softmax()\n test_constantofshape()\n test_all_reduce_funcs()\n test_pad()\n test_split()\n test_binary_ops()\n test_unary_ops()\n test_leaky_relu()\n test_elu()\n test_selu()\n test_prelu()\n test_ThresholdedRelu()\n test_LogSoftmax()\n test_resnet()\n test_inception()\n test_densenet()\n test_sign()\n test_not()\n test_and()\n test_tile()\n test_erf()\n test_where()\n test_or()\n test_depth_to_space()\n test_space_to_depth()\n test_batch_norm()\n test_batch_norm_dynamic_subgraph()\n test_conv()\n test_convtranspose()\n test_unsqueeze_constant()\n test_pooling()\n test_lppool()\n test_lstm()\n test_gru()\n test_resize()\n test_nonzero()\n test_topk()\n test_mod()\n test_xor()\n test_max_roi_pool()\n test_roi_align()\n test_range()\n test_loop()\n test_size()\n test_maxunpool()\n test_softplus()\n test_cumsum()\n test_wrong_input()\n test_aten()\n test_reverse_sequence()\n test_eyelike()\n test_qlinearconv()\n test_random_uniform()\n test_convinteger()\n test_batch_matmul()\n"
] |
[
[
"numpy.logical_xor",
"torch.randint",
"numpy.take",
"numpy.asarray",
"numpy.dtype",
"torch.set_grad_enabled",
"numpy.random.randn",
"numpy.exp",
"numpy.where",
"scipy.special.softmax",
"torch.nn.EmbeddingBag",
"numpy.random.randint",
"torch.onnx.export",
"numpy.pad",
"numpy.clip",
"torch.randn",
"numpy.eye",
"numpy.arange",
"numpy.matmul",
"numpy.repeat",
"scipy.special.erf",
"numpy.zeros",
"numpy.nonzero",
"numpy.power",
"numpy.random.choice",
"numpy.logical_or",
"torch.nn.Linear",
"numpy.ndim",
"numpy.random.rand",
"numpy.logical_and",
"numpy.array",
"numpy.sum",
"numpy.random.random",
"numpy.tile",
"numpy.ones",
"numpy.sign",
"numpy.random.normal",
"numpy.random.uniform"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
narendrameena/featuerSelectionAssignment
|
[
"d6f3be3953934b46eb59b23e3f66aee1273fa4e4"
] |
[
"comparision.py"
] |
[
"print(__doc__)\n\n\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import linalg\n\nfrom sklearn.linear_model import (RandomizedLasso, lasso_stability_path,\n LassoLarsCV)\nfrom sklearn.feature_selection import f_regression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import auc, precision_recall_curve\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.utils.extmath import pinvh\nfrom sklearn.utils import ConvergenceWarning\nfrom sklearn.datasets import load_svmlight_file\n\n\ndef mutual_incoherence(X_relevant, X_irelevant):\n \"\"\"Mutual incoherence, as defined by formula (26a) of [Wainwright2006].\n \"\"\"\n projector = np.dot(np.dot(X_irelevant.T, X_relevant),\n pinvh(np.dot(X_relevant.T, X_relevant)))\n return np.max(np.abs(projector).sum(axis=1))\n\n\nfor conditioning in (1, 1e-4):\n ###########################################################################\n # Simulate regression data with a correlated design\n n_features = 501\n n_relevant_features = 3\n noise_level = .2\n coef_min = .2\n # The Donoho-Tanner phase transition is around n_samples=25: below we\n # will completely fail to recover in the well-conditioned case\n n_samples = 25\n block_size = n_relevant_features\n\n rng = np.random.RandomState(42)\n #rng = load_svmlight_file(\"leu\")\n print(rng)\n # The coefficients of our model\n coef = np.zeros(n_features)\n coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)\n\n # The correlation of our design: variables correlated by blocs of 3\n corr = np.zeros((n_features, n_features))\n for i in range(0, n_features, block_size):\n corr[i:i + block_size, i:i + block_size] = 1 - conditioning\n corr.flat[::n_features + 1] = 1\n corr = linalg.cholesky(corr)\n\n # Our design\n X = rng.normal(size=(n_samples, n_features))\n X = np.dot(X, corr)\n # Keep [Wainwright2006] (26c) constant\n X[:n_relevant_features] /= np.abs(\n linalg.svdvals(X[:n_relevant_features])).max()\n X = StandardScaler().fit_transform(X.copy())\n\n # The output variable\n y = np.dot(X, coef)\n y /= np.std(y)\n # We scale the added noise as a function of the average correlation\n # between the design and the output variable\n y += noise_level * rng.normal(size=n_samples)\n mi = mutual_incoherence(X[:, :n_relevant_features],\n X[:, n_relevant_features:])\n\n ###########################################################################\n # Plot stability selection path, using a high eps for early stopping\n # of the path, to save computation time\n alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,\n eps=0.05)\n\n plt.figure()\n # We plot the path as a function of alpha/alpha_max to the power 1/3: the\n # power 1/3 scales the path less brutally than the log, and enables to\n # see the progression along the path\n hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')\n hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')\n ymin, ymax = plt.ylim()\n plt.xlabel(r'$(\\alpha / \\alpha_{max})^{1/3}$')\n plt.ylabel('Stability score: proportion of times selected')\n plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)\n plt.axis('tight')\n plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),\n loc='best')\n\n ###########################################################################\n # Plot the estimated stability scores for a given alpha\n\n # Use 6-fold cross-validation rather than the default 3-fold: it leads to\n # a better choice of 
alpha:\n # Stop the user warnings outputs- they are not necessary for the example\n # as it is specifically set up to be challenging.\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', UserWarning)\n warnings.simplefilter('ignore', ConvergenceWarning)\n lars_cv = LassoLarsCV(cv=6).fit(X, y)\n\n # Run the RandomizedLasso: we use a paths going down to .1*alpha_max\n # to avoid exploring the regime in which very noisy variables enter\n # the model\n alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)\n clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)\n trees = ExtraTreesRegressor(100).fit(X, y)\n # Compare with F-score\n F, _ = f_regression(X, y)\n\n plt.figure()\n for name, score in [('F-test', F),\n ('Stability selection', clf.scores_),\n ('Lasso coefs', np.abs(lars_cv.coef_)),\n ('Trees', trees.feature_importances_),\n ]:\n precision, recall, thresholds = precision_recall_curve(coef != 0,\n score)\n plt.semilogy(np.maximum(score / np.max(score), 1e-4),\n label=\"%s. AUC: %.3f\" % (name, auc(recall, precision)))\n\n plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',\n label=\"Ground truth\")\n plt.xlabel(\"Features\")\n plt.ylabel(\"Score\")\n # Plot only the 100 first coefficients\n plt.xlim(0, 100)\n plt.legend(loc='best')\n plt.title('Feature selection scores - Mutual incoherence: %.1f'\n % mi)\n\nplt.show()"
] |
[
[
"numpy.dot",
"matplotlib.pyplot.legend",
"sklearn.feature_selection.f_regression",
"numpy.linspace",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.where",
"sklearn.linear_model.LassoLarsCV",
"sklearn.linear_model.RandomizedLasso",
"sklearn.metrics.precision_recall_curve",
"numpy.std",
"matplotlib.pyplot.axis",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"sklearn.ensemble.ExtraTreesRegressor",
"scipy.linalg.svdvals",
"sklearn.metrics.auc",
"matplotlib.pyplot.show",
"numpy.random.RandomState",
"matplotlib.pyplot.ylabel",
"numpy.abs",
"sklearn.linear_model.lasso_stability_path",
"matplotlib.pyplot.xlim",
"scipy.linalg.cholesky",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
hadi-nayebi/SeqEN
|
[
"2885ab322f6b51821ce67a532265a0087668fe12"
] |
[
"SeqEN2/utils/seq_tools.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# by [email protected]\n__version__ = \"0.0.1\"\n\nfrom numpy.random import choice\nfrom torch import (\n Tensor,\n argmax,\n cat,\n diagonal,\n empty,\n eye,\n fliplr,\n index_select,\n mode,\n randperm,\n)\nfrom torch import sum as torch_sum\nfrom torch import tensor\nfrom torch.nn.functional import unfold\n\nfrom SeqEN2.protein.utils import ndx_to_seq\n\n\ndef consensus(output, ndx, device):\n output_length, w = output.shape\n seq_length = output_length + w - 1\n filter_size = min(seq_length - ndx, ndx + 1)\n if filter_size > w:\n filter_size = w\n r_min = max(0, ndx - w + 1)\n r_max = r_min + filter_size\n r_indices = tensor(range(r_min, r_max), device=device)\n c_min = max(0, ndx - output_length + 1)\n c_max = min(ndx, w - 1) + 1\n c_indices = tensor(range(c_min, c_max), device=device)\n sub_result = index_select(index_select(output, 0, r_indices), 1, c_indices)\n val = mode(diagonal(fliplr(fliplr(eye(filter_size, device=device).long()) * sub_result)))\n return val.values.item()\n\n\ndef get_seq(ndx, ndx_windows):\n output_length, w = ndx_windows.shape\n seq_length = output_length + w - 1\n if ndx < output_length:\n return ndx_windows[ndx][0]\n elif ndx < seq_length:\n return ndx_windows[-1][ndx - output_length + 1]\n else:\n raise IndexError(\n f\"index {ndx-output_length+1} is out of bounds for dimension 1 with size {w}\"\n )\n\n # add comment\n\n\ndef get_consensus_seq(seq, device):\n output_length, w = seq.shape\n seq_length = output_length + w - 1\n consensus_seq = empty(seq_length, device=device)\n for i in range(seq_length):\n consensus_seq[i] = consensus(seq, i, device=device)\n return consensus_seq\n\n\ndef consensus_acc(seq, output, w, device):\n output = output_to_ndx(output, w)\n output_length, w = output.shape\n seq_length = output_length + w - 1\n n = 0\n consensus_seq = empty(seq_length, device=device)\n for i in range(seq_length):\n consensus_seq[i] = consensus(output, i, device=device)\n if get_seq(i, seq).item() == consensus_seq[i]:\n n += 1\n return n / seq_length, consensus_seq\n\n\ndef sliding_window(input_vals, w, keys=None):\n assert isinstance(input_vals, Tensor)\n assert input_vals.shape[1] == 1, \"input shape must be (-1, 1)\"\n kernel_size = (input_vals.shape[1], w)\n input_vals = unfold(input_vals.float().T[None, None, :, :], kernel_size=kernel_size)[0].T\n input_ndx = input_vals[:, :w]\n if keys is not None:\n sliced_seq = []\n for item in input_ndx:\n sliced_seq.append(ndx_to_seq(item, keys))\n return sliced_seq\n return input_ndx\n\n\ndef output_to_ndx(output, w):\n return argmax(output, dim=1).reshape((-1, w))\n\n\ndef reconstructor_acc(output, input_ndx):\n return torch_sum(argmax(output, dim=1) == input_ndx.reshape((-1,))) / output.shape[0]\n\n\ndef add_noise(one_hot_input, input_noise, device):\n ndx = randperm(one_hot_input.shape[1])\n size = list(one_hot_input.shape)\n size[-1] = 1\n p = tensor(choice([1, 0], p=[input_noise, 1 - input_noise], size=size)).to(device)\n return (one_hot_input[:, ndx, :] * p) + (one_hot_input * (1 - p))\n\n\ndef slide_window(input_vals, w):\n kernel_size = (input_vals.shape[1], w)\n return unfold(input_vals.float().T[None, None, :, :], kernel_size=kernel_size)[0].T\n\n\ndef split_input_vals(input_vals, input_keys, w):\n target_vals_ss = None\n target_vals_cl = None\n if len(input_keys) == 2:\n if input_keys[1] == \"S\":\n target_vals_ss = input_vals[:, w:].long()\n elif input_keys[1] == \"C\":\n target_vals_cl = input_vals[:, w:].mean(axis=1).reshape((-1, 1))\n target_vals_cl = 
cat((target_vals_cl, 1 - target_vals_cl), 1).float()\n elif len(input_keys) == 3:\n if input_keys[1] == \"S\":\n target_vals_ss = input_vals[:, w:-w].long()\n elif input_keys[2] == \"S\":\n target_vals_ss = input_vals[:, -w:].long()\n if input_keys[1] == \"C\":\n target_vals_cl = input_vals[:, w:-w].mean(axis=1).reshape((-1, 1))\n target_vals_cl = cat((target_vals_cl, 1 - target_vals_cl), 1).float()\n elif input_keys[2] == \"C\":\n target_vals_cl = input_vals[:, -w:].mean(axis=1).reshape((-1, 1))\n target_vals_cl = cat((target_vals_cl, 1 - target_vals_cl), 1).float()\n return target_vals_ss, target_vals_cl\n\n\ndef continuity_target_right(encoded_output):\n return cat((encoded_output[1:], encoded_output[-1].unsqueeze(0)), 0)\n\n\ndef continuity_target_left(encoded_output):\n return cat((encoded_output[0].unsqueeze(0), encoded_output[:-1]), 0)\n"
] |
[
[
"torch.empty",
"numpy.random.choice",
"torch.randperm",
"torch.cat",
"torch.eye",
"torch.index_select",
"torch.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gauss314/juanpy
|
[
"5e3f1a747e660786f5f99ea140f238247923b9d5"
] |
[
"juanpy/max_min.py"
] |
[
"import matplotlib.pyplot as plt\nimport pandas as pd\nimport yfinance as yf\nimport datetime \n\ndef trends(ticker, start='2000-01-01', end=None, sensibilidad=None, escala='linear'):\n '''\n Encuentra maximos y mínimos locales dada una sensibilidad (cantidad de velas de la ventana local)\n Gráfico 1: Precios de cierre con mínimos y máximos encontrados\n Grafico 2: Precios de cierre con lineas de soportes y resistencias desde cada máximo y mínimo encontrado\n Gráfico 3: Precios de cierre con zona alcista, bajista o lateral en función de las pendientes de los últimos pares de mínimos y máximos encontrados \n '''\n\n if not end:\n end = datetime.datetime.now().strftime('%Y-%m-%d')\n \n data = yf.download(ticker, auto_adjust=True, start=start, end=end)\n k = sensibilidad if sensibilidad else len(data)//40\n \n r_max = data.Close.rolling(k).max()\n max_back = (data.Close == r_max)\n max_fw = max_back.shift(-k).rolling(k).sum() == 0\n data['loc_max'] = max_back & max_fw\n\n r_min = data.Close.rolling(k).min()\n min_back = (data.Close == r_min)\n min_fw = min_back.shift(-k).rolling(k).sum() == 0\n data['loc_min'] = min_back & min_fw\n\n pmin = data.loc[data['loc_min']==True].Close.pct_change()\n pmax = data.loc[data['loc_max']==True].Close.pct_change()\n vmin = r_min.loc[data['loc_min']==True]\n vmax = r_max.loc[data['loc_max']==True]\n\n r = pd.concat([pmin,pmax,vmin,vmax], axis=1, keys=['pMin','pMax','vMin','vMax'])\n ultima_pmax = r.pMax.fillna(method='ffill')\n ultima_pmin = r.pMin.fillna(method='ffill')\n\n r['tipo'] = 0\n r.loc[(ultima_pmax > 0) & (ultima_pmin > 0),'tipo'] = 1\n r.loc[(ultima_pmax < 0) & (ultima_pmin < 0),'tipo'] = -1\n\n data = pd.concat([data,r], axis=1).fillna(method='ffill')\n fig, ax = plt.subplots(figsize=(15,12), nrows=3)\n \n ax[0].plot(data.Close, 'k', lw=0.5)\n ax[0].plot(data.Close.loc[data.loc_max==True], lw=0, marker='o', markersize=15, alpha=0.5, color='green')\n ax[0].plot(data.Close.loc[data.loc_min==True], lw=0, marker='o', markersize=15, alpha=0.5, color='red')\n\n ax[1].plot(data.Close, 'k', lw=0.5)\n ax[1].plot(data.vMin, 'r--', lw=1.5, alpha=1)\n ax[1].plot(data.vMax, 'g--', lw=1.5, alpha=1)\n \n ax[2].fill_between(data.index,data.Close, where=data.tipo==1, color='tab:green', alpha=0.7)\n ax[2].fill_between(data.index,data.Close, where=data.tipo==-1, color='tab:red', alpha=0.7)\n ax[2].fill_between(data.index,data.Close, where=data.tipo==0, color='gray', alpha=0.2)\n ax[2].plot(data.Close, 'k', lw=0.5, alpha=1)\n\n titulos = [f'Máximos y mínimos locales (ventana {k} velas)','Soportes y Resistencias','Zonas por Tendencia Post-Observación pares de Min y Max']\n for i in range(3):\n ax[i].set_yscale(escala)\n ax[i].set_title(titulos[i], y=0.88, fontsize=16, color='gray')\n\n plt.subplots_adjust(hspace=0)\n return data.dropna()\n\n\n\ndef waves(ticker, start='2000-01-01', end=None, sensibilidad=None, escala='linear'):\n \n if not end:\n end = datetime.datetime.now().strftime('%Y-%m-%d')\n \n data = yf.download(ticker, auto_adjust=True, start=start, end=end)\n k = sensibilidad if sensibilidad else len(data)//40\n\n r_max = data.Close.rolling(k).max()\n max_back = (data.Close == r_max)\n max_fw = max_back.shift(-k).rolling(k).sum() == 0\n data['loc_max'] = max_back & max_fw\n\n r_min = data.Close.rolling(k).min()\n min_back = (data.Close == r_min)\n min_fw = min_back.shift(-k).rolling(k).sum() == 0\n data['loc_min'] = min_back & min_fw\n\n vmin = r_min.loc[data['loc_min']==True]\n vmax = r_max.loc[data['loc_max']==True]\n\n r = pd.concat([vmin,vmax], axis=1, 
keys=['vMin','vMax'])\n r['fecha'] = r.index\n for idx, row in r.iterrows():\n if (r.loc[idx].vMin>0) & (r.shift().loc[idx].vMin>0):\n fmax = data.loc[(data.index > r.fecha.shift().loc[idx]) & (data.index < row['fecha']) ].Close.idxmax()\n vmax = data.loc[(data.index > r.fecha.shift().loc[idx]) & (data.index < row['fecha']) ].max().Close\n d = pd.DataFrame({'vMax':vmax, 'fecha':fmax}, index=[fmax])\n r = pd.concat([r,d],sort=False)\n if (r.loc[idx].vMax>0) & (r.shift().loc[idx].vMax>0):\n fmin = data.loc[(data.index > r.fecha.shift().loc[idx]) & (data.index < row['fecha']) ].Close.idxmin()\n vmin = data.loc[(data.index > r.fecha.shift().loc[idx]) & (data.index < row['fecha']) ].min().Close\n d = pd.DataFrame({'vMin':vmin, 'fecha':fmin}, index=[fmin])\n r = pd.concat([r,d],sort=False) \n r.sort_index(inplace=True)\n r['valor'] = r[['vMin','vMax']].max(axis=1)\n\n data = pd.concat([data,r], axis=1).fillna(method='ffill')\n fig, ax = plt.subplots(figsize=(15,6), nrows=1)\n \n ax.plot(data.Close, 'k', lw=0.5, alpha=0.2)\n ax.plot(r.vMax, marker='o', markersize=10, alpha=0.5, color='k')\n ax.plot(r.vMin, marker='o', markersize=10, alpha=0.5, color='k')\n ax.plot(r.valor, '--k', lw=1)\n\n ax.set_yscale(escala)\n ax.set_title('Ondas', y=0.88, fontsize=16, color='gray')\n\n plt.subplots_adjust(hspace=0)\n return r\n\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()"
] |
[
[
"pandas.concat",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pyplot.subplots_adjust"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fancompute/simphox
|
[
"917673cc3ef8fb54fcbbaaa93b8efdc09a8e3614",
"917673cc3ef8fb54fcbbaaa93b8efdc09a8e3614"
] |
[
"simphox/viz.py",
"tests/circuit_test.py"
] |
[
"import numpy as np\nfrom typing import Dict\n\nimport xarray\nfrom .typing import Tuple, Optional, List\n\ntry:\n HOLOVIEWS_IMPORTED = True\n import holoviews as hv\n from holoviews.streams import Pipe\n from holoviews import opts\n import panel as pn\n from bokeh.models import Range1d, LinearAxis\n from bokeh.models.renderers import GlyphRenderer\n from bokeh.plotting.figure import Figure\nexcept ImportError:\n HOLOVIEWS_IMPORTED = False\n\ntry:\n K3D_IMPORTED = True\n import k3d\n from k3d import Plot\nexcept ImportError:\n K3D_IMPORTED = False\n\nfrom matplotlib import colors as mcolors\n\n\ndef _plot_twinx_bokeh(plot, _):\n \"\"\"Hook to plot data on a secondary (twin) axis on a Holoviews Plot with Bokeh backend.\n\n Args:\n plot: Holoviews plot object to hook for twinx\n\n See Also:\n The code was copied from a comment in https://github.com/holoviz/holoviews/issues/396.\n - http://holoviews.org/user_guide/Customizing_Plots.html#plot-hooks\n - https://docs.bokeh.org/en/latest/docs/user_guide/plotting.html#twin-axes\n\n \"\"\"\n fig: Figure = plot.state\n glyph_first: GlyphRenderer = fig.renderers[0] # will be the original plot\n glyph_last: GlyphRenderer = fig.renderers[-1] # will be the new plot\n right_axis_name = \"twiny\"\n # Create both axes if right axis does not exist\n if right_axis_name not in fig.extra_y_ranges.keys():\n # Recreate primary axis (left)\n y_first_name = glyph_first.glyph.y\n y_first_min = glyph_first.data_source.data[y_first_name].min()\n y_first_max = glyph_first.data_source.data[y_first_name].max()\n y_first_offset = (y_first_max - y_first_min) * 0.1\n fig.y_range = Range1d(\n start=y_first_min - y_first_offset,\n end=y_first_max + y_first_offset\n )\n fig.y_range.name = glyph_first.y_range_name\n # Create secondary axis (right)\n y_last_name = glyph_last.glyph.y\n y_last_min = glyph_last.data_source.data[y_last_name].min()\n y_last_max = glyph_last.data_source.data[y_last_name].max()\n y_last_offset = (y_last_max - y_last_min) * 0.1\n fig.extra_y_ranges = {right_axis_name: Range1d(\n start=y_last_min - y_last_offset,\n end=y_last_max + y_last_offset\n )}\n fig.add_layout(LinearAxis(y_range_name=right_axis_name, axis_label=glyph_last.glyph.y), \"right\")\n # Set right axis for the last glyph added to the figure\n glyph_last.y_range_name = right_axis_name\n\n\ndef get_extent_2d(shape, spacing: Optional[float] = None):\n \"\"\"2D extent\n\n Args:\n shape: shape of the elements to plot\n spacing: spacing between grid points (assumed to be isotropic)\n\n Returns:\n The extent in 2D.\n\n \"\"\"\n return (0, shape[0] * spacing, 0, shape[1] * spacing) if spacing else (0, shape[0], 0, shape[1])\n\n\ndef plot_eps_2d(ax, eps: np.ndarray, spacing: Optional[float] = None, cmap: str = 'gray'):\n \"\"\"Plot eps in 2D\n\n Args:\n ax: Matplotlib axis handle\n eps: epsilon permittivity\n spacing: spacing between grid points (assumed to be isotropic)\n cmap: colormap for field array (we highly recommend RdBu)\n\n \"\"\"\n extent = get_extent_2d(eps.shape, spacing)\n if spacing: # in microns!\n ax.set_ylabel(r'$y$ ($\\mu$m)')\n ax.set_xlabel(r'$x$ ($\\mu$m)')\n ax.imshow(eps.T, cmap=cmap, origin='lower', alpha=1, extent=extent)\n\n\ndef plot_field_2d(ax, field: np.ndarray, eps: Optional[np.ndarray] = None, spacing: Optional[float] = None,\n cmap: str = 'RdBu', mat_cmap: str = 'gray', alpha: float = 0.8):\n \"\"\"Plot field in 2D\n\n Args:\n ax: Matplotlib axis handle\n field: field to plot\n eps: epsilon permittivity for overlaying field onto materials\n spacing: spacing 
between grid points (assumed to be isotropic)\n cmap: colormap for field array (we highly recommend RdBu)\n mat_cmap: colormap for eps array (we recommend gray)\n alpha: transparency of the plots to visualize overlay\n\n \"\"\"\n extent = get_extent_2d(field.shape, spacing)\n if spacing: # in microns!\n ax.set_ylabel(r'$y$ ($\\mu$m)')\n ax.set_xlabel(r'$x$ ($\\mu$m)')\n if eps is not None:\n plot_eps_2d(ax, eps, spacing, mat_cmap)\n im_val = field * np.sign(field.flat[np.abs(field).argmax()])\n norm = mcolors.TwoSlopeNorm(vcenter=0, vmin=-im_val.max(), vmax=im_val.max())\n ax.imshow(im_val.T, cmap=cmap, origin='lower', alpha=alpha, extent=extent, norm=norm)\n\n\ndef plot_eps_1d(ax, eps: Optional[np.ndarray], spacing: Optional[float] = None,\n color: str = 'blue', units: str = \"$\\mu$m\", axis_label_rotation: float = 90):\n \"\"\"Plot eps in 1D.\n\n Args:\n ax: Matplotlib axis handle\n eps: epsilon permittivity for overlaying field onto materials\n spacing: spacing between grid points (assumed to be isotropic)\n color: Color to plot the epsilon\n units: Units for plotting (default microns)\n axis_label_rotation: Rotate the axis label in case a plot is made with shared axes.\n\n \"\"\"\n x = np.arange(eps.shape[0]) * spacing\n if spacing:\n ax.set_xlabel(rf'$x$ ({units})')\n ax.set_ylabel(rf'Relative permittivity ($\\epsilon$)', color=color,\n rotation=axis_label_rotation, labelpad=15)\n ax.plot(x, eps, color=color)\n ax.tick_params(axis='y', labelcolor=color)\n\n\ndef plot_field_1d(ax, field: np.ndarray, field_name: str, eps: Optional[np.ndarray] = None,\n spacing: Optional[float] = None, color: str = 'red', eps_color: str = 'blue',\n units: str = \"$\\mu$m\"):\n \"\"\"Plot field in 1D\n\n Args:\n ax: Matplotlib axis handle.\n field: Field to plot.\n field_name: Name of the field being plotted\n spacing: spacing between grid points (assumed to be isotropic).\n color: Color to plot the epsilon\n units: Units for plotting (default microns)\n\n \"\"\"\n x = np.arange(field.shape[0]) * spacing\n if spacing: # in microns!\n ax.set_xlabel(rf'$x$ ({units})')\n ax.set_ylabel(rf'{field_name}', color=color)\n ax.plot(x, field.real, color=color)\n ax.tick_params(axis='y', labelcolor=color)\n if eps is not None:\n ax_eps = ax.twinx()\n plot_eps_1d(ax_eps, eps, spacing, eps_color, units, axis_label_rotation=270)\n\n\ndef hv_field_1d(field: np.ndarray, eps: Optional[np.ndarray] = None, spacing: Optional[float] = None,\n width: float = 600):\n x = np.arange(field.shape[0]) * spacing\n field = field.squeeze().real / np.max(np.abs(field))\n c1 = hv.Curve((x, (field + 1) / 2), kdims='x', vdims='field').opts(\n width=width, show_grid=True, framewise=True, yaxis='left', ylim=(-1, 1))\n c2 = hv.Curve((x, eps), kdims='x', vdims='eps').opts(width=width, show_grid=True, framewise=True, color='red',\n hooks=[_plot_twinx_bokeh])\n return c1 * c2\n\n\ndef hv_field_2d(field: np.ndarray, eps: Optional[np.ndarray] = None, spacing: Optional[float] = None,\n cmap: str = 'RdBu', mat_cmap: str = 'gray', alpha: float = 0.2, width: float = 600):\n extent = get_extent_2d(field.squeeze().T.shape, spacing)\n bounds = (extent[0], extent[2], extent[1], extent[3])\n aspect = (extent[3] - extent[2]) / (extent[1] - extent[0])\n field_img = hv.Image(field.squeeze().T.real / np.max(np.abs(field)),\n bounds=bounds, vdims='field').opts(cmap=cmap, aspect=aspect, frame_width=width)\n eps_img = hv.Image(eps.T / np.max(eps), bounds=bounds).opts(cmap=mat_cmap, alpha=alpha, aspect=aspect, frame_width=width)\n return 
field_img.redim.range(field=(-1, 1)) * eps_img\n\n\ndef hv_power_1d(power: np.ndarray, eps: Optional[np.ndarray] = None, spacing: Optional[float] = None,\n width: float = 600):\n x = np.arange(power.shape[0]) * spacing\n power = power.squeeze().real / np.max(np.abs(power))\n c1 = hv.Curve((x, power), kdims='x', vdims='field').opts(width=width, show_grid=True, framewise=True,\n yaxis='left', ylim=(-1, 1))\n c2 = hv.Curve((x, eps), kdims='x', vdims='eps').opts(width=width, show_grid=True, framewise=True, color='red',\n hooks=[_plot_twinx_bokeh])\n return c1 * c2\n\n\ndef hv_power_2d(power: np.ndarray, eps: Optional[np.ndarray] = None, spacing: Optional[float] = None,\n cmap: str = 'hot', mat_cmap: str = 'gray', alpha: float = 0.2, width: float = 600):\n extent = get_extent_2d(power.squeeze().T.shape, spacing)\n bounds = (extent[0], extent[2], extent[1], extent[3])\n aspect = (extent[3] - extent[2]) / (extent[1] - extent[0])\n power_img = hv.Image(power.squeeze().T.real / np.max(np.abs(power)),\n bounds=bounds, vdims='power').opts(cmap=cmap, aspect=aspect, frame_width=width)\n eps_img = hv.Image(eps.T / np.max(eps), bounds=bounds).opts(cmap=mat_cmap, alpha=alpha,\n aspect=aspect, frame_width=width)\n return power_img.redim.range(power=(0, 1)) * eps_img\n\n\ndef plot_power_2d(ax, power: np.ndarray, eps: Optional[np.ndarray] = None, spacing: Optional[float] = None,\n cmap: str = 'hot', mat_cmap: str = 'gray', alpha: float = 0.8):\n \"\"\"Plot the power (computed using Poynting) in 2D\n\n Args:\n ax: Matplotlib axis handle\n power: power array of size (X, Y)\n eps: epsilon for overlay with materials\n spacing: spacing between grid points (assumed to be isotropic)\n cmap: colormap for power array\n mat_cmap: colormap for eps array (we recommend gray)\n alpha: transparency of the plots to visualize overlay\n\n \"\"\"\n extent = get_extent_2d(power.shape, spacing)\n if spacing: # in microns!\n ax.set_ylabel(r'$y$ ($\\mu$m)')\n ax.set_xlabel(r'$x$ ($\\mu$m)')\n if eps is not None:\n plot_eps_2d(ax, eps, spacing, mat_cmap)\n ax.imshow(power.T, cmap=cmap, origin='lower', alpha=alpha, extent=extent)\n\n\ndef plot_power_3d(plot: \"Plot\", power: np.ndarray, eps: Optional[np.ndarray] = None, axis: int = 0,\n spacing: float = 1, color_range: Tuple[float, float] = None, alpha: float = 100,\n samples: float = 1200):\n \"\"\"Plot the 3d power in a notebook given the fields :math:`E` and :math:`H`.\n\n Args:\n plot: K3D plot handle (NOTE: this is for plotting in a Jupyter notebook)\n power: power (either Poynting field of size (3, X, Y, Z) or power of size (X, Y, Z))\n eps: permittivity (if specified, plot with default options)\n axis: pick the correct axis if inputting power in Poynting field\n spacing: spacing between grid points (assumed to be isotropic)\n color_range: color range for visualization (if none, use half maximum value of field)\n alpha: alpha for k3d plot\n samples: samples for k3d plot rendering\n\n Returns:\n\n \"\"\"\n\n if not K3D_IMPORTED:\n raise ImportError(\"Need to install k3d for this function to work.\")\n\n power = power[axis] if power.ndim == 4 else power\n color_range = (0, np.max(power) / 2) if color_range is None else color_range\n\n if eps is not None:\n plot_eps_3d(plot, eps, spacing=spacing) # use defaults for now\n\n power_volume = k3d.volume(\n power.transpose((2, 1, 0)),\n alpha_coef=alpha,\n samples=samples,\n color_range=color_range,\n color_map=(np.array(k3d.colormaps.matplotlib_color_maps.hot).reshape(-1, 4)).astype(np.float32),\n compression_level=8,\n name='power'\n 
)\n\n bounds = [0, power.shape[0] * spacing, 0, power.shape[1] * spacing, 0, power.shape[2] * spacing]\n power_volume.transform.bounds = bounds\n plot += power_volume\n\n\ndef plot_field_3d(plot: \"Plot\", field: np.ndarray, eps: Optional[np.ndarray] = None, axis: int = 1,\n imag: bool = False, spacing: float = 1,\n alpha: float = 100, samples: float = 1200, color_range: Tuple[float, float] = None):\n \"\"\"\n\n Args:\n plot: K3D plot handle (NOTE: this is for plotting in a Jupyter notebook)\n field: field to plot\n eps: permittivity (if specified, plot with default options)\n axis: pick the correct axis for power in Poynting vector form\n imag: whether to use the imaginary (instead of real) component of the field\n spacing: spacing between grid points (assumed to be isotropic)\n color_range: color range for visualization (if none, use half maximum value of field)\n alpha: alpha for k3d plot\n samples: samples for k3d plot rendering\n\n Returns:\n\n \"\"\"\n\n if not K3D_IMPORTED:\n raise ImportError(\"Need to install k3d for this function to work.\")\n\n field = field[axis] if field.ndim == 4 else field\n field = field.imag if imag else field.real\n color_range = np.asarray((0, np.max(field)) if color_range is None else color_range)\n\n if eps is not None:\n plot_eps_3d(plot, eps, spacing=spacing) # use defaults for now\n\n bounds = [0, field.shape[0] * spacing, 0, field.shape[1] * spacing, 0, field.shape[2] * spacing]\n\n pos_e_volume = k3d.volume(\n volume=field.transpose((2, 1, 0)),\n alpha_coef=alpha,\n samples=samples,\n color_range=color_range,\n color_map=(np.array(k3d.colormaps.matplotlib_color_maps.RdBu).reshape(-1, 4)).astype(np.float32),\n compression_level=8,\n name='pos'\n )\n\n neg_e_volume = k3d.volume(\n volume=-field.transpose((2, 1, 0)),\n alpha_coef=alpha,\n samples=1200,\n color_range=color_range,\n color_map=(np.array(k3d.colormaps.matplotlib_color_maps.RdBu_r).reshape(-1, 4)).astype(np.float32),\n compression_level=8,\n name='neg'\n )\n\n neg_e_volume.transform.bounds = bounds\n pos_e_volume.transform.bounds = bounds\n\n plot += neg_e_volume\n plot += pos_e_volume\n\n\ndef plot_eps_3d(plot: \"Plot\", eps: Optional[np.ndarray] = None, spacing: float = 1,\n color_range: Tuple[float, float] = None, alpha: float = 100, samples: float = 1200):\n \"\"\"\n\n Args:\n plot: K3D plot handle (NOTE: this is for plotting in a Jupyter notebook)\n eps: relative permittivity\n spacing: spacing between grid points (assumed to be isotropic)\n color_range: color range for visualization (if none, use half maximum value of field)\n alpha: alpha for k3d plot\n samples: samples for k3d plot rendering\n\n Returns:\n\n \"\"\"\n\n if not K3D_IMPORTED:\n raise ImportError(\"Need to install k3d for this function to work.\")\n\n color_range = (1, np.max(eps)) if color_range is None else color_range\n\n eps_volume = k3d.volume(\n eps.transpose((2, 1, 0)),\n alpha_coef=alpha,\n samples=samples,\n color_map=(np.array(k3d.colormaps.matplotlib_color_maps.Greens).reshape(-1, 4)).astype(np.float32),\n compression_level=8,\n color_range=color_range,\n name='epsilon'\n )\n\n bounds = [0, eps.shape[0] * spacing, 0, eps.shape[1] * spacing, 0, eps.shape[2] * spacing]\n eps_volume.transform.bounds = bounds\n plot += eps_volume\n\n\ndef scalar_metrics_viz(metric_config: Dict[str, List[str]]):\n if not HOLOVIEWS_IMPORTED:\n raise ImportError(\"Holoviews not imported, cannot visualize\")\n metrics_pipe = {title: Pipe(data=xarray.DataArray(\n data=np.asarray([[] for _ in metric_config[title]]),\n coords={\n 
'metric': metric_config[title],\n 'iteration': np.arange(0)\n },\n dims=['metric', 'iteration'],\n name=title\n )) for title in metric_config}\n metrics_dmaps = [\n hv.DynamicMap(lambda data: hv.Dataset(data).to(hv.Curve, kdims=['iteration']).overlay('metric'),\n streams=[metrics_pipe[title]]).opts(opts.Curve(framewise=True, shared_axes=False, title=title))\n for title in metric_config\n ]\n return pn.Row(*metrics_dmaps), metrics_pipe\n",
"from itertools import product, zip_longest\n\nimport numpy as np\nimport pytest\nfrom scipy.stats import unitary_group\n\nfrom simphox.circuit import cascade, vector_unit, rectangular, balanced_tree\nfrom simphox.circuit.vector import _program_vector_unit, tree, hessian_fd, hessian_vector_unit, PhaseStyle\nfrom simphox.utils import random_vector\n\nimport copy\n\nnp.random.seed(0)\n\nN = [2, 4, 7, 10, 15, 16]\n\nRAND_VECS = [random_vector(n, normed=True) for n in N]\nRAND_UNITARIES = [unitary_group.rvs(n) for n in N]\n\n\[email protected](\n \"n, balanced, expected_node_idxs, expected_num_columns, expected_num_top, expected_num_bottom\",\n [\n (6, True, [2, 0, 1, 3, 4], 3, [3, 1, 1, 1, 1], [3, 2, 1, 2, 1]),\n (8, True, [3, 1, 0, 2, 5, 4, 6], 3, [4, 2, 1, 1, 2, 1, 1], [4, 2, 1, 1, 2, 1, 1]),\n (11, True, [4, 1, 0, 2, 3, 7, 5, 6, 8, 9], 4, [5, 2, 1, 1, 1, 3, 1, 1, 1, 1], [6, 3, 1, 2, 1, 3, 2, 1, 2, 1]),\n (6, False, [4, 3, 2, 1, 0], 5, [1, 2, 3, 4, 5], [1, 1, 1, 1, 1]),\n (8, False, [6, 5, 4, 3, 2, 1, 0], 7, [1, 2, 3, 4, 5, 6, 7], [1, 1, 1, 1, 1, 1, 1]),\n (11, False, [9, 8, 7, 6, 5, 4, 3, 2, 1, 0], 10, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n ]\n)\ndef test_tree_network(n: int, balanced: bool, expected_node_idxs: np.ndarray, expected_num_columns: np.ndarray,\n expected_num_top: np.ndarray, expected_num_bottom: np.ndarray):\n circuit = tree(n, balanced=balanced)\n np.testing.assert_allclose(circuit.node_idxs, expected_node_idxs)\n np.testing.assert_allclose(circuit.num_columns, expected_num_columns)\n np.testing.assert_allclose(circuit.beta, expected_num_top)\n np.testing.assert_allclose(circuit.alpha, expected_num_bottom)\n\n\[email protected](\n \"v, balanced, phase_style\",\n product(RAND_VECS, [True, False], [PhaseStyle.TOP, PhaseStyle.BOTTOM])\n)\ndef test_vector_configure(v: np.ndarray, balanced: bool, phase_style: PhaseStyle):\n circuit, _ = vector_unit(v, balanced=balanced)\n res = circuit.matrix_fn(use_jax=False)(circuit.params) @ v\n np.testing.assert_allclose(res, np.eye(v.size)[v.size - 1], atol=1e-10)\n\n\[email protected](\n \"u, balanced\",\n product(RAND_UNITARIES, [True, False])\n)\ndef test_unitary_configure(u: np.ndarray, balanced: bool):\n circuit = cascade(u, balanced=balanced)\n np.testing.assert_allclose(circuit.matrix(), u, atol=1e-10)\n\n\[email protected](\n \"u, num_columns\",\n zip_longest(RAND_UNITARIES, [2 * n - 3 for n in N])\n)\ndef test_triangular_columns(u: np.ndarray, num_columns: int):\n circuit = cascade(u, balanced=False)\n np.testing.assert_allclose(circuit.num_columns, num_columns, atol=1e-10)\n\n\[email protected](\n \"u, num_columns\",\n zip_longest(RAND_UNITARIES, [1, 5, 14, 25, 45, 49])\n)\ndef test_cascade_columns(u: np.ndarray, num_columns: int):\n circuit = cascade(u, balanced=True)\n np.testing.assert_allclose(circuit.num_columns, num_columns, atol=1e-10)\n\n\[email protected](\n \"u\", RAND_UNITARIES\n)\ndef test_rectangular(u: np.ndarray):\n circuit = rectangular(u)\n np.testing.assert_allclose(circuit.matrix(), u, atol=1e-10)\n\n\[email protected](\n \"u\", RAND_UNITARIES\n)\ndef test_inverse(u: np.ndarray):\n circuit = rectangular(u)\n np.testing.assert_allclose(circuit.matrix(), circuit.matrix(back=True).T, atol=1e-10)\n\n\[email protected](\n \"u\", RAND_UNITARIES\n)\ndef test_program_null_basis(u: np.ndarray):\n circuit = rectangular(u)\n basis = circuit.nullification_basis\n params = copy.deepcopy(circuit.params)\n circuit.program_by_null_basis(basis)\n for param, param_expected in zip(params, 
circuit.params):\n np.testing.assert_allclose(param, param_expected, atol=1e-10)\n\n\[email protected](\n \"u\", RAND_UNITARIES\n)\ndef test_error_correction(u: np.ndarray):\n tree = balanced_tree(u, error_mean_std=(0, 0.1))\n x = tree.matrix()[-1]\n tree_corrected = _program_vector_unit(x.conj(), copy.deepcopy(tree))[0]\n np.testing.assert_allclose(np.abs(tree_corrected.matrix_fn(inputs=x.conj())(tree_corrected.params)[-1]), 1)\n\n\[email protected](\n \"u, balanced\", product(RAND_UNITARIES[:3], [True, False])\n)\ndef test_hessian(u: np.ndarray, balanced: bool):\n h = hessian_vector_unit(u[0], balanced=balanced)\n h_fd = hessian_fd(u[0], balanced=balanced)\n np.testing.assert_allclose(h, h_fd, atol=1e-4)\n\n\[email protected](\n \"u, balanced\", product(RAND_UNITARIES[:3], [True, False])\n)\ndef test_hessian_correlated_error(u: np.ndarray, balanced: bool):\n mesh = balanced_tree(u[0])\n vhat = mesh.matrix(params=(mesh.thetas + 0.0001, mesh.phis + 0.0001, mesh.gammas))[-1]\n np.testing.assert_allclose(\n np.sum(hessian_vector_unit(u[0], balanced=True)),\n 2 * np.linalg.norm(u[0] - vhat) ** 2 / 0.0001 ** 2, atol=1e-3\n )\n"
] |
[
[
"numpy.abs",
"numpy.asarray",
"numpy.arange",
"numpy.max",
"numpy.array"
],
[
"numpy.random.seed",
"numpy.eye",
"numpy.linalg.norm",
"numpy.testing.assert_allclose",
"scipy.stats.unitary_group.rvs"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yinanl/rocs
|
[
"bf2483903e39f4c0ea254a9ef56720a1259955ad",
"bf2483903e39f4c0ea254a9ef56720a1259955ad"
] |
[
"examples/Moore-Greitzer/sim_abst.py",
"examples/scara/animation.py"
] |
[
"from os.path import dirname,realpath\nimport numpy as np\nfrom scipy.integrate import solve_ivp\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport sys\npypath = dirname(dirname(dirname(realpath(__file__)))) + '/python/'\nsys.path.insert(1, pypath)\nimport utils\nfrom odes import MG\n\n\ndirpath = dirname(realpath(__file__))\nctlrfile = \"/controller_abstI_0.00009.h5\"\nspecfile = \"/FGb.txt\"\n# ctlrfile = \"/controller_Gb_abst_0.00018.h5\"\n# specfile = \"/Gb.txt\"\n\n\n# # Set up state space\ngoal_real = np.array([[0.5009, 0.5069], [0.6575, 0.6635]])\nobs = np.array([[0.520, 0.526], [0.658, 0.664]])\n\n\n# # Read the specification file\ndba = utils.read_spec_from_txt(dirpath+specfile)\n\n\n# # Read controller file and problem settings\ntau, X, eta, _, winids, controller = \\\n utils.read_controller_abst_from_h5(dirpath+ctlrfile)\n\n\n# # Compute the percentage of winning set on the state space\nwinset = controller.xgrid[winids, :]\nprint(\"\\nWinning set coverage:\")\nwinper = \"{:.2%}\".format(winids.size/controller.xgrid.shape[0])\nprint(winper)\n\n\n\n# # x-y 2D plot of state space\nfig, ax = plt.subplots()\nax.set_xlim(X[0, 0], X[0, 1])\nax.set_ylim(X[1, 0], X[1, 1])\nrect_goal = patches.Rectangle((goal_real[0, 0], goal_real[1, 0]),\n goal_real[0, 1]-goal_real[0, 0],\n goal_real[1, 1]-goal_real[1, 0],\n linewidth=1.5, edgecolor='g', fill=False)\nrect_obs = patches.Rectangle((obs[0, 0], obs[1, 0]),\n obs[0, 1]-obs[0, 0],\n obs[1, 1]-obs[1, 0],\n linewidth=1, edgecolor='k',\n fill=True, facecolor='k')\nax.add_patch(rect_goal)\nax.add_patch(rect_obs)\n# ax.add_collection(\n# utils.polycoll_grid_array(winset, eta, True, 'palegoldenrod', 0.7)\n# )\n\n\n# # Simulation\nTsim = 20\nx0 = np.array([0.5343, 0.6553]) # for FGb.txt\n# x0 = np.array([0.5056, 0.6595]) # for Gb.txt\n\nrng = np.random.default_rng()\nt = 0\nx = x0\nq = dba.q0\ntsim = []\nxsim = []\nusim = []\nqsim = []\n\nwhile(t < Tsim):\n i = utils.index_in_grid(x, controller.xgrid) # convert x to node id\n\n p5 = controller.nts_ctrlr[controller.encode3[i], :]\n p7 = controller.ctlr[p5[2]:p5[2]+p5[0], :]\n uset = np.argwhere(p7[:, 0] == q).squeeze()\n\n if(uset.size > 1):\n uid = rng.choice(uset, 1) # randomly pick one\n else:\n uid = int(uset)\n u = controller.ugrid[p7[uid, 1], :].squeeze()\n\n # Integrate ode\n sol = solve_ivp(MG, [0, tau], x, method='RK45', args=(u,))\n tt = sol.t[-1]\n y = sol.y[:, -1]\n\n # Update DBA state\n q = controller.q_prime[p5[1]*dba.n_dba+q] # p5[1] is the label/proposition of current x\n\n # Save trajectories\n tsim.append(t)\n xsim.append(x)\n usim.append(u)\n qsim.append(q)\n # Update state\n x = y\n t += tt\n\n\nxsim = np.asarray(xsim)\nax.plot(xsim[:, 0], xsim[:, 1], 'b')\nax.plot(xsim[0, 0], xsim[0, 1], marker='o', markerfacecolor='r')\n\n\nplt.show()\n",
"import numpy as np\nimport re\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.lines as lines\nimport matplotlib.animation as animation\nimport sys\nfrom os.path import dirname, realpath\npypath = dirname(dirname(dirname(realpath(__file__)))) + '/python/'\nsys.path.insert(1, pypath)\nfrom odes import scara\n\n\n# # Load simulated trajectories\ndirpath = dirname(realpath(__file__))\nstatefile = \"traj_gb2\"\ntorqfile = \"torq_gb2\"\nsfile = dirpath + '/' + statefile + \".npy\"\ntfile = dirpath + '/' + torqfile + \".npy\"\n\nthetas = np.load(sfile)\ntorqs = np.load(tfile)\n\n# # Joint space to operation space\nmodel = scara(0.1)\nxy2 = model.theta2xy(thetas[:, 0], thetas[:, 1])\nx1 = model.l1 * np.cos(thetas[:, 0])\ny1 = model.l2 * np.sin(thetas[:, 0])\nx2 = xy2[0, :]\ny2 = xy2[1, :]\n\n\n# # Setup workspace\nFS = 12\n\nfig, ax = plt.subplots()\nax.set_xlim(0, 1.2*(model.l1+model.l2))\nax.set_xlabel(r'$x$')\nax.set_ylim(0, model.l1+model.l2)\nax.set_ylabel(r'$y$')\nax.title\n\n# The bar obstacle\nH = 0.8*model.l1\nr = 0.5*model.l1\n# bar = plt.plot([0, r], [H, H], linewidth=10, color='k')\nbar = patches.Rectangle((0, H), r, 0.01, facecolor='tab:gray',\n hatch='/', zorder=0)\nax.add_patch(bar)\n\n# Goals\nnG = 2\nG = np.zeros(shape=(2, 2, nG))\nG[:, :, 0] = np.array([[0.0277, 0.0597], [0.1852, 0.2134]])\nG[:, :, 1] = np.array([[0.2585, 0.2784], [0.0059, 0.0514]])\nfor i in range(nG):\n ax.add_patch(\n patches.Rectangle((G[0, 0, i], G[1, 0, i]),\n G[0, 1, i]-G[0, 0, i], G[1, 1, i]-G[1, 0, i],\n linewidth=1.5, facecolor='yellowgreen', fill=True,\n zorder=0)\n )\nplt.text((G[0, 0, 0]+G[0, 1, 0])*0.4, (G[1, 1, 0]+G[1, 0, 0])/2,\n r'$g_1$', fontsize=FS)\nplt.text((G[0, 0, 1]+G[0, 1, 1])*0.49, (G[1, 1, 1]+G[1, 0, 1])/2,\n r'$g_2$', fontsize=FS)\n\n# arm1 = lines.Line2D([0, x1[0]], [0, y1[0]],\n# linewidth=3, color='k', alpha=0, zorder=1)\n# arm2 = lines.Line2D([x1[0], x2[0]], [y1[0], y2[0]],\n# linewidth=3, color='k', alpha=0, zorder=1)\n# joint1 = patches.Circle((0, 0), radius=0.005, color='k',\n# fill=True, alpha=1, zorder=2)\n# joint2 = patches.Circle((x1[0], y1[0]), radius=0.005, color='k',\n# fill=True, alpha=0, zorder=2)\n# end = patches.Circle((x2[0], y2[0]), radius=0.005, color='tab:orange',\n# fill=True, alpha=0, zorder=2)\ni = 93\narm1 = lines.Line2D([0, x1[i]], [0, y1[i]],\n linewidth=3, color='k', alpha=1, zorder=1)\narm2 = lines.Line2D([x1[i], x2[i]], [y1[i], y2[i]],\n linewidth=3, color='k', alpha=1, zorder=1)\njoint1 = patches.Circle((0, 0), radius=0.005, color='k',\n fill=True, alpha=1, zorder=2)\njoint2 = patches.Circle((x1[i], y1[i]), radius=0.005, color='k',\n fill=True, alpha=1, zorder=2)\nend = patches.Circle((x2[i], y2[i]), radius=0.005, color='tab:orange',\n fill=True, alpha=1, zorder=2)\nax.add_patch(joint1)\nax.add_patch(joint2)\nax.add_patch(end)\nax.add_artist(arm1)\nax.add_artist(arm2)\n\n\n# # Animation\ntorque_text = ax.text(0.05, 0.95, '', transform=ax.transAxes)\ntorque_template = 'torques=%.3f,%.3f'\n\n\ndef animate(i):\n arm1.set_data([0, x1[i]], [0, y1[i]])\n arm2.set_data([x1[i], x2[i]], [y1[i], y2[i]])\n joint2.center = (x1[i], y1[i])\n end.center = (x2[i], y2[i])\n arm1.set_alpha(1)\n arm2.set_alpha(1)\n joint2.set_alpha(1)\n end.set_alpha(1)\n joint2.set_zorder(10)\n end.set_zorder(10)\n torque_text.set_text(torque_template % (torqs[i, 0], torqs[i, 1]))\n return joint2, end, torque_text, arm1, arm2\n\nani = animation.FuncAnimation(fig, animate, x1.size,\n interval=0.1*500, blit=True)\nWriter = 
animation.writers['ffmpeg']\nwriter = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)\nani.save(dirpath+'/replay'+'.mp4', writer=writer)\n\n\n# # End-effector trajectory\nax.plot(x2, y2, color='peru')\n\nplt.savefig(dirpath+'/fig_traj-gb2-os.png')\n\nplt.show()\n"
] |
[
[
"numpy.asarray",
"matplotlib.patches.Rectangle",
"scipy.integrate.solve_ivp",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.argwhere",
"numpy.array",
"numpy.random.default_rng"
],
[
"matplotlib.patches.Rectangle",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.patches.Circle",
"matplotlib.pyplot.savefig",
"numpy.cos",
"numpy.sin",
"matplotlib.animation.FuncAnimation",
"numpy.load",
"matplotlib.pyplot.text",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cbenitez81/probreg
|
[
"1769af92e06e5e1e7441d9abf9982e94736b4685"
] |
[
"probreg/filterreg.py"
] |
[
"from __future__ import print_function\nfrom __future__ import division\nimport abc\nfrom collections import namedtuple\nimport six\nimport numpy as np\nimport open3d as o3\nfrom . import transformation as tf\nfrom . import gaussian_filtering as gf\nfrom . import gauss_transform as gt\nfrom . import se3_op as so\nfrom . import _kabsch as kabsch\nfrom . import _pt2pl as pt2pl\nfrom . import math_utils as mu\n\n\nEstepResult = namedtuple('EstepResult', ['m0', 'm1', 'm2', 'nx'])\nMstepResult = namedtuple('MstepResult', ['transformation', 'sigma2', 'q'])\n\[email protected]_metaclass(abc.ABCMeta)\nclass FilterReg():\n \"\"\"FilterReg\n FilterReg is similar to CPD, and the speed performance is improved.\n In this algorithm, not only point-to-point alignment but also\n point-to-plane alignment are implemented.\n\n Args:\n source (numpy.ndarray, optional): Source point cloud data.\n target_normals (numpy.ndarray, optional): Normals of target points.\n sigma2 (Float, optional): Variance parameter. If this variable is None,\n the variance is updated in Mstep.\n \"\"\"\n def __init__(self, source=None, target_normals=None,\n sigma2=None):\n self._source = source\n self._target_normals = target_normals\n self._sigma2 = sigma2\n self._update_sigma2 = self._sigma2 is None\n self._tf_type = None\n self._tf_result = None\n self._callbacks = []\n\n def set_source(self, source):\n self._source = source\n\n def set_target_normals(self, target_normals):\n self._target_normals = target_normals\n\n def set_callbacks(self, callbacks):\n self._callbacks = callbacks\n\n def expectation_step(self, t_source, target, sigma2,\n objective_type='pt2pt', alpha=0.015):\n \"\"\"Expectation step\n \"\"\"\n assert t_source.ndim == 2 and target.ndim == 2, \"source and target must have 2 dimensions.\"\n m, ndim = t_source.shape\n n = target.shape[0]\n sigma = np.sqrt(sigma2)\n fx = t_source / sigma\n fy = target / sigma\n zero_m1 = np.zeros((m, 1))\n zeros_md = np.zeros_like(fx)\n dem = np.power(2.0 * np.pi * sigma2, ndim * 0.5)\n fin = np.r_[fx, fy]\n ph = gf.Permutohedral(fin)\n if ph.get_lattice_size() < n * alpha:\n ph = gf.Permutohedral(fin, False)\n vin0 = np.r_[zero_m1, np.ones((n, 1)) / dem]\n vin1 = np.r_[zeros_md, target / dem]\n m0 = ph.filter(vin0, m).flatten()[:m]\n m1 = ph.filter(vin1, m)[:m]\n if self._update_sigma2:\n vin2 = np.r_[zero_m1,\n np.expand_dims(np.square(target).sum(axis=1), axis=1) / dem]\n m2 = ph.filter(vin2, m).flatten()[:m]\n else:\n m2 = None\n if objective_type == 'pt2pt':\n nx = None\n elif objective_type == 'pt2pl':\n vin = np.r_[zeros_md, self._target_normals / dem]\n nx = ph.filter(vin, m)[:m]\n else:\n raise ValueError('Unknown objective_type: %s.' 
% objective_type)\n return EstepResult(m0, m1, m2, nx)\n\n def maximization_step(self, t_source, target, estep_res, w=0.0,\n objective_type='pt2pt'):\n return self._maximization_step(t_source, target, estep_res,\n self._tf_result, self._sigma2, w,\n objective_type)\n\n @staticmethod\n @abc.abstractmethod\n def _maximization_step(t_source, target, estep_res, sigma2, w=0.0,\n objective_type='pt2pt'):\n return None\n\n def registration(self, target, w=0.0,\n objective_type='pt2pt',\n maxiter=50, tol=0.001):\n assert not self._tf_type is None, \"transformation type is None.\"\n q = None\n if self._update_sigma2:\n self._sigma2 = mu.squared_kernel_sum(self._source, target)\n for _ in range(maxiter):\n t_source = self._tf_result.transform(self._source)\n estep_res = self.expectation_step(t_source, target, self._sigma2, objective_type)\n res = self.maximization_step(t_source, target, estep_res, w=w,\n objective_type=objective_type)\n self._tf_result = res.transformation\n self._sigma2 = res.sigma2\n for c in self._callbacks:\n c(self._tf_result)\n if not q is None and abs(res.q - q) < tol:\n break\n q = res.q\n return res\n\n\nclass RigidFilterReg(FilterReg):\n def __init__(self, source=None, target_normals=None,\n sigma2=None):\n super(RigidFilterReg, self).__init__(source, target_normals, sigma2)\n self._tf_type = tf.RigidTransformation\n self._tf_result = self._tf_type()\n\n @staticmethod\n def _maximization_step(t_source, target, estep_res, trans_p, sigma2, w=0.0,\n objective_type='pt2pt', maxiter=10, tol=1.0e-4):\n m, ndim = t_source.shape\n n = target.shape[0]\n assert ndim == 3, \"ndim must be 3.\"\n m0, m1, m2, nx = estep_res\n tw = np.zeros(ndim * 2)\n c = w / (1.0 - w) * n / m\n m0[m0==0] = np.finfo(np.float32).eps\n m1m0 = np.divide(m1.T, m0).T\n m0m0 = m0 / (m0 + c)\n drxdx = np.sqrt(m0m0 * 1.0 / sigma2)\n if objective_type == 'pt2pt':\n dr, dt = kabsch.kabsch(t_source, m1m0, drxdx)\n rx = np.multiply(drxdx, (t_source - m1m0).T).T.sum(axis=1)\n rot, t = np.dot(dr, trans_p.rot), np.dot(trans_p.t, dr.T) + dt\n q = np.dot(rx.T, rx).sum()\n elif objective_type == 'pt2pl':\n nxm0 = (nx.T / m0).T\n tw, q = pt2pl.compute_twist_for_pt2pl(t_source, m1m0, nxm0, drxdx)\n rot, t = so.twist_mul(tw, trans_p.rot, trans_p.t)\n else:\n raise ValueError('Unknown objective_type: %s.' % objective_type)\n\n if not m2 is None:\n sigma2 = (m0 *(np.square(t_source).sum(axis=1) - 2.0 * (t_source * m1).sum(axis=1) + m2) / (m0 + c)).sum()\n sigma2 /= (3*m0m0.sum())\n return MstepResult(tf.RigidTransformation(rot, t), sigma2, q)\n\n\ndef registration_filterreg(source, target, target_normals=None,\n sigma2=None, objective_type='pt2pt', maxiter=50, tol=0.001,\n callbacks=[], **kargs):\n cv = lambda x: np.asarray(x.points if isinstance(x, o3.PointCloud) else x)\n frg = RigidFilterReg(cv(source), cv(target_normals), sigma2, **kargs)\n frg.set_callbacks(callbacks)\n return frg.registration(cv(target), objective_type=objective_type, maxiter=maxiter, tol=tol)\n"
] |
[
[
"numpy.square",
"numpy.dot",
"numpy.sqrt",
"numpy.multiply",
"numpy.power",
"numpy.finfo",
"numpy.ones",
"numpy.zeros_like",
"numpy.zeros",
"numpy.divide"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |