repo_name: subhashsuman01/fury
hexsha: [ "13809a10cb4a6753f166ef547ac6a087357189ad" ]
file_path: [ "fury/ui.py" ]
code:
[ "from _warnings import warn\n\nimport numpy as np\nimport vtk\nimport os\nimport abc\n\nfrom fury.data import read_viz_icons\nfrom fury.interactor import CustomInteractorStyle\nfrom fury.io import load_image\nfrom fury.utils import set_input, rotate\nfrom fury.actor import grid\n\n\nTWO_PI = 2 * np.pi\n\n\nclass UI(object, metaclass=abc.ABCMeta):\n \"\"\"An umbrella class for all UI elements.\n\n While adding UI elements to the scene, we go over all the sub-elements\n that come with it and add those to the scene automatically.\n\n Attributes\n ----------\n position : (float, float)\n Absolute coordinates (x, y) of the lower-left corner of this\n UI component.\n center : (float, float)\n Absolute coordinates (x, y) of the center of this UI component.\n size : (int, int)\n Width and height in pixels of this UI component.\n on_left_mouse_button_pressed: function\n Callback function for when the left mouse button is pressed.\n on_left_mouse_button_released: function\n Callback function for when the left mouse button is released.\n on_left_mouse_button_clicked: function\n Callback function for when clicked using the left mouse button\n (i.e. pressed -> released).\n on_left_mouse_button_dragged: function\n Callback function for when dragging using the left mouse button.\n on_right_mouse_button_pressed: function\n Callback function for when the right mouse button is pressed.\n on_right_mouse_button_released: function\n Callback function for when the right mouse button is released.\n on_right_mouse_button_clicked: function\n Callback function for when clicking using the right mouse button\n (i.e. pressed -> released).\n on_right_mouse_button_dragged: function\n Callback function for when dragging using the right mouse button.\n on_middle_mouse_button_pressed: function\n Callback function for when the middle mouse button is pressed.\n on_middle_mouse_button_released: function\n Callback function for when the middle mouse button is released.\n on_middle_mouse_button_clicked: function\n Callback function for when clicking using the middle mouse button\n (i.e. 
pressed -> released).\n on_middle_mouse_button_dragged: function\n Callback function for when dragging using the middle mouse button.\n on_key_press: function\n Callback function for when a keyboard key is pressed.\n\n \"\"\"\n\n def __init__(self, position=(0, 0)):\n \"\"\"Initialize this UI component.\n\n Parameters\n ----------\n position : (float, float)\n Absolute coordinates (x, y) of the lower-left corner of this\n UI component.\n\n \"\"\"\n self._scene = object()\n self._position = np.array([0, 0])\n self._callbacks = []\n\n self._setup() # Setup needed actors and sub UI components.\n self.position = position\n\n self.left_button_state = \"released\"\n self.right_button_state = \"released\"\n self.middle_button_state = \"released\"\n\n self.on_left_mouse_button_pressed = lambda i_ren, obj, element: None\n self.on_left_mouse_button_dragged = lambda i_ren, obj, element: None\n self.on_left_mouse_button_released = lambda i_ren, obj, element: None\n self.on_left_mouse_button_clicked = lambda i_ren, obj, element: None\n self.on_right_mouse_button_pressed = lambda i_ren, obj, element: None\n self.on_right_mouse_button_released = lambda i_ren, obj, element: None\n self.on_right_mouse_button_clicked = lambda i_ren, obj, element: None\n self.on_right_mouse_button_dragged = lambda i_ren, obj, element: None\n self.on_middle_mouse_button_pressed = lambda i_ren, obj, element: None\n self.on_middle_mouse_button_released = lambda i_ren, obj, element: None\n self.on_middle_mouse_button_clicked = lambda i_ren, obj, element: None\n self.on_middle_mouse_button_dragged = lambda i_ren, obj, element: None\n self.on_key_press = lambda i_ren, obj, element: None\n\n @abc.abstractmethod\n def _setup(self):\n \"\"\"Set up this UI component.\n\n This is where you should create all your needed actors and sub UI\n components.\n\n \"\"\"\n msg = \"Subclasses of UI must implement `_setup(self)`.\"\n raise NotImplementedError(msg)\n\n @abc.abstractmethod\n def _get_actors(self):\n \"\"\"Get the actors composing this UI component.\"\"\"\n msg = \"Subclasses of UI must implement `_get_actors(self)`.\"\n raise NotImplementedError(msg)\n\n @property\n def actors(self):\n \"\"\"Actors composing this UI component.\"\"\"\n return self._get_actors()\n\n @abc.abstractmethod\n def _add_to_scene(self, _scene):\n \"\"\"Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n _scene : scene\n\n \"\"\"\n msg = \"Subclasses of UI must implement `_add_to_scene(self, scene)`.\"\n raise NotImplementedError(msg)\n\n def add_to_scene(self, scene):\n \"\"\"Allow UI objects to add their own props to the scene.\n\n Parameters\n ----------\n scene : scene\n\n \"\"\"\n self._add_to_scene(scene)\n\n # Get a hold on the current interactor style.\n iren = scene.GetRenderWindow().GetInteractor().GetInteractorStyle()\n\n for callback in self._callbacks:\n if not isinstance(iren, CustomInteractorStyle):\n msg = (\"The ShowManager requires `CustomInteractorStyle` in\"\n \" order to use callbacks.\")\n raise TypeError(msg)\n\n if callback[0] == self._scene:\n\n iren.add_callback(iren, callback[1], callback[2], args=[self])\n else:\n iren.add_callback(*callback, args=[self])\n\n def add_callback(self, prop, event_type, callback, priority=0):\n \"\"\"Add a callback to a specific event for this UI component.\n\n Parameters\n ----------\n prop : vtkProp\n The prop on which the callback is to be added.\n event_type : string\n The event code.\n callback : function\n The callback function.\n priority : int\n Higher number is higher 
priority.\n\n \"\"\"\n # Actually since we need an interactor style we will add the callback\n # only when this UI component is added to the scene.\n self._callbacks.append((prop, event_type, callback, priority))\n\n @property\n def position(self):\n return self._position\n\n @position.setter\n def position(self, coords):\n coords = np.asarray(coords)\n self._set_position(coords)\n self._position = coords\n\n @abc.abstractmethod\n def _set_position(self, _coords):\n \"\"\"Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n _coords: (float, float)\n Absolute pixel coordinates (x, y).\n\n \"\"\"\n msg = \"Subclasses of UI must implement `_set_position(self, coords)`.\"\n raise NotImplementedError(msg)\n\n @property\n def size(self):\n return np.asarray(self._get_size(), dtype=int)\n\n @abc.abstractmethod\n def _get_size(self):\n msg = \"Subclasses of UI must implement property `size`.\"\n raise NotImplementedError(msg)\n\n @property\n def center(self):\n return self.position + self.size / 2.\n\n @center.setter\n def center(self, coords):\n \"\"\"Position the center of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n\n \"\"\"\n if not hasattr(self, \"size\"):\n msg = \"Subclasses of UI must implement the `size` property.\"\n raise NotImplementedError(msg)\n\n new_center = np.array(coords)\n size = np.array(self.size)\n new_lower_left_corner = new_center - size / 2.\n self.position = new_lower_left_corner\n\n def set_visibility(self, visibility):\n \"\"\"Set visibility of this UI component.\"\"\"\n for actor in self.actors:\n actor.SetVisibility(visibility)\n\n def handle_events(self, actor):\n self.add_callback(actor, \"LeftButtonPressEvent\",\n self.left_button_click_callback)\n self.add_callback(actor, \"LeftButtonReleaseEvent\",\n self.left_button_release_callback)\n self.add_callback(actor, \"RightButtonPressEvent\",\n self.right_button_click_callback)\n self.add_callback(actor, \"RightButtonReleaseEvent\",\n self.right_button_release_callback)\n self.add_callback(actor, \"MiddleButtonPressEvent\",\n self.middle_button_click_callback)\n self.add_callback(actor, \"MiddleButtonReleaseEvent\",\n self.middle_button_release_callback)\n self.add_callback(actor, \"MouseMoveEvent\", self.mouse_move_callback)\n self.add_callback(actor, \"KeyPressEvent\", self.key_press_callback)\n\n @staticmethod\n def left_button_click_callback(i_ren, obj, self):\n self.left_button_state = \"pressing\"\n self.on_left_mouse_button_pressed(i_ren, obj, self)\n i_ren.event.abort()\n\n @staticmethod\n def left_button_release_callback(i_ren, obj, self):\n if self.left_button_state == \"pressing\":\n self.on_left_mouse_button_clicked(i_ren, obj, self)\n self.left_button_state = \"released\"\n self.on_left_mouse_button_released(i_ren, obj, self)\n\n @staticmethod\n def right_button_click_callback(i_ren, obj, self):\n self.right_button_state = \"pressing\"\n self.on_right_mouse_button_pressed(i_ren, obj, self)\n i_ren.event.abort()\n\n @staticmethod\n def right_button_release_callback(i_ren, obj, self):\n if self.right_button_state == \"pressing\":\n self.on_right_mouse_button_clicked(i_ren, obj, self)\n self.right_button_state = \"released\"\n self.on_right_mouse_button_released(i_ren, obj, self)\n\n @staticmethod\n def middle_button_click_callback(i_ren, obj, self):\n self.middle_button_state = \"pressing\"\n self.on_middle_mouse_button_pressed(i_ren, obj, self)\n i_ren.event.abort()\n\n @staticmethod\n def 
middle_button_release_callback(i_ren, obj, self):\n if self.middle_button_state == \"pressing\":\n self.on_middle_mouse_button_clicked(i_ren, obj, self)\n self.middle_button_state = \"released\"\n self.on_middle_mouse_button_released(i_ren, obj, self)\n\n @staticmethod\n def mouse_move_callback(i_ren, obj, self):\n left_pressing_or_dragging = (self.left_button_state == \"pressing\" or\n self.left_button_state == \"dragging\")\n\n right_pressing_or_dragging = (self.right_button_state == \"pressing\" or\n self.right_button_state == \"dragging\")\n\n middle_pressing_or_dragging = \\\n (self.middle_button_state == \"pressing\" or\n self.middle_button_state == \"dragging\")\n\n if left_pressing_or_dragging:\n self.left_button_state = \"dragging\"\n self.on_left_mouse_button_dragged(i_ren, obj, self)\n elif right_pressing_or_dragging:\n self.right_button_state = \"dragging\"\n self.on_right_mouse_button_dragged(i_ren, obj, self)\n elif middle_pressing_or_dragging:\n self.middle_button_state = \"dragging\"\n self.on_middle_mouse_button_dragged(i_ren, obj, self)\n\n @staticmethod\n def key_press_callback(i_ren, obj, self):\n self.on_key_press(i_ren, obj, self)\n\n\nclass Button2D(UI):\n \"\"\"A 2D overlay button of type vtkTexturedActor2D.\n\n Currently supports::\n\n - Multiple icons.\n - Switching between icons.\n\n \"\"\"\n\n def __init__(self, icon_fnames, position=(0, 0), size=(30, 30)):\n \"\"\"Init class instance.\n\n Parameters\n ----------\n icon_fnames : List(string, string)\n ((iconname, filename), (iconname, filename), ....)\n position : (float, float), optional\n Absolute coordinates (x, y) of the lower-left corner of the button.\n size : (int, int), optional\n Width and height in pixels of the button.\n\n \"\"\"\n super(Button2D, self).__init__(position)\n\n self.icon_extents = dict()\n self.icons = self._build_icons(icon_fnames)\n self.icon_names = [icon[0] for icon in self.icons]\n self.current_icon_id = 0\n self.current_icon_name = self.icon_names[self.current_icon_id]\n self.set_icon(self.icons[self.current_icon_id][1])\n self.resize(size)\n\n def _get_size(self):\n lower_left_corner = self.texture_points.GetPoint(0)\n upper_right_corner = self.texture_points.GetPoint(2)\n size = np.array(upper_right_corner) - np.array(lower_left_corner)\n return abs(size[:2])\n\n def _build_icons(self, icon_fnames):\n \"\"\"Convert file names to vtkImageDataGeometryFilters.\n\n A pre-processing step to prevent re-read of file names during every\n state change.\n\n Parameters\n ----------\n icon_fnames : List(string, string)\n ((iconname, filename), (iconname, filename), ....)\n\n Returns\n -------\n icons : List\n A list of corresponding vtkImageDataGeometryFilters.\n\n \"\"\"\n icons = []\n for icon_name, icon_fname in icon_fnames:\n icons.append((icon_name, load_image(icon_fname, as_vtktype=True)))\n\n return icons\n\n def _setup(self):\n \"\"\"Set up this UI component.\n\n Creating the button actor used internally.\n\n \"\"\"\n # This is highly inspired by\n # https://github.com/Kitware/VTK/blob/c3ec2495b183e3327820e927af7f8f90d34c3474/Interaction/Widgets/vtkBalloonRepresentation.cxx#L47\n\n self.texture_polydata = vtk.vtkPolyData()\n self.texture_points = vtk.vtkPoints()\n self.texture_points.SetNumberOfPoints(4)\n\n polys = vtk.vtkCellArray()\n polys.InsertNextCell(4)\n polys.InsertCellPoint(0)\n polys.InsertCellPoint(1)\n polys.InsertCellPoint(2)\n polys.InsertCellPoint(3)\n self.texture_polydata.SetPolys(polys)\n\n tc = vtk.vtkFloatArray()\n tc.SetNumberOfComponents(2)\n 
tc.SetNumberOfTuples(4)\n tc.InsertComponent(0, 0, 0.0)\n tc.InsertComponent(0, 1, 0.0)\n tc.InsertComponent(1, 0, 1.0)\n tc.InsertComponent(1, 1, 0.0)\n tc.InsertComponent(2, 0, 1.0)\n tc.InsertComponent(2, 1, 1.0)\n tc.InsertComponent(3, 0, 0.0)\n tc.InsertComponent(3, 1, 1.0)\n self.texture_polydata.GetPointData().SetTCoords(tc)\n\n texture_mapper = vtk.vtkPolyDataMapper2D()\n texture_mapper = set_input(texture_mapper, self.texture_polydata)\n\n button = vtk.vtkTexturedActor2D()\n button.SetMapper(texture_mapper)\n\n self.texture = vtk.vtkTexture()\n button.SetTexture(self.texture)\n\n button_property = vtk.vtkProperty2D()\n button_property.SetOpacity(1.0)\n button.SetProperty(button_property)\n self.actor = button\n\n # Add default events listener to the VTK actor.\n self.handle_events(self.actor)\n\n def _get_actors(self):\n \"\"\"Get the actors composing this UI component.\"\"\"\n return [self.actor]\n\n def _add_to_scene(self, scene):\n \"\"\"Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n\n \"\"\"\n scene.add(self.actor)\n\n def resize(self, size):\n \"\"\"Resize the button.\n\n Parameters\n ----------\n size : (float, float)\n Button size (width, height) in pixels.\n\n \"\"\"\n # Update actor.\n self.texture_points.SetPoint(0, 0, 0, 0.0)\n self.texture_points.SetPoint(1, size[0], 0, 0.0)\n self.texture_points.SetPoint(2, size[0], size[1], 0.0)\n self.texture_points.SetPoint(3, 0, size[1], 0.0)\n self.texture_polydata.SetPoints(self.texture_points)\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n self.actor.SetPosition(*coords)\n\n @property\n def color(self):\n \"\"\" Gets the button's color.\n \"\"\"\n color = self.actor.GetProperty().GetColor()\n return np.asarray(color)\n\n @color.setter\n def color(self, color):\n \"\"\" Sets the button's color.\n\n Parameters\n ----------\n color : (float, float, float)\n RGB. 
Must take values in [0, 1].\n \"\"\"\n self.actor.GetProperty().SetColor(*color)\n\n def scale(self, factor):\n \"\"\" Scales the button.\n\n Parameters\n ----------\n factor : (float, float)\n Scaling factor (width, height) in pixels.\n \"\"\"\n self.resize(self.size * factor)\n\n def set_icon_by_name(self, icon_name):\n \"\"\" Set the button icon using its name.\n\n Parameters\n ----------\n icon_name : str\n \"\"\"\n icon_id = self.icon_names.index(icon_name)\n self.set_icon(self.icons[icon_id][1])\n\n def set_icon(self, icon):\n \"\"\" Modifies the icon used by the vtkTexturedActor2D.\n\n Parameters\n ----------\n icon : imageDataGeometryFilter\n \"\"\"\n self.texture = set_input(self.texture, icon)\n\n def next_icon_id(self):\n \"\"\" Sets the next icon ID while cycling through icons.\n \"\"\"\n self.current_icon_id += 1\n if self.current_icon_id == len(self.icons):\n self.current_icon_id = 0\n self.current_icon_name = self.icon_names[self.current_icon_id]\n\n def next_icon(self):\n \"\"\" Increments the state of the Button.\n\n Also changes the icon.\n \"\"\"\n self.next_icon_id()\n self.set_icon(self.icons[self.current_icon_id][1])\n\n\nclass Rectangle2D(UI):\n \"\"\" A 2D rectangle sub-classed from UI.\n \"\"\"\n\n def __init__(self, size=(0, 0), position=(0, 0), color=(1, 1, 1),\n opacity=1.0):\n \"\"\" Initializes a rectangle.\n\n Parameters\n ----------\n size : (int, int)\n The size of the rectangle (width, height) in pixels.\n position : (float, float)\n Coordinates (x, y) of the lower-left corner of the rectangle.\n color : (float, float, float)\n Must take values in [0, 1].\n opacity : float\n Must take values in [0, 1].\n \"\"\"\n super(Rectangle2D, self).__init__(position)\n self.color = color\n self.opacity = opacity\n self.resize(size)\n\n def _setup(self):\n \"\"\" Setup this UI component.\n\n Creating the polygon actor used internally.\n \"\"\"\n # Setup four points\n size = (1, 1)\n self._points = vtk.vtkPoints()\n self._points.InsertNextPoint(0, 0, 0)\n self._points.InsertNextPoint(size[0], 0, 0)\n self._points.InsertNextPoint(size[0], size[1], 0)\n self._points.InsertNextPoint(0, size[1], 0)\n\n # Create the polygon\n polygon = vtk.vtkPolygon()\n polygon.GetPointIds().SetNumberOfIds(4) # make a quad\n polygon.GetPointIds().SetId(0, 0)\n polygon.GetPointIds().SetId(1, 1)\n polygon.GetPointIds().SetId(2, 2)\n polygon.GetPointIds().SetId(3, 3)\n\n # Add the polygon to a list of polygons\n polygons = vtk.vtkCellArray()\n polygons.InsertNextCell(polygon)\n\n # Create a PolyData\n self._polygonPolyData = vtk.vtkPolyData()\n self._polygonPolyData.SetPoints(self._points)\n self._polygonPolyData.SetPolys(polygons)\n\n # Create a mapper and actor\n mapper = vtk.vtkPolyDataMapper2D()\n mapper = set_input(mapper, self._polygonPolyData)\n\n self.actor = vtk.vtkActor2D()\n self.actor.SetMapper(mapper)\n\n # Add default events listener to the VTK actor.\n self.handle_events(self.actor)\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return [self.actor]\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n scene.add(self.actor)\n\n def _get_size(self):\n # Get 2D coordinates of two opposed corners of the rectangle.\n lower_left_corner = np.array(self._points.GetPoint(0)[:2])\n upper_right_corner = np.array(self._points.GetPoint(2)[:2])\n size = abs(upper_right_corner - lower_left_corner)\n return size\n\n @property\n def 
width(self):\n return self._points.GetPoint(2)[0]\n\n @width.setter\n def width(self, width):\n self.resize((width, self.height))\n\n @property\n def height(self):\n return self._points.GetPoint(2)[1]\n\n @height.setter\n def height(self, height):\n self.resize((self.width, height))\n\n def resize(self, size):\n \"\"\" Sets the rectangle size.\n\n Parameters\n ----------\n size : (float, float)\n Rectangle size (width, height) in pixels.\n \"\"\"\n self._points.SetPoint(0, 0, 0, 0.0)\n self._points.SetPoint(1, size[0], 0, 0.0)\n self._points.SetPoint(2, size[0], size[1], 0.0)\n self._points.SetPoint(3, 0, size[1], 0.0)\n self._polygonPolyData.SetPoints(self._points)\n mapper = vtk.vtkPolyDataMapper2D()\n mapper = set_input(mapper, self._polygonPolyData)\n\n self.actor.SetMapper(mapper)\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n self.actor.SetPosition(*coords)\n\n @property\n def color(self):\n \"\"\" Gets the rectangle's color.\n \"\"\"\n color = self.actor.GetProperty().GetColor()\n return np.asarray(color)\n\n @color.setter\n def color(self, color):\n \"\"\" Sets the rectangle's color.\n\n Parameters\n ----------\n color : (float, float, float)\n RGB. Must take values in [0, 1].\n \"\"\"\n self.actor.GetProperty().SetColor(*color)\n\n @property\n def opacity(self):\n \"\"\" Gets the rectangle's opacity.\n \"\"\"\n return self.actor.GetProperty().GetOpacity()\n\n @opacity.setter\n def opacity(self, opacity):\n \"\"\" Sets the rectangle's opacity.\n\n Parameters\n ----------\n opacity : float\n Degree of transparency. Must be between [0, 1].\n \"\"\"\n self.actor.GetProperty().SetOpacity(opacity)\n\n\nclass Disk2D(UI):\n \"\"\" A 2D disk UI component.\n \"\"\"\n\n def __init__(self, outer_radius, inner_radius=0, center=(0, 0),\n color=(1, 1, 1), opacity=1.0):\n \"\"\" Initializes a disk.\n\n Parameters\n ----------\n outer_radius : int\n Outer radius of the disk.\n inner_radius : int, optional\n Inner radius of the disk. 
A value > 0, makes a ring.\n center : (float, float), optional\n Coordinates (x, y) of the center of the disk.\n color : (float, float, float), optional\n Must take values in [0, 1].\n opacity : float, optional\n Must take values in [0, 1].\n \"\"\"\n super(Disk2D, self).__init__()\n self.outer_radius = outer_radius\n self.inner_radius = inner_radius\n self.color = color\n self.opacity = opacity\n self.center = center\n\n def _setup(self):\n \"\"\" Setup this UI component.\n\n Creating the disk actor used internally.\n \"\"\"\n # Setting up disk actor.\n self._disk = vtk.vtkDiskSource()\n self._disk.SetRadialResolution(10)\n self._disk.SetCircumferentialResolution(50)\n self._disk.Update()\n\n # Mapper\n mapper = vtk.vtkPolyDataMapper2D()\n mapper = set_input(mapper, self._disk.GetOutputPort())\n\n # Actor\n self.actor = vtk.vtkActor2D()\n self.actor.SetMapper(mapper)\n\n # Add default events listener to the VTK actor.\n self.handle_events(self.actor)\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return [self.actor]\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n scene.add(self.actor)\n\n def _get_size(self):\n diameter = 2 * self.outer_radius\n size = (diameter, diameter)\n return size\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component's bounding box.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n # Disk actors are positioned with respect to their center.\n self.actor.SetPosition(*coords + self.outer_radius)\n\n @property\n def color(self):\n \"\"\" Gets the disk's color.\n \"\"\"\n color = self.actor.GetProperty().GetColor()\n return np.asarray(color)\n\n @color.setter\n def color(self, color):\n \"\"\" Sets the disk's color.\n\n Parameters\n ----------\n color : (float, float, float)\n RGB. Must take values in [0, 1].\n \"\"\"\n self.actor.GetProperty().SetColor(*color)\n\n @property\n def opacity(self):\n \"\"\" Gets the disk's opacity.\n \"\"\"\n return self.actor.GetProperty().GetOpacity()\n\n @opacity.setter\n def opacity(self, opacity):\n \"\"\" Sets the disk's opacity.\n\n Parameters\n ----------\n opacity : float\n Degree of transparency. 
Must be between [0, 1].\n \"\"\"\n self.actor.GetProperty().SetOpacity(opacity)\n\n @property\n def inner_radius(self):\n return self._disk.GetInnerRadius()\n\n @inner_radius.setter\n def inner_radius(self, radius):\n self._disk.SetInnerRadius(radius)\n self._disk.Update()\n\n @property\n def outer_radius(self):\n return self._disk.GetOuterRadius()\n\n @outer_radius.setter\n def outer_radius(self, radius):\n self._disk.SetOuterRadius(radius)\n self._disk.Update()\n\n\nclass Panel2D(UI):\n \"\"\" A 2D UI Panel.\n\n Can contain one or more UI elements.\n\n Attributes\n ----------\n alignment : [left, right]\n Alignment of the panel with respect to the overall screen.\n \"\"\"\n\n def __init__(self, size, position=(0, 0), color=(0.1, 0.1, 0.1),\n opacity=0.7, align=\"left\"):\n \"\"\"\n Parameters\n ----------\n size : (int, int)\n Size (width, height) in pixels of the panel.\n position : (float, float)\n Absolute coordinates (x, y) of the lower-left corner of the panel.\n color : (float, float, float)\n Must take values in [0, 1].\n opacity : float\n Must take values in [0, 1].\n align : [left, right]\n Alignment of the panel with respect to the overall screen.\n \"\"\"\n super(Panel2D, self).__init__(position)\n self.resize(size)\n self.alignment = align\n self.color = color\n self.opacity = opacity\n self.position = position\n self._drag_offset = None\n\n def _setup(self):\n \"\"\" Setup this UI component.\n\n Create the background (Rectangle2D) of the panel.\n \"\"\"\n self._elements = []\n self.element_offsets = []\n self.background = Rectangle2D()\n self.add_element(self.background, (0, 0))\n\n # Add default events listener for this UI component.\n self.background.on_left_mouse_button_pressed = self.left_button_pressed\n self.background.on_left_mouse_button_dragged = self.left_button_dragged\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n actors = []\n for element in self._elements:\n actors += element.actors\n\n return actors\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n for element in self._elements:\n element.add_to_scene(scene)\n\n def _get_size(self):\n return self.background.size\n\n def resize(self, size):\n \"\"\" Sets the panel size.\n\n Parameters\n ----------\n size : (float, float)\n Panel size (width, height) in pixels.\n \"\"\"\n self.background.resize(size)\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n coords = np.array(coords)\n for element, offset in self.element_offsets:\n element.position = coords + offset\n\n @property\n def color(self):\n return self.background.color\n\n @color.setter\n def color(self, color):\n self.background.color = color\n\n @property\n def opacity(self):\n return self.background.opacity\n\n @opacity.setter\n def opacity(self, opacity):\n self.background.opacity = opacity\n\n def add_element(self, element, coords, anchor=\"position\"):\n \"\"\" Adds a UI component to the panel.\n\n The coordinates represent an offset from the lower left corner of the\n panel.\n\n Parameters\n ----------\n element : UI\n The UI item to be added.\n coords : (float, float) or (int, int)\n If float, normalized coordinates are assumed and they must be\n between [0,1].\n If int, pixels coordinates are assumed and it must fit within the\n panel's size.\n 
\"\"\"\n coords = np.array(coords)\n\n if np.issubdtype(coords.dtype, np.floating):\n if np.any(coords < 0) or np.any(coords > 1):\n raise ValueError(\"Normalized coordinates must be in [0,1].\")\n\n coords = coords * self.size\n\n if anchor == \"center\":\n element.center = self.position + coords\n elif anchor == \"position\":\n element.position = self.position + coords\n else:\n msg = (\"Unknown anchor {}. Supported anchors are 'position'\"\n \" and 'center'.\")\n raise ValueError(msg)\n\n self._elements.append(element)\n offset = element.position - self.position\n self.element_offsets.append((element, offset))\n\n def remove_element(self, element):\n \"\"\" Removes a UI component from the panel.\n\n Parameters\n ----------\n element : UI\n The UI item to be removed.\n \"\"\"\n idx = self._elements.index(element)\n del self._elements[idx]\n del self.element_offsets[idx]\n\n def update_element(self, element, coords, anchor=\"position\"):\n \"\"\" Updates the position of a UI component in the panel.\n\n Parameters\n ----------\n element : UI\n The UI item to be updated.\n coords : (float, float) or (int, int)\n New coordinates.\n If float, normalized coordinates are assumed and they must be\n between [0,1].\n If int, pixels coordinates are assumed and it must fit within the\n panel's size.\n \"\"\"\n self.remove_element(element)\n self.add_element(element, coords, anchor)\n\n def left_button_pressed(self, i_ren, _obj, panel2d_object):\n click_pos = np.array(i_ren.event.position)\n self._drag_offset = click_pos - panel2d_object.position\n i_ren.event.abort() # Stop propagating the event.\n\n def left_button_dragged(self, i_ren, _obj, _panel2d_object):\n if self._drag_offset is not None:\n click_position = np.array(i_ren.event.position)\n new_position = click_position - self._drag_offset\n self.position = new_position\n i_ren.force_render()\n\n def re_align(self, window_size_change):\n \"\"\" Re-organises the elements in case the window size is changed.\n\n Parameters\n ----------\n window_size_change : (int, int)\n New window size (width, height) in pixels.\n \"\"\"\n if self.alignment == \"left\":\n pass\n elif self.alignment == \"right\":\n self.position += np.array(window_size_change)\n else:\n msg = \"You can only left-align or right-align objects in a panel.\"\n raise ValueError(msg)\n\n\nclass TextBlock2D(UI):\n \"\"\" Wraps over the default vtkTextActor and helps setting the text.\n\n Contains member functions for text formatting.\n\n Attributes\n ----------\n actor : :class:`vtkTextActor`\n The text actor.\n message : str\n The initial text while building the actor.\n position : (float, float)\n (x, y) in pixels.\n color : (float, float, float)\n RGB: Values must be between 0-1.\n bg_color : (float, float, float)\n RGB: Values must be between 0-1.\n font_size : int\n Size of the text font.\n font_family : str\n Currently only supports Arial.\n justification : str\n left, right or center.\n vertical_justification : str\n bottom, middle or top.\n bold : bool\n Makes text bold.\n italic : bool\n Makes text italicised.\n shadow : bool\n Adds text shadow.\n \"\"\"\n\n def __init__(self, text=\"Text Block\", font_size=18, font_family='Arial',\n justification='left', vertical_justification=\"bottom\",\n bold=False, italic=False, shadow=False,\n color=(1, 1, 1), bg_color=None, position=(0, 0)):\n \"\"\"\n Parameters\n ----------\n text : str\n The initial text while building the actor.\n position : (float, float)\n (x, y) in pixels.\n color : (float, float, float)\n RGB: Values must be 
between 0-1.\n bg_color : (float, float, float)\n RGB: Values must be between 0-1.\n font_size : int\n Size of the text font.\n font_family : str\n Currently only supports Arial.\n justification : str\n left, right or center.\n vertical_justification : str\n bottom, middle or top.\n bold : bool\n Makes text bold.\n italic : bool\n Makes text italicised.\n shadow : bool\n Adds text shadow.\n \"\"\"\n super(TextBlock2D, self).__init__(position=position)\n self.color = color\n self.background_color = bg_color\n self.font_size = font_size\n self.font_family = font_family\n self.justification = justification\n self.bold = bold\n self.italic = italic\n self.shadow = shadow\n self.vertical_justification = vertical_justification\n self.message = text\n\n def _setup(self):\n self.actor = vtk.vtkTextActor()\n self._background = None # For VTK < 7\n self.handle_events(self.actor)\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n if self._background is not None:\n return [self.actor, self._background]\n\n return [self.actor]\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n if self._background is not None:\n scene.add(self._background)\n\n scene.add(self.actor)\n\n @property\n def message(self):\n \"\"\" Gets message from the text.\n\n Returns\n -------\n str\n The current text message.\n \"\"\"\n return self.actor.GetInput()\n\n @message.setter\n def message(self, text):\n \"\"\" Sets the text message.\n\n Parameters\n ----------\n text : str\n The message to be set.\n \"\"\"\n self.actor.SetInput(text)\n\n @property\n def font_size(self):\n \"\"\" Gets text font size.\n\n Returns\n ----------\n int\n Text font size.\n \"\"\"\n return self.actor.GetTextProperty().GetFontSize()\n\n @font_size.setter\n def font_size(self, size):\n \"\"\" Sets font size.\n\n Parameters\n ----------\n size : int\n Text font size.\n \"\"\"\n self.actor.GetTextProperty().SetFontSize(size)\n\n @property\n def font_family(self):\n \"\"\" Gets font family.\n\n Returns\n ----------\n str\n Text font family.\n \"\"\"\n return self.actor.GetTextProperty().GetFontFamilyAsString()\n\n @font_family.setter\n def font_family(self, family='Arial'):\n \"\"\" Sets font family.\n\n Currently Arial and Courier are supported.\n\n Parameters\n ----------\n family : str\n The font family.\n \"\"\"\n if family == 'Arial':\n self.actor.GetTextProperty().SetFontFamilyToArial()\n elif family == 'Courier':\n self.actor.GetTextProperty().SetFontFamilyToCourier()\n else:\n raise ValueError(\"Font not supported yet: {}.\".format(family))\n\n @property\n def justification(self):\n \"\"\" Gets text justification.\n\n Returns\n -------\n str\n Text justification.\n \"\"\"\n justification = self.actor.GetTextProperty().GetJustificationAsString()\n if justification == 'Left':\n return \"left\"\n elif justification == 'Centered':\n return \"center\"\n elif justification == 'Right':\n return \"right\"\n\n @justification.setter\n def justification(self, justification):\n \"\"\" Justifies text.\n\n Parameters\n ----------\n justification : str\n Possible values are left, right, center.\n \"\"\"\n text_property = self.actor.GetTextProperty()\n if justification == 'left':\n text_property.SetJustificationToLeft()\n elif justification == 'center':\n text_property.SetJustificationToCentered()\n elif justification == 'right':\n text_property.SetJustificationToRight()\n else:\n msg = \"Text can only be 
justified left, right and center.\"\n raise ValueError(msg)\n\n @property\n def vertical_justification(self):\n \"\"\" Gets text vertical justification.\n\n Returns\n -------\n str\n Text vertical justification.\n \"\"\"\n text_property = self.actor.GetTextProperty()\n vjustification = text_property.GetVerticalJustificationAsString()\n if vjustification == 'Bottom':\n return \"bottom\"\n elif vjustification == 'Centered':\n return \"middle\"\n elif vjustification == 'Top':\n return \"top\"\n\n @vertical_justification.setter\n def vertical_justification(self, vertical_justification):\n \"\"\" Justifies text vertically.\n\n Parameters\n ----------\n vertical_justification : str\n Possible values are bottom, middle, top.\n \"\"\"\n text_property = self.actor.GetTextProperty()\n if vertical_justification == 'bottom':\n text_property.SetVerticalJustificationToBottom()\n elif vertical_justification == 'middle':\n text_property.SetVerticalJustificationToCentered()\n elif vertical_justification == 'top':\n text_property.SetVerticalJustificationToTop()\n else:\n msg = \"Vertical justification must be: bottom, middle or top.\"\n raise ValueError(msg)\n\n @property\n def bold(self):\n \"\"\" Returns whether the text is bold.\n\n Returns\n -------\n bool\n Text is bold if True.\n \"\"\"\n return self.actor.GetTextProperty().GetBold()\n\n @bold.setter\n def bold(self, flag):\n \"\"\" Bolds/un-bolds text.\n\n Parameters\n ----------\n flag : bool\n Sets text bold if True.\n \"\"\"\n self.actor.GetTextProperty().SetBold(flag)\n\n @property\n def italic(self):\n \"\"\" Returns whether the text is italicised.\n\n Returns\n -------\n bool\n Text is italicised if True.\n \"\"\"\n return self.actor.GetTextProperty().GetItalic()\n\n @italic.setter\n def italic(self, flag):\n \"\"\" Italicises/un-italicises text.\n\n Parameters\n ----------\n flag : bool\n Italicises text if True.\n \"\"\"\n self.actor.GetTextProperty().SetItalic(flag)\n\n @property\n def shadow(self):\n \"\"\" Returns whether the text has shadow.\n\n Returns\n -------\n bool\n Text is shadowed if True.\n \"\"\"\n return self.actor.GetTextProperty().GetShadow()\n\n @shadow.setter\n def shadow(self, flag):\n \"\"\" Adds/removes text shadow.\n\n Parameters\n ----------\n flag : bool\n Shadows text if True.\n \"\"\"\n self.actor.GetTextProperty().SetShadow(flag)\n\n @property\n def color(self):\n \"\"\" Gets text color.\n\n Returns\n -------\n (float, float, float)\n Returns text color in RGB.\n \"\"\"\n return self.actor.GetTextProperty().GetColor()\n\n @color.setter\n def color(self, color=(1, 0, 0)):\n \"\"\" Set text color.\n\n Parameters\n ----------\n color : (float, float, float)\n RGB: Values must be between 0-1.\n \"\"\"\n self.actor.GetTextProperty().SetColor(*color)\n\n @property\n def background_color(self):\n \"\"\" Gets background color.\n\n Returns\n -------\n (float, float, float) or None\n If None, there is no background color.\n Otherwise, background color in RGB.\n \"\"\"\n if self.actor.GetTextProperty().GetBackgroundOpacity() == 0:\n return None\n\n return self.actor.GetTextProperty().GetBackgroundColor()\n\n @background_color.setter\n def background_color(self, color):\n \"\"\" Set background color.\n\n Parameters\n ----------\n color : (float, float, float) or None\n If None, remove background.\n Otherwise, RGB values (must be between 0-1).\n \"\"\"\n\n if color is None:\n # Remove background.\n self.actor.GetTextProperty().SetBackgroundOpacity(0.)\n\n else:\n self.actor.GetTextProperty().SetBackgroundColor(*color)\n 
self.actor.GetTextProperty().SetBackgroundOpacity(1.)\n\n def _set_position(self, position):\n \"\"\" Set text actor position.\n\n Parameters\n ----------\n position : (float, float)\n The new position. (x, y) in pixels.\n \"\"\"\n self.actor.SetPosition(*position)\n if self._background is not None:\n self._background.SetPosition(*self.actor.GetPosition())\n\n def _get_size(self):\n if self._background is not None:\n return self._background.size\n return self.font_size * 1.2, self.font_size * 1.2\n\n\nclass TextBox2D(UI):\n \"\"\" An editable 2D text box that behaves as a UI component.\n\n Currently supports:\n - Basic text editing.\n - Cursor movements.\n - Single and multi-line text boxes.\n - Pre text formatting (text needs to be formatted beforehand).\n\n Attributes\n ----------\n text : str\n The current text state.\n actor : :class:`vtkActor2d`\n The text actor.\n width : int\n The number of characters in a single line of text.\n height : int\n The number of lines in the textbox.\n window_left : int\n Left limit of visible text in the textbox.\n window_right : int\n Right limit of visible text in the textbox.\n caret_pos : int\n Position of the caret in the text.\n init : bool\n Flag which says whether the textbox has just been initialized.\n \"\"\"\n def __init__(self, width, height, text=\"Enter Text\", position=(100, 10),\n color=(0, 0, 0), font_size=18, font_family='Arial',\n justification='left', bold=False,\n italic=False, shadow=False):\n \"\"\"\n Parameters\n ----------\n width : int\n The number of characters in a single line of text.\n height : int\n The number of lines in the textbox.\n text : str\n The initial text while building the actor.\n position : (float, float)\n (x, y) in pixels.\n color : (float, float, float)\n RGB: Values must be between 0-1.\n font_size : int\n Size of the text font.\n font_family : str\n Currently only supports Arial.\n justification : str\n left, right or center.\n bold : bool\n Makes text bold.\n italic : bool\n Makes text italicised.\n shadow : bool\n Adds text shadow.\n \"\"\"\n super(TextBox2D, self).__init__(position=position)\n\n self.message = text\n self.text.message = text\n self.text.font_size = font_size\n self.text.font_family = font_family\n self.text.justification = justification\n self.text.bold = bold\n self.text.italic = italic\n self.text.shadow = shadow\n self.text.color = color\n self.text.background_color = (1, 1, 1)\n\n self.width = width\n self.height = height\n self.window_left = 0\n self.window_right = 0\n self.caret_pos = 0\n self.init = True\n\n def _setup(self):\n \"\"\" Setup this UI component.\n\n Create the TextBlock2D component used for the textbox.\n \"\"\"\n self.text = TextBlock2D()\n\n # Add default events listener for this UI component.\n self.text.on_left_mouse_button_pressed = self.left_button_press\n self.text.on_key_press = self.key_press\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return self.text.actors\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n self.text.add_to_scene(scene)\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n self.text.position = coords\n\n def _get_size(self):\n return self.text.size\n\n def set_message(self, message):\n \"\"\" Set custom text to textbox.\n\n 
Parameters\n ----------\n message: str\n The custom message to be set.\n \"\"\"\n self.message = message\n self.text.message = message\n self.init = False\n self.window_right = len(self.message)\n self.window_left = 0\n self.caret_pos = self.window_right\n\n def width_set_text(self, text):\n \"\"\" Adds newlines to text where necessary.\n\n This is needed for multi-line text boxes.\n\n Parameters\n ----------\n text : str\n The final text to be formatted.\n\n Returns\n -------\n str\n A multi line formatted text.\n \"\"\"\n multi_line_text = \"\"\n for i, t in enumerate(text):\n multi_line_text += t\n if (i + 1) % self.width == 0:\n multi_line_text += \"\\n\"\n return multi_line_text.rstrip(\"\\n\")\n\n def handle_character(self, character):\n \"\"\" Main driving function that handles button events.\n\n # TODO: Need to handle all kinds of characters like !, +, etc.\n\n Parameters\n ----------\n character : str\n \"\"\"\n if character.lower() == \"return\":\n self.render_text(False)\n return True\n if character.lower() == \"backspace\":\n self.remove_character()\n elif character.lower() == \"left\":\n self.move_left()\n elif character.lower() == \"right\":\n self.move_right()\n else:\n self.add_character(character)\n self.render_text()\n return False\n\n def move_caret_right(self):\n \"\"\" Moves the caret towards right.\n \"\"\"\n self.caret_pos = min(self.caret_pos + 1, len(self.message))\n\n def move_caret_left(self):\n \"\"\" Moves the caret towards left.\n \"\"\"\n self.caret_pos = max(self.caret_pos - 1, 0)\n\n def right_move_right(self):\n \"\"\" Moves right boundary of the text window right-wards.\n \"\"\"\n if self.window_right <= len(self.message):\n self.window_right += 1\n\n def right_move_left(self):\n \"\"\" Moves right boundary of the text window left-wards.\n \"\"\"\n if self.window_right > 0:\n self.window_right -= 1\n\n def left_move_right(self):\n \"\"\" Moves left boundary of the text window right-wards.\n \"\"\"\n if self.window_left <= len(self.message):\n self.window_left += 1\n\n def left_move_left(self):\n \"\"\" Moves left boundary of the text window left-wards.\n \"\"\"\n if self.window_left > 0:\n self.window_left -= 1\n\n def add_character(self, character):\n \"\"\" Inserts a character into the text and moves window and caret.\n\n Parameters\n ----------\n character : str\n \"\"\"\n if len(character) > 1 and character.lower() != \"space\":\n return\n if character.lower() == \"space\":\n character = \" \"\n self.message = (self.message[:self.caret_pos] +\n character +\n self.message[self.caret_pos:])\n self.move_caret_right()\n if (self.window_right -\n self.window_left == self.height * self.width - 1):\n self.left_move_right()\n self.right_move_right()\n\n def remove_character(self):\n \"\"\" Removes a character and moves window and caret accordingly.\n \"\"\"\n if self.caret_pos == 0:\n return\n self.message = (self.message[:self.caret_pos - 1] +\n self.message[self.caret_pos:])\n self.move_caret_left()\n if len(self.message) < self.height * self.width - 1:\n self.right_move_left()\n if (self.window_right -\n self.window_left == self.height * self.width - 1):\n if self.window_left > 0:\n self.left_move_left()\n self.right_move_left()\n\n def move_left(self):\n \"\"\" Handles left button press.\n \"\"\"\n self.move_caret_left()\n if self.caret_pos == self.window_left - 1:\n if (self.window_right -\n self.window_left == self.height * self.width - 1):\n self.left_move_left()\n self.right_move_left()\n\n def move_right(self):\n \"\"\" Handles right button press.\n 
\"\"\"\n self.move_caret_right()\n if self.caret_pos == self.window_right + 1:\n if (self.window_right -\n self.window_left == self.height * self.width - 1):\n self.left_move_right()\n self.right_move_right()\n\n def showable_text(self, show_caret):\n \"\"\" Chops out text to be shown on the screen.\n\n Parameters\n ----------\n show_caret : bool\n Whether or not to show the caret.\n \"\"\"\n if show_caret:\n ret_text = (self.message[:self.caret_pos] +\n \"_\" +\n self.message[self.caret_pos:])\n else:\n ret_text = self.message\n ret_text = ret_text[self.window_left:self.window_right + 1]\n return ret_text\n\n def render_text(self, show_caret=True):\n \"\"\" Renders text after processing.\n\n Parameters\n ----------\n show_caret : bool\n Whether or not to show the caret.\n \"\"\"\n text = self.showable_text(show_caret)\n if text == \"\":\n text = \"Enter Text\"\n self.text.message = self.width_set_text(text)\n\n def edit_mode(self):\n \"\"\" Turns on edit mode.\n \"\"\"\n if self.init:\n self.message = \"\"\n self.init = False\n self.caret_pos = 0\n self.render_text()\n\n def left_button_press(self, i_ren, _obj, _textbox_object):\n \"\"\" Left button press handler for textbox\n\n Parameters\n ----------\n i_ren: :class:`CustomInteractorStyle`\n obj: :class:`vtkActor`\n The picked actor\n _textbox_object: :class:`TextBox2D`\n \"\"\"\n i_ren.add_active_prop(self.text.actor)\n self.edit_mode()\n i_ren.force_render()\n\n def key_press(self, i_ren, _obj, _textbox_object):\n \"\"\" Key press handler for textbox\n\n Parameters\n ----------\n i_ren: :class:`CustomInteractorStyle`\n obj: :class:`vtkActor`\n The picked actor\n _textbox_object: :class:`TextBox2D`\n \"\"\"\n key = i_ren.event.key\n is_done = self.handle_character(key)\n if is_done:\n i_ren.remove_active_prop(self.text.actor)\n\n i_ren.force_render()\n\n\nclass LineSlider2D(UI):\n \"\"\" A 2D Line Slider.\n\n A sliding handle on a line with a percentage indicator.\n\n Attributes\n ----------\n line_width : int\n Width of the line on which the disk will slide.\n length : int\n Length of the slider.\n track : :class:`Rectangle2D`\n The line on which the slider's handle moves.\n handle : :class:`Disk2D`\n The moving part of the slider.\n text : :class:`TextBlock2D`\n The text that shows percentage.\n shape : string\n Describes the shape of the handle.\n Currently supports 'disk' and 'square'.\n default_color : (float, float, float)\n Color of the handle when in unpressed state.\n active_color : (float, float, float)\n Color of the handle when it is pressed.\n \"\"\"\n def __init__(self, center=(0, 0),\n initial_value=50, min_value=0, max_value=100,\n length=200, line_width=5,\n inner_radius=0, outer_radius=10, handle_side=20,\n font_size=16,\n text_template=\"{value:.1f} ({ratio:.0%})\", shape=\"disk\"):\n \"\"\"\n Parameters\n ----------\n center : (float, float)\n Center of the slider's center.\n initial_value : float\n Initial value of the slider.\n min_value : float\n Minimum value of the slider.\n max_value : float\n Maximum value of the slider.\n length : int\n Length of the slider.\n line_width : int\n Width of the line on which the disk will slide.\n inner_radius : int\n Inner radius of the handles (if disk).\n outer_radius : int\n Outer radius of the handles (if disk).\n handle_side : int\n Side length of the handles (if sqaure).\n font_size : int\n Size of the text to display alongside the slider (pt).\n text_template : str, callable\n If str, text template can contain one or multiple of the\n replacement fields: `{value:}`, 
`{ratio:}`.\n If callable, this instance of `:class:LineSlider2D` will be\n passed as argument to the text template function.\n shape : string\n Describes the shape of the handle.\n Currently supports 'disk' and 'square'.\n \"\"\"\n self.shape = shape\n self.default_color = (1, 1, 1)\n self.active_color = (0, 0, 1)\n super(LineSlider2D, self).__init__()\n\n self.track.width = length\n self.track.height = line_width\n if shape == \"disk\":\n self.handle.inner_radius = inner_radius\n self.handle.outer_radius = outer_radius\n elif shape == \"square\":\n self.handle.width = handle_side\n self.handle.height = handle_side\n self.center = center\n\n self.min_value = min_value\n self.max_value = max_value\n self.text.font_size = font_size\n self.text_template = text_template\n\n # Offer some standard hooks to the user.\n self.on_change = lambda ui: None\n\n self.value = initial_value\n self.update()\n\n def _setup(self):\n \"\"\" Setup this UI component.\n\n Create the slider's track (Rectangle2D), the handle (Disk2D) and\n the text (TextBlock2D).\n \"\"\"\n # Slider's track\n self.track = Rectangle2D()\n self.track.color = (1, 0, 0)\n\n # Slider's handle\n if self.shape == \"disk\":\n self.handle = Disk2D(outer_radius=1)\n elif self.shape == \"square\":\n self.handle = Rectangle2D(size=(1, 1))\n self.handle.color = self.default_color\n\n # Slider Text\n self.text = TextBlock2D(justification=\"center\",\n vertical_justification=\"top\")\n\n # Add default events listener for this UI component.\n self.track.on_left_mouse_button_pressed = self.track_click_callback\n self.track.on_left_mouse_button_dragged = self.handle_move_callback\n self.track.on_left_mouse_button_released = \\\n self.handle_release_callback\n self.handle.on_left_mouse_button_dragged = self.handle_move_callback\n self.handle.on_left_mouse_button_released = \\\n self.handle_release_callback\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return self.track.actors + self.handle.actors + self.text.actors\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n self.track.add_to_scene(scene)\n self.handle.add_to_scene(scene)\n self.text.add_to_scene(scene)\n\n def _get_size(self):\n # Consider the handle's size when computing the slider's size.\n width = self.track.width + self.handle.size[0]\n height = max(self.track.height, self.handle.size[1])\n return np.array([width, height])\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n # Offset the slider line by the handle's radius.\n track_position = coords + self.handle.size / 2.\n # Offset the slider line height by half the slider line width.\n track_position[1] -= self.track.size[1] / 2.\n self.track.position = track_position\n self.handle.position = self.handle.position.astype('float64')\n self.handle.position += coords - self.position\n # Position the text below the handle.\n self.text.position = (self.handle.center[0],\n self.handle.position[1] - 10)\n\n @property\n def left_x_position(self):\n return self.track.position[0]\n\n @property\n def right_x_position(self):\n return self.track.position[0] + self.track.size[0]\n\n def set_position(self, position):\n \"\"\" Sets the disk's position.\n\n Parameters\n ----------\n position : (float, float)\n The absolute position of the disk (x, 
y).\n \"\"\"\n x_position = position[0]\n x_position = max(x_position, self.left_x_position)\n x_position = min(x_position, self.right_x_position)\n\n # Move slider disk.\n self.handle.center = (x_position, self.track.center[1])\n self.update() # Update information.\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n value_range = self.max_value - self.min_value\n self.ratio = (value - self.min_value) / value_range\n\n @property\n def ratio(self):\n return self._ratio\n\n @ratio.setter\n def ratio(self, ratio):\n position_x = self.left_x_position + ratio * self.track.width\n self.set_position((position_x, None))\n\n def format_text(self):\n \"\"\" Returns formatted text to display along the slider. \"\"\"\n if callable(self.text_template):\n return self.text_template(self)\n return self.text_template.format(ratio=self.ratio, value=self.value)\n\n def update(self):\n \"\"\" Updates the slider. \"\"\"\n # Compute the ratio determined by the position of the slider disk.\n length = float(self.right_x_position - self.left_x_position)\n if length != self.track.width:\n raise Exception(\"Disk position outside the slider line\")\n disk_position_x = self.handle.center[0]\n self._ratio = (disk_position_x - self.left_x_position) / length\n\n # Compute the selected value considering min_value and max_value.\n value_range = self.max_value - self.min_value\n self._value = self.min_value + self.ratio * value_range\n\n # Update text.\n text = self.format_text()\n self.text.message = text\n\n # Move the text below the slider's handle.\n self.text.position = (disk_position_x, self.text.position[1])\n\n self.on_change(self)\n\n def track_click_callback(self, i_ren, _vtkactor, _slider):\n \"\"\" Update disk position and grab the focus.\n\n Parameters\n ----------\n i_ren : :class:`CustomInteractorStyle`\n vtkactor : :class:`vtkActor`\n The picked actor\n _slider : :class:`LineSlider2D`\n \"\"\"\n\n position = i_ren.event.position\n self.set_position(position)\n i_ren.force_render()\n i_ren.event.abort() # Stop propagating the event.\n\n def handle_move_callback(self, i_ren, _vtkactor, _slider):\n \"\"\" Actual handle movement.\n\n Parameters\n ----------\n i_ren : :class:`CustomInteractorStyle`\n vtkactor : :class:`vtkActor`\n The picked actor\n slider : :class:`LineSlider2D`\n \"\"\"\n\n self.handle.color = self.active_color\n position = i_ren.event.position\n self.set_position(position)\n i_ren.force_render()\n i_ren.event.abort() # Stop propagating the event.\n\n def handle_release_callback(self, i_ren, _vtkactor, _slider):\n \"\"\" Change color when handle is released.\n\n Parameters\n ----------\n i_ren : :class:`CustomInteractorStyle`\n vtkactor : :class:`vtkActor`\n The picked actor\n slider : :class:`LineSlider2D`\n \"\"\"\n self.handle.color = self.default_color\n i_ren.force_render()\n\n\nclass LineDoubleSlider2D(UI):\n \"\"\" A 2D Line Slider with two sliding rings.\n Useful for setting min and max values for something.\n\n Currently supports:\n - Setting positions of both disks.\n\n Attributes\n ----------\n line_width : int\n Width of the line on which the disk will slide.\n length : int\n Length of the slider.\n track : :class:`vtkActor`\n The line on which the handles move.\n handles : [:class:`vtkActor`, :class:`vtkActor`]\n The moving slider disks.\n text : [:class:`TextBlock2D`, :class:`TextBlock2D`]\n The texts that show the values of the disks.\n shape : string\n Describes the shape of the handle.\n Currently supports 'disk' and 'square'.\n 
default_color : (float, float, float)\n Color of the handles when in unpressed state.\n active_color : (float, float, float)\n Color of the handles when they are pressed.\n\n \"\"\"\n def __init__(self, line_width=5, inner_radius=0, outer_radius=10,\n handle_side=20, center=(450, 300), length=200,\n initial_values=(0, 100), min_value=0, max_value=100,\n font_size=16, text_template=\"{value:.1f}\", shape=\"disk\"):\n \"\"\"\n Parameters\n ----------\n line_width : int\n Width of the line on which the disk will slide.\n inner_radius : int\n Inner radius of the handles (if disk).\n outer_radius : int\n Outer radius of the handles (if disk).\n handle_side : int\n Side length of the handles (if square).\n center : (float, float)\n Center of the slider.\n length : int\n Length of the slider.\n initial_values : (float, float)\n Initial values of the two handles.\n min_value : float\n Minimum value of the slider.\n max_value : float\n Maximum value of the slider.\n font_size : int\n Size of the text to display alongside the slider (pt).\n text_template : str, callable\n If str, text template can contain one or multiple of the\n replacement fields: `{value:}`, `{ratio:}`.\n If callable, this instance of `:class:LineDoubleSlider2D` will be\n passed as argument to the text template function.\n shape : string\n Describes the shape of the handle.\n Currently supports 'disk' and 'square'.\n\n \"\"\"\n self.shape = shape\n self.default_color = (1, 1, 1)\n self.active_color = (0, 0, 1)\n super(LineDoubleSlider2D, self).__init__()\n\n self.track.width = length\n self.track.height = line_width\n self.center = center\n if shape == \"disk\":\n self.handles[0].inner_radius = inner_radius\n self.handles[0].outer_radius = outer_radius\n self.handles[1].inner_radius = inner_radius\n self.handles[1].outer_radius = outer_radius\n elif shape == \"square\":\n self.handles[0].width = handle_side\n self.handles[0].height = handle_side\n self.handles[1].width = handle_side\n self.handles[1].height = handle_side\n\n self.min_value = min_value\n self.max_value = max_value\n self.text[0].font_size = font_size\n self.text[1].font_size = font_size\n self.text_template = text_template\n\n # Setting the handle positions will also update everything.\n self._values = [initial_values[0], initial_values[1]]\n self._ratio = [None, None]\n self.left_disk_value = initial_values[0]\n self.right_disk_value = initial_values[1]\n\n # This is required for correct initialization of the position\n # of the handles\n self.handles[0].position += np.array(self.handles[0].size, 'f8') / 2.\n self.handles[1].position += np.array(self.handles[1].size, 'f8') / 2.\n\n def _setup(self):\n \"\"\" Setup this UI component.\n\n Create the slider's track (Rectangle2D), the handles (Disk2D) and\n the text (TextBlock2D).\n \"\"\"\n # Slider's track\n self.track = Rectangle2D()\n self.track.color = (1, 0, 0)\n\n # Handles\n self.handles = []\n if self.shape == \"disk\":\n self.handles.append(Disk2D(outer_radius=1))\n self.handles.append(Disk2D(outer_radius=1))\n elif self.shape == \"square\":\n self.handles.append(Rectangle2D(size=(1, 1)))\n self.handles.append(Rectangle2D(size=(1, 1)))\n self.handles[0].color = self.default_color\n self.handles[1].color = self.default_color\n\n # Slider Text\n self.text = [TextBlock2D(justification=\"center\",\n vertical_justification=\"top\"),\n TextBlock2D(justification=\"center\",\n vertical_justification=\"top\")\n ]\n\n # Add default events listener for this UI component.\n self.track.on_left_mouse_button_dragged = 
self.handle_move_callback\n self.handles[0].on_left_mouse_button_dragged = \\\n self.handle_move_callback\n self.handles[1].on_left_mouse_button_dragged = \\\n self.handle_move_callback\n self.handles[0].on_left_mouse_button_released = \\\n self.handle_release_callback\n self.handles[1].on_left_mouse_button_released = \\\n self.handle_release_callback\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return (self.track.actors + self.handles[0].actors +\n self.handles[1].actors + self.text[0].actors +\n self.text[1].actors)\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n self.track.add_to_scene(scene)\n self.handles[0].add_to_scene(scene)\n self.handles[1].add_to_scene(scene)\n self.text[0].add_to_scene(scene)\n self.text[1].add_to_scene(scene)\n\n def _get_size(self):\n # Consider the handle's size when computing the slider's size.\n width = self.track.width + 2 * self.handles[0].size[0]\n height = max(self.track.height, self.handles[0].size[1])\n return np.array([width, height])\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n # Offset the slider line by the handle's radius.\n track_position = coords + self.handles[0].size / 2.\n # Offset the slider line height by half the slider line width.\n track_position[1] -= self.track.size[1] / 2.\n self.track.position = track_position\n\n self.handles[0].position = self.handles[0].position.astype('float64')\n self.handles[1].position = self.handles[1].position.astype('float64')\n\n self.handles[0].position += coords - self.position\n self.handles[1].position += coords - self.position\n\n # Position the text below the handles.\n self.text[0].position = (self.handles[0].center[0],\n self.handles[0].position[1] - 20)\n self.text[1].position = (self.handles[1].center[0],\n self.handles[1].position[1] - 20)\n\n @property\n def left_x_position(self):\n return self.track.position[0]\n\n @property\n def right_x_position(self):\n return self.track.position[0] + self.track.size[0]\n\n def value_to_ratio(self, value):\n \"\"\" Converts the value of a disk to the ratio\n\n Parameters\n ----------\n value : float\n \"\"\"\n value_range = self.max_value - self.min_value\n return (value - self.min_value) / value_range\n\n def ratio_to_coord(self, ratio):\n \"\"\" Converts the ratio to the absolute coordinate.\n\n Parameters\n ----------\n ratio : float\n \"\"\"\n return self.left_x_position + ratio * self.track.width\n\n def coord_to_ratio(self, coord):\n \"\"\" Converts the x coordinate of a disk to the ratio\n\n Parameters\n ----------\n coord : float\n \"\"\"\n return (coord - self.left_x_position) / self.track.width\n\n def ratio_to_value(self, ratio):\n \"\"\" Converts the ratio to the value of the disk.\n\n Parameters\n ----------\n ratio : float\n \"\"\"\n value_range = self.max_value - self.min_value\n return self.min_value + ratio * value_range\n\n def set_position(self, position, disk_number):\n \"\"\" Sets the disk's position.\n\n Parameters\n ----------\n position : (float, float)\n The absolute position of the disk (x, y).\n disk_number : int\n The index of disk being moved.\n \"\"\"\n x_position = position[0]\n\n if disk_number == 0 and x_position >= self.handles[1].center[0]:\n x_position = self.ratio_to_coord(\n 
self.value_to_ratio(self._values[1] - 1))\n\n if disk_number == 1 and x_position <= self.handles[0].center[0]:\n x_position = self.ratio_to_coord(\n self.value_to_ratio(self._values[0] + 1))\n\n x_position = max(x_position, self.left_x_position)\n x_position = min(x_position, self.right_x_position)\n\n self.handles[disk_number].center = (x_position, self.track.center[1])\n self.update(disk_number)\n\n @property\n def left_disk_value(self):\n \"\"\" Returns the value of the left disk. \"\"\"\n return self._values[0]\n\n @left_disk_value.setter\n def left_disk_value(self, left_disk_value):\n \"\"\" Sets the value of the left disk.\n\n Parameters\n ----------\n left_disk_value : New value for the left disk.\n \"\"\"\n self.left_disk_ratio = self.value_to_ratio(left_disk_value)\n\n @property\n def right_disk_value(self):\n \"\"\" Returns the value of the right disk. \"\"\"\n return self._values[1]\n\n @right_disk_value.setter\n def right_disk_value(self, right_disk_value):\n \"\"\" Sets the value of the right disk.\n\n Parameters\n ----------\n right_disk_value : New value for the right disk.\n \"\"\"\n self.right_disk_ratio = self.value_to_ratio(right_disk_value)\n\n @property\n def left_disk_ratio(self):\n \"\"\" Returns the ratio of the left disk. \"\"\"\n return self._ratio[0]\n\n @left_disk_ratio.setter\n def left_disk_ratio(self, left_disk_ratio):\n \"\"\" Sets the ratio of the left disk.\n\n Parameters\n ----------\n left_disk_ratio : New ratio for the left disk.\n \"\"\"\n position_x = self.ratio_to_coord(left_disk_ratio)\n self.set_position((position_x, None), 0)\n\n @property\n def right_disk_ratio(self):\n \"\"\" Returns the ratio of the right disk. \"\"\"\n return self._ratio[1]\n\n @right_disk_ratio.setter\n def right_disk_ratio(self, right_disk_ratio):\n \"\"\" Sets the ratio of the right disk.\n\n Parameters\n ----------\n right_disk_ratio : New ratio for the right disk.\n \"\"\"\n position_x = self.ratio_to_coord(right_disk_ratio)\n self.set_position((position_x, None), 1)\n\n def format_text(self, disk_number):\n \"\"\" Returns formatted text to display along the slider.\n\n Parameters\n ----------\n disk_number : Index of the disk.\n \"\"\"\n if callable(self.text_template):\n return self.text_template(self)\n\n return self.text_template.format(value=self._values[disk_number])\n\n def on_change(self, slider):\n pass\n\n def update(self, disk_number):\n \"\"\" Updates the slider.\n\n Parameters\n ----------\n disk_number : Index of the disk to be updated.\n \"\"\"\n\n # Compute the ratio determined by the position of the slider disk.\n self._ratio[disk_number] = self.coord_to_ratio(\n self.handles[disk_number].center[0])\n\n # Compute the selected value considering min_value and max_value.\n self._values[disk_number] = self.ratio_to_value(\n self._ratio[disk_number])\n\n # Update text.\n text = self.format_text(disk_number)\n self.text[disk_number].message = text\n\n self.text[disk_number].position = (\n self.handles[disk_number].center[0],\n self.text[disk_number].position[1])\n self.on_change(self)\n\n def handle_move_callback(self, i_ren, vtkactor, _slider):\n \"\"\" Actual handle movement.\n\n Parameters\n ----------\n i_ren : :class:`CustomInteractorStyle`\n vtkactor : :class:`vtkActor`\n The picked actor\n _slider : :class:`LineDoubleSlider2D`\n \"\"\"\n\n position = i_ren.event.position\n if vtkactor == self.handles[0].actors[0]:\n self.set_position(position, 0)\n self.handles[0].color = self.active_color\n elif vtkactor == self.handles[1].actors[0]:\n 
self.set_position(position, 1)\n self.handles[1].color = self.active_color\n i_ren.force_render()\n i_ren.event.abort() # Stop propagating the event.\n\n def handle_release_callback(self, i_ren, vtkactor, _slider):\n \"\"\" Change color when handle is released.\n\n Parameters\n ----------\n i_ren : :class:`CustomInteractorStyle`\n vtkactor : :class:`vtkActor`\n The picked actor\n _slider : :class:`LineDoubleSlider2D`\n \"\"\"\n if vtkactor == self.handles[0].actors[0]:\n self.handles[0].color = self.default_color\n elif vtkactor == self.handles[1].actors[0]:\n self.handles[1].color = self.default_color\n i_ren.force_render()\n\n\nclass RingSlider2D(UI):\n \"\"\" A disk slider.\n\n A disk moves along the boundary of a ring.\n Goes from 0-360 degrees.\n\n Attributes\n ----------\n mid_track_radius: float\n Distance from the center of the slider to the middle of the track.\n previous_value: float\n Value of Rotation of the actor before the current value.\n track : :class:`Disk2D`\n The circle on which the slider's handle moves.\n handle : :class:`Disk2D`\n The moving part of the slider.\n text : :class:`TextBlock2D`\n The text that shows percentage.\n default_color : (float, float, float)\n Color of the handle when in unpressed state.\n active_color : (float, float, float)\n Color of the handle when it is pressed.\n \"\"\"\n def __init__(self, center=(0, 0),\n initial_value=180, min_value=0, max_value=360,\n slider_inner_radius=40, slider_outer_radius=44,\n handle_inner_radius=0, handle_outer_radius=10,\n font_size=16,\n text_template=\"{ratio:.0%}\"):\n \"\"\"\n Parameters\n ----------\n center : (float, float)\n Position (x, y) of the slider's center.\n initial_value : float\n Initial value of the slider.\n min_value : float\n Minimum value of the slider.\n max_value : float\n Maximum value of the slider.\n slider_inner_radius : int\n Inner radius of the base disk.\n slider_outer_radius : int\n Outer radius of the base disk.\n handle_outer_radius : int\n Outer radius of the slider's handle.\n handle_inner_radius : int\n Inner radius of the slider's handle.\n font_size : int\n Size of the text to display alongside the slider (pt).\n text_template : str, callable\n If str, text template can contain one or multiple of the\n replacement fields: `{value:}`, `{ratio:}`, `{angle:}`.\n If callable, this instance of `:class:RingSlider2D` will be\n passed as argument to the text template function.\n \"\"\"\n self.default_color = (1, 1, 1)\n self.active_color = (0, 0, 1)\n super(RingSlider2D, self).__init__()\n\n self.track.inner_radius = slider_inner_radius\n self.track.outer_radius = slider_outer_radius\n self.handle.inner_radius = handle_inner_radius\n self.handle.outer_radius = handle_outer_radius\n self.center = center\n\n self.min_value = min_value\n self.max_value = max_value\n self.text.font_size = font_size\n self.text_template = text_template\n\n # Offer some standard hooks to the user.\n self.on_change = lambda ui: None\n\n self._value = initial_value\n self.value = initial_value\n self._previous_value = initial_value\n self._angle = 0\n self._ratio = self.angle / TWO_PI\n\n def _setup(self):\n \"\"\" Setup this UI component.\n\n Create the slider's circle (Disk2D), the handle (Disk2D) and\n the text (TextBlock2D).\n \"\"\"\n # Slider's track.\n self.track = Disk2D(outer_radius=1)\n self.track.color = (1, 0, 0)\n\n # Slider's handle.\n self.handle = Disk2D(outer_radius=1)\n self.handle.color = self.default_color\n\n # Slider Text\n self.text = TextBlock2D(justification=\"center\",\n 
vertical_justification=\"middle\")\n\n # Add default events listener for this UI component.\n self.track.on_left_mouse_button_pressed = self.track_click_callback\n self.track.on_left_mouse_button_dragged = self.handle_move_callback\n self.track.on_left_mouse_button_released = \\\n self.handle_release_callback\n self.handle.on_left_mouse_button_dragged = self.handle_move_callback\n self.handle.on_left_mouse_button_released = \\\n self.handle_release_callback\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return self.track.actors + self.handle.actors + self.text.actors\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n self.track.add_to_scene(scene)\n self.handle.add_to_scene(scene)\n self.text.add_to_scene(scene)\n\n def _get_size(self):\n return self.track.size + self.handle.size\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n self.track.position = coords + self.handle.size / 2.\n self.handle.position += coords - self.position\n # Position the text in the center of the slider's track.\n self.text.position = coords + self.size / 2.\n\n @property\n def mid_track_radius(self):\n return (self.track.inner_radius + self.track.outer_radius) / 2.\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n value_range = self.max_value - self.min_value\n self.ratio = (value - self.min_value) / value_range\n\n @property\n def previous_value(self):\n return self._previous_value\n\n @property\n def ratio(self):\n return self._ratio\n\n @ratio.setter\n def ratio(self, ratio):\n self.angle = ratio * TWO_PI\n\n @property\n def angle(self):\n \"\"\" Angle (in rad) the handle makes with x-axis \"\"\"\n return self._angle\n\n @angle.setter\n def angle(self, angle):\n self._angle = angle % TWO_PI # Wraparound\n self.update()\n\n def format_text(self):\n \"\"\" Returns formatted text to display along the slider. \"\"\"\n if callable(self.text_template):\n return self.text_template(self)\n\n return self.text_template.format(ratio=self.ratio, value=self.value,\n angle=np.rad2deg(self.angle))\n\n def update(self):\n \"\"\" Updates the slider. 
\"\"\"\n\n        # Compute the ratio determined by the position of the slider disk.\n        self._ratio = self.angle / TWO_PI\n\n        # Compute the selected value considering min_value and max_value.\n        value_range = self.max_value - self.min_value\n        self._previous_value = self.value\n        self._value = self.min_value + self.ratio * value_range\n\n        # Move the handle along the ring-shaped track.\n        x = self.mid_track_radius * np.cos(self.angle) + self.center[0]\n        y = self.mid_track_radius * np.sin(self.angle) + self.center[1]\n        self.handle.center = (x, y)\n\n        # Update text.\n        text = self.format_text()\n        self.text.message = text\n\n        self.on_change(self)  # Call hook.\n\n    def move_handle(self, click_position):\n        \"\"\"Moves the slider's handle.\n\n        Parameters\n        ----------\n        click_position: (float, float)\n            Position of the mouse click.\n        \"\"\"\n        x, y = np.array(click_position) - self.center\n        angle = np.arctan2(y, x)\n        if angle < 0:\n            angle += TWO_PI\n\n        self.angle = angle\n\n    def track_click_callback(self, i_ren, _obj, _slider):\n        \"\"\" Update disk position and grab the focus.\n\n        Parameters\n        ----------\n        i_ren : :class:`CustomInteractorStyle`\n        obj : :class:`vtkActor`\n            The picked actor\n        _slider : :class:`RingSlider2D`\n        \"\"\"\n        click_position = i_ren.event.position\n        self.move_handle(click_position=click_position)\n        i_ren.force_render()\n        i_ren.event.abort()  # Stop propagating the event.\n\n    def handle_move_callback(self, i_ren, _obj, _slider):\n        \"\"\" Move the slider's handle.\n\n        Parameters\n        ----------\n        i_ren : :class:`CustomInteractorStyle`\n        obj : :class:`vtkActor`\n            The picked actor\n        _slider : :class:`RingSlider2D`\n        \"\"\"\n        click_position = i_ren.event.position\n        self.handle.color = self.active_color\n        self.move_handle(click_position=click_position)\n        i_ren.force_render()\n        i_ren.event.abort()  # Stop propagating the event.\n\n    def handle_release_callback(self, i_ren, _obj, _slider):\n        \"\"\" Change color when handle is released.\n\n        Parameters\n        ----------\n        i_ren : :class:`CustomInteractorStyle`\n        obj : :class:`vtkActor`\n            The picked actor\n        _slider : :class:`RingSlider2D`\n        \"\"\"\n        self.handle.color = self.default_color\n        i_ren.force_render()\n\n\nclass RangeSlider(UI):\n\n    \"\"\" A set of a LineSlider2D and a LineDoubleSlider2D.\n    The double slider is used to set the min and max values\n    for the LineSlider2D.\n\n    Attributes\n    ----------\n    range_slider_center : (float, float)\n        Center of the LineDoubleSlider2D object.\n    value_slider_center : (float, float)\n        Center of the LineSlider2D object.\n    range_slider : :class:`LineDoubleSlider2D`\n        The line slider which sets the min and max values.\n    value_slider : :class:`LineSlider2D`\n        The line slider which sets the value.\n\n    \"\"\"\n    def __init__(self, line_width=5, inner_radius=0, outer_radius=10,\n                 handle_side=20, range_slider_center=(450, 400),\n                 value_slider_center=(450, 300), length=200, min_value=0,\n                 max_value=100, font_size=16, range_precision=1,\n                 value_precision=2, shape=\"disk\"):\n        \"\"\"\n        Parameters\n        ----------\n        line_width : int\n            Width of the slider tracks.\n        inner_radius : int\n            Inner radius of the handles.\n        outer_radius : int\n            Outer radius of the handles.\n        handle_side : int\n            Side length of the handles (if square).\n        range_slider_center : (float, float)\n            Center of the LineDoubleSlider2D object.\n        value_slider_center : (float, float)\n            Center of the LineSlider2D object.\n        length : int\n            Length of the sliders.\n        min_value : float\n            Minimum value of the double slider.\n        max_value : float\n            Maximum value of the double slider.\n        font_size : int\n            Size 
of the text to display alongside the sliders (pt).\n range_precision : int\n Number of decimal places to show the min and max values set.\n value_precision : int\n Number of decimal places to show the value set on slider.\n shape : string\n Describes the shape of the handle.\n Currently supports 'disk' and 'square'.\n \"\"\"\n self.min_value = min_value\n self.max_value = max_value\n self.inner_radius = inner_radius\n self.outer_radius = outer_radius\n self.handle_side = handle_side\n self.length = length\n self.line_width = line_width\n self.font_size = font_size\n self.shape = shape\n\n self.range_slider_text_template = \\\n \"{value:.\" + str(range_precision) + \"f}\"\n self.value_slider_text_template = \\\n \"{value:.\" + str(value_precision) + \"f}\"\n\n self.range_slider_center = range_slider_center\n self.value_slider_center = value_slider_center\n super(RangeSlider, self).__init__()\n\n def _setup(self):\n \"\"\" Setup this UI component.\n \"\"\"\n self.range_slider = \\\n LineDoubleSlider2D(line_width=self.line_width,\n inner_radius=self.inner_radius,\n outer_radius=self.outer_radius,\n handle_side=self.handle_side,\n center=self.range_slider_center,\n length=self.length, min_value=self.min_value,\n max_value=self.max_value,\n initial_values=(self.min_value,\n self.max_value),\n font_size=self.font_size, shape=self.shape,\n text_template=self.range_slider_text_template)\n\n self.value_slider = \\\n LineSlider2D(line_width=self.line_width, length=self.length,\n inner_radius=self.inner_radius,\n outer_radius=self.outer_radius,\n handle_side=self.handle_side,\n center=self.value_slider_center,\n min_value=self.min_value, max_value=self.max_value,\n initial_value=(self.min_value + self.max_value) / 2,\n font_size=self.font_size, shape=self.shape,\n text_template=self.value_slider_text_template)\n\n # Add default events listener for this UI component.\n self.range_slider.handles[0].on_left_mouse_button_dragged = \\\n self.range_slider_handle_move_callback\n self.range_slider.handles[1].on_left_mouse_button_dragged = \\\n self.range_slider_handle_move_callback\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return self.range_slider.actors + self.value_slider.actors\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n self.range_slider.add_to_scene(scene)\n self.value_slider.add_to_scene(scene)\n\n def _get_size(self):\n return self.range_slider.size + self.value_slider.size\n\n def _set_position(self, coords):\n pass\n\n def range_slider_handle_move_callback(self, i_ren, obj, _slider):\n \"\"\" Actual movement of range_slider's handles.\n\n Parameters\n ----------\n i_ren : :class:`CustomInteractorStyle`\n obj : :class:`vtkActor`\n The picked actor\n _slider : :class:`RangeSlider`\n\n \"\"\"\n position = i_ren.event.position\n if obj == self.range_slider.handles[0].actors[0]:\n self.range_slider.handles[0].color = \\\n self.range_slider.active_color\n self.range_slider.set_position(position, 0)\n self.value_slider.min_value = self.range_slider.left_disk_value\n self.value_slider.update()\n elif obj == self.range_slider.handles[1].actors[0]:\n self.range_slider.handles[1].color = \\\n self.range_slider.active_color\n self.range_slider.set_position(position, 1)\n self.value_slider.max_value = self.range_slider.right_disk_value\n self.value_slider.update()\n i_ren.force_render()\n i_ren.event.abort() # Stop propagating the 
event.\n\n\nclass ImageContainer2D(UI):\n \"\"\" A 2D container to hold an image.\n Currently Supports:\n - png and jpg/jpeg images\n\n Attributes\n ----------\n size: (float, float)\n Image size (width, height) in pixels.\n img : vtkImageDataGeometryFilters\n The image loaded from the specified path.\n\n \"\"\"\n\n def __init__(self, img_path, position=(0, 0), size=(100, 100)):\n \"\"\"\n Parameters\n ----------\n img_path : string\n Path of the image\n position : (float, float), optional\n Absolute coordinates (x, y) of the lower-left corner of the image.\n size : (int, int), optional\n Width and height in pixels of the image.\n \"\"\"\n super(ImageContainer2D, self).__init__(position)\n self.img = load_image(img_path, as_vtktype=True)\n self.set_img(self.img)\n self.resize(size)\n\n def _get_size(self):\n lower_left_corner = self.texture_points.GetPoint(0)\n upper_right_corner = self.texture_points.GetPoint(2)\n size = np.array(upper_right_corner) - np.array(lower_left_corner)\n return abs(size[:2])\n\n def _setup(self):\n \"\"\" Setup this UI Component.\n Return an image as a 2D actor with a specific position.\n\n Returns\n -------\n :class:`vtkTexturedActor2D`\n \"\"\"\n self.texture_polydata = vtk.vtkPolyData()\n self.texture_points = vtk.vtkPoints()\n self.texture_points.SetNumberOfPoints(4)\n\n polys = vtk.vtkCellArray()\n polys.InsertNextCell(4)\n polys.InsertCellPoint(0)\n polys.InsertCellPoint(1)\n polys.InsertCellPoint(2)\n polys.InsertCellPoint(3)\n self.texture_polydata.SetPolys(polys)\n\n tc = vtk.vtkFloatArray()\n tc.SetNumberOfComponents(2)\n tc.SetNumberOfTuples(4)\n tc.InsertComponent(0, 0, 0.0)\n tc.InsertComponent(0, 1, 0.0)\n tc.InsertComponent(1, 0, 1.0)\n tc.InsertComponent(1, 1, 0.0)\n tc.InsertComponent(2, 0, 1.0)\n tc.InsertComponent(2, 1, 1.0)\n tc.InsertComponent(3, 0, 0.0)\n tc.InsertComponent(3, 1, 1.0)\n self.texture_polydata.GetPointData().SetTCoords(tc)\n\n texture_mapper = vtk.vtkPolyDataMapper2D()\n texture_mapper = set_input(texture_mapper, self.texture_polydata)\n\n image = vtk.vtkTexturedActor2D()\n image.SetMapper(texture_mapper)\n\n self.texture = vtk.vtkTexture()\n image.SetTexture(self.texture)\n\n image_property = vtk.vtkProperty2D()\n image_property.SetOpacity(1.0)\n image.SetProperty(image_property)\n self.actor = image\n\n # Add default events listener to the VTK actor.\n self.handle_events(self.actor)\n\n def _get_actors(self):\n \"\"\" Returns the actors that compose this UI component.\n \"\"\"\n return [self.actor]\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n scene.add(self.actor)\n\n def resize(self, size):\n \"\"\" Resize the image.\n\n Parameters\n ----------\n size : (float, float)\n image size (width, height) in pixels.\n \"\"\"\n # Update actor.\n self.texture_points.SetPoint(0, 0, 0, 0.0)\n self.texture_points.SetPoint(1, size[0], 0, 0.0)\n self.texture_points.SetPoint(2, size[0], size[1], 0.0)\n self.texture_points.SetPoint(3, 0, size[1], 0.0)\n self.texture_polydata.SetPoints(self.texture_points)\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n self.actor.SetPosition(*coords)\n\n def scale(self, factor):\n \"\"\" Scales the image.\n\n Parameters\n ----------\n factor : (float, float)\n Scaling factor (width, height) in pixels.\n \"\"\"\n self.resize(self.size * 
factor)\n\n def set_img(self, img):\n \"\"\" Modifies the image used by the vtkTexturedActor2D.\n\n Parameters\n ----------\n img : imageDataGeometryFilter\n\n \"\"\"\n self.texture = set_input(self.texture, img)\n\n\nclass Option(UI):\n\n \"\"\"\n A set of a Button2D and a TextBlock2D to act as a single option\n for checkboxes and radio buttons.\n Clicking the button toggles its checked/unchecked status.\n\n Attributes\n ----------\n label : str\n The label for the option.\n font_size : int\n Font Size of the label.\n \"\"\"\n\n def __init__(self, label, position=(0, 0), font_size=18):\n \"\"\"\n Parameters\n ----------\n label : str\n Text to be displayed next to the option's button.\n position : (float, float)\n Absolute coordinates (x, y) of the lower-left corner of\n the button of the option.\n font_size : int\n Font size of the label.\n \"\"\"\n self.label = label\n self.font_size = font_size\n self.checked = False\n self.button_size = (font_size * 1.2, font_size * 1.2)\n self.button_label_gap = 10\n super(Option, self).__init__(position)\n\n # Offer some standard hooks to the user.\n self.on_change = lambda obj: None\n\n def _setup(self):\n \"\"\" Setup this UI component.\n \"\"\"\n # Option's button\n self.button_icons = []\n self.button_icons.append(('unchecked',\n read_viz_icons(fname=\"stop2.png\")))\n self.button_icons.append(('checked',\n read_viz_icons(fname=\"checkmark.png\")))\n self.button = Button2D(icon_fnames=self.button_icons,\n size=self.button_size)\n\n self.text = TextBlock2D(text=self.label, font_size=self.font_size)\n\n # Add callbacks\n self.button.on_left_mouse_button_clicked = self.toggle\n self.text.on_left_mouse_button_clicked = self.toggle\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return self.button.actors + self.text.actors\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n self.button.add_to_scene(scene)\n self.text.add_to_scene(scene)\n\n def _get_size(self):\n width = self.button.size[0] + self.button_label_gap + self.text.size[0]\n height = max(self.button.size[1], self.text.size[1])\n return np.array([width, height])\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n num_newlines = self.label.count('\\n')\n self.button.position = coords + \\\n (0, num_newlines * self.font_size * 0.5)\n offset = (self.button.size[0] + self.button_label_gap, 0)\n self.text.position = coords + offset\n\n def toggle(self, i_ren, _obj, _element):\n if self.checked:\n self.deselect()\n else:\n self.select()\n\n self.on_change(self)\n i_ren.force_render()\n\n def select(self):\n self.checked = True\n self.button.set_icon_by_name(\"checked\")\n\n def deselect(self):\n self.checked = False\n self.button.set_icon_by_name(\"unchecked\")\n\n\nclass Checkbox(UI):\n\n \"\"\" A 2D set of :class:'Option' objects.\n Multiple options can be selected.\n\n Attributes\n ----------\n labels : list(string)\n List of labels of each option.\n options : list(Option)\n List of all the options in the checkbox set.\n padding : float\n Distance between two adjacent options\n \"\"\"\n\n def __init__(self, labels, padding=1, font_size=18,\n font_family='Arial', position=(0, 0)):\n \"\"\"\n Parameters\n ----------\n labels : list(string)\n List of labels of each option.\n padding : float\n The 
distance between two adjacent options\n font_size : int\n Size of the text font.\n font_family : str\n Currently only supports Arial.\n position : (float, float)\n Absolute coordinates (x, y) of the lower-left corner of\n the button of the first option.\n \"\"\"\n self.labels = list(reversed(labels))\n self._padding = padding\n self._font_size = font_size\n self.font_family = font_family\n super(Checkbox, self).__init__(position)\n self.on_change = lambda checkbox: None\n self.checked = []\n\n def _setup(self):\n \"\"\" Setup this UI component.\n \"\"\"\n self.options = []\n button_y = self.position[1]\n for label in self.labels:\n option = Option(label=label,\n font_size=self.font_size,\n position=(self.position[0], button_y))\n line_spacing = option.text.actor.GetTextProperty().GetLineSpacing()\n button_y = button_y + self.font_size * \\\n (label.count('\\n') + 1) * (line_spacing + 0.1) + self.padding\n self.options.append(option)\n\n # Set callback\n option.on_change = self._handle_option_change\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n actors = []\n for option in self.options:\n actors = actors + option.actors\n return actors\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n for option in self.options:\n option.add_to_scene(scene)\n\n def _get_size(self):\n option_width, option_height = self.options[0].get_size()\n height = len(self.labels) * (option_height + self.padding) \\\n - self.padding\n return np.asarray([option_width, height])\n\n def _handle_option_change(self, option):\n \"\"\" Reacts whenever an option changes.\n\n Parameters\n ----------\n option : :class:`Option`\n \"\"\"\n if option.checked:\n self.checked.append(option.label)\n else:\n self.checked.remove(option.label)\n\n self.on_change(self)\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n button_y = coords[1]\n for option_no, option in enumerate(self.options):\n option.position = (coords[0], button_y)\n line_spacing = option.text.actor.GetTextProperty().GetLineSpacing()\n button_y = (button_y + self.font_size\n * (self.labels[option_no].count('\\n') + 1)\n * (line_spacing + 0.1) + self.padding)\n\n @property\n def font_size(self):\n \"\"\" Gets the font size of text.\n \"\"\"\n return self._font_size\n\n @property\n def padding(self):\n \"\"\" Gets the padding between options.\n \"\"\"\n return self._padding\n\n\nclass RadioButton(Checkbox):\n \"\"\" A 2D set of :class:'Option' objects.\n Only one option can be selected.\n\n Attributes\n ----------\n labels : list(string)\n List of labels of each option.\n options : list(Option)\n List of all the options in the checkbox set.\n padding : float\n Distance between two adjacent options\n \"\"\"\n\n def __init__(self, labels, padding=1, font_size=18,\n font_family='Arial', position=(0, 0)):\n \"\"\"\n Parameters\n ----------\n labels : list(string)\n List of labels of each option.\n padding : float\n The distance between two adjacent options\n font_size : int\n Size of the text font.\n font_family : str\n Currently only supports Arial.\n position : (float, float)\n Absolute coordinates (x, y) of the lower-left corner of\n the button of the first option.\n \"\"\"\n super(RadioButton, self).__init__(labels=labels, position=position,\n padding=padding,\n 
font_size=font_size,\n font_family=font_family)\n\n def _handle_option_change(self, option):\n for option_ in self.options:\n option_.deselect()\n\n option.select()\n self.checked = [option.label]\n self.on_change(self)\n\n\nclass ListBox2D(UI):\n \"\"\" UI component that allows the user to select items from a list.\n\n Attributes\n ----------\n on_change: function\n Callback function for when the selected items have changed.\n \"\"\"\n\n def __init__(self, values, position=(0, 0), size=(100, 300),\n multiselection=True, reverse_scrolling=False,\n font_size=20, line_spacing=1.4,\n text_color=(0.2, 0.2, 0.2),\n selected_color=(0.9, 0.6, 0.6),\n unselected_color=(0.6, 0.6, 0.6),\n scroll_bar_active_color=(0.6, 0.2, 0.2),\n scroll_bar_inactive_color=(0.9, 0.0, 0.0),\n background_opacity=1.):\n \"\"\"\n\n Parameters\n ----------\n values: list of objects\n Values used to populate this listbox. Objects must be castable\n to string.\n position : (float, float)\n Absolute coordinates (x, y) of the lower-left corner of this\n UI component.\n size : (int, int)\n Width and height in pixels of this UI component.\n multiselection: {True, False}\n Whether multiple values can be selected at once.\n reverse_scrolling: {True, False}\n If True, scrolling up will move the list of files down.\n font_size: int\n The font size in pixels.\n line_spacing: float\n Distance between listbox's items in pixels.\n text_color : tuple of 3 floats\n selected_color : tuple of 3 floats\n unselected_color : tuple of 3 floats\n scroll_bar_active_color : tuple of 3 floats\n scroll_bar_inactive_color : tuple of 3 floats\n background_opacity : float\n \"\"\"\n self.view_offset = 0\n self.slots = []\n self.selected = []\n\n self.panel_size = size\n self.font_size = font_size\n self.line_spacing = line_spacing\n self.slot_height = int(self.font_size * self.line_spacing)\n\n self.text_color = text_color\n self.selected_color = selected_color\n self.unselected_color = unselected_color\n self.background_opacity = background_opacity\n\n # self.panel.resize(size)\n self.values = values\n self.multiselection = multiselection\n self.last_selection_idx = 0\n self.reverse_scrolling = reverse_scrolling\n super(ListBox2D, self).__init__()\n\n denom = len(self.values) - self.nb_slots\n if not denom:\n denom += 1\n self.scroll_step_size = (self.slot_height * self.nb_slots -\n self.scroll_bar.height) / denom\n\n self.scroll_bar_active_color = scroll_bar_active_color\n self.scroll_bar_inactive_color = scroll_bar_inactive_color\n self.scroll_bar.color = self.scroll_bar_inactive_color\n self.scroll_bar.opacity = self.background_opacity\n\n self.position = position\n self.scroll_init_position = 0\n self.update()\n\n # Offer some standard hooks to the user.\n self.on_change = lambda: None\n\n def _setup(self):\n \"\"\" Setup this UI component.\n\n Create the ListBox (Panel2D) filled with empty slots (ListBoxItem2D).\n \"\"\"\n self.margin = 10\n size = self.panel_size\n font_size = self.font_size\n # Calculating the number of slots.\n self.nb_slots = int((size[1] - 2 * self.margin) // self.slot_height)\n\n # This panel facilitates adding slots at the right position.\n self.panel = Panel2D(size=size, color=(1, 1, 1))\n\n # Add a scroll bar\n scroll_bar_height = self.nb_slots * (size[1] - 2 * self.margin) \\\n / len(self.values)\n self.scroll_bar = Rectangle2D(size=(int(size[0]/20),\n scroll_bar_height))\n if len(self.values) <= self.nb_slots:\n self.scroll_bar.set_visibility(False)\n self.panel.add_element(\n self.scroll_bar, size - 
self.scroll_bar.size - self.margin)\n\n        # Initialisation of empty text actors\n        slot_width = size[0] - self.scroll_bar.size[0] - \\\n            2 * self.margin - self.margin\n        x = self.margin\n        y = size[1] - self.margin\n        for _ in range(self.nb_slots):\n            y -= self.slot_height\n            item = ListBoxItem2D(list_box=self,\n                                 size=(slot_width, self.slot_height),\n                                 text_color=self.text_color,\n                                 selected_color=self.selected_color,\n                                 unselected_color=self.unselected_color,\n                                 background_opacity=self.background_opacity)\n            item.textblock.font_size = font_size\n            self.slots.append(item)\n            self.panel.add_element(item, (x, y + self.margin))\n\n        # Add default events listener for this UI component.\n        self.scroll_bar.on_left_mouse_button_pressed = \\\n            self.scroll_click_callback\n        self.scroll_bar.on_left_mouse_button_released = \\\n            self.scroll_release_callback\n        self.scroll_bar.on_left_mouse_button_dragged = \\\n            self.scroll_drag_callback\n\n        # Handle mouse wheel events on the panel.\n        up_event = \"MouseWheelForwardEvent\"\n        down_event = \"MouseWheelBackwardEvent\"\n        if self.reverse_scrolling:\n            up_event, down_event = down_event, up_event  # Swap events\n\n        self.add_callback(self.panel.background.actor, up_event,\n                          self.up_button_callback)\n        self.add_callback(self.panel.background.actor, down_event,\n                          self.down_button_callback)\n\n        # Handle mouse wheel events on the slots.\n        for slot in self.slots:\n            self.add_callback(slot.background.actor, up_event,\n                              self.up_button_callback)\n            self.add_callback(slot.background.actor, down_event,\n                              self.down_button_callback)\n            self.add_callback(slot.textblock.actor, up_event,\n                              self.up_button_callback)\n            self.add_callback(slot.textblock.actor, down_event,\n                              self.down_button_callback)\n\n    def resize(self, size):\n        pass\n\n    def _get_actors(self):\n        \"\"\" Get the actors composing this UI component.\n        \"\"\"\n        return self.panel.actors\n\n    def _add_to_scene(self, scene):\n        \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n        Parameters\n        ----------\n        scene : scene\n        \"\"\"\n        self.panel.add_to_scene(scene)\n\n    def _get_size(self):\n        return self.panel.size\n\n    def _set_position(self, coords):\n        \"\"\" Position the lower-left corner of this UI component.\n\n        Parameters\n        ----------\n        coords: (float, float)\n            Absolute pixel coordinates (x, y).\n        \"\"\"\n        self.panel.position = coords\n\n    def up_button_callback(self, i_ren, _obj, _list_box):\n        \"\"\" Pressing the up button scrolls up in the list box.\n\n        Parameters\n        ----------\n        i_ren: :class:`CustomInteractorStyle`\n        obj: :class:`vtkActor`\n            The picked actor\n        _list_box: :class:`ListBox2D`\n\n        \"\"\"\n        if self.view_offset > 0:\n            self.view_offset -= 1\n            self.update()\n            scroll_bar_idx = self.panel._elements.index(self.scroll_bar)\n            self.scroll_bar.center = (self.scroll_bar.center[0],\n                                      self.scroll_bar.center[1] +\n                                      self.scroll_step_size)\n            self.panel.element_offsets[scroll_bar_idx] = (\n                self.scroll_bar,\n                (self.scroll_bar.position - self.panel.position))\n\n        i_ren.force_render()\n        i_ren.event.abort()  # Stop propagating the event.\n\n    def down_button_callback(self, i_ren, _obj, _list_box):\n        \"\"\" Pressing the down button scrolls down in the list box.\n\n        Parameters\n        ----------\n        i_ren: :class:`CustomInteractorStyle`\n        obj: :class:`vtkActor`\n            The picked actor\n        _list_box: :class:`ListBox2D`\n\n        \"\"\"\n        view_end = self.view_offset + self.nb_slots\n        if view_end < len(self.values):\n            self.view_offset += 1\n            self.update()\n            scroll_bar_idx = self.panel._elements.index(self.scroll_bar)\n            self.scroll_bar.center = 
(self.scroll_bar.center[0],\n                                      self.scroll_bar.center[1] -\n                                      self.scroll_step_size)\n            self.panel.element_offsets[scroll_bar_idx] = (\n                self.scroll_bar,\n                (self.scroll_bar.position - self.panel.position))\n\n        i_ren.force_render()\n        i_ren.event.abort()  # Stop propagating the event.\n\n    def scroll_click_callback(self, i_ren, _obj, _rect_obj):\n        \"\"\" Callback to change the color of the bar when it is clicked.\n\n        Parameters\n        ----------\n        i_ren: :class:`CustomInteractorStyle`\n        obj: :class:`vtkActor`\n            The picked actor\n        _rect_obj: :class:`Rectangle2D`\n\n        \"\"\"\n        self.scroll_bar.color = self.scroll_bar_active_color\n        self.scroll_init_position = i_ren.event.position[1]\n        i_ren.force_render()\n        i_ren.event.abort()\n\n    def scroll_release_callback(self, i_ren, _obj, _rect_obj):\n        \"\"\" Callback to change the color of the bar when it is released.\n\n        Parameters\n        ----------\n        i_ren: :class:`CustomInteractorStyle`\n        obj: :class:`vtkActor`\n            The picked actor\n        rect_obj: :class:`Rectangle2D`\n\n        \"\"\"\n        self.scroll_bar.color = self.scroll_bar_inactive_color\n        i_ren.force_render()\n\n    def scroll_drag_callback(self, i_ren, _obj, _rect_obj):\n        \"\"\" Dragging the scroll bar scrolls the list box.\n\n        Parameters\n        ----------\n        i_ren: :class:`CustomInteractorStyle`\n        obj: :class:`vtkActor`\n            The picked actor\n        rect_obj: :class:`Rectangle2D`\n\n        \"\"\"\n        position = i_ren.event.position\n        # Translate the drag distance into a whole number of slots.\n        offset = int((position[1] - self.scroll_init_position) /\n                     self.scroll_step_size)\n        # Clamp the offset so the view cannot scroll past either end.\n        if offset > 0 and self.view_offset > 0:\n            offset = min(offset, self.view_offset)\n\n        elif offset < 0 and (\n                self.view_offset + self.nb_slots < len(self.values)):\n            offset = min(-offset,\n                         len(self.values) - self.nb_slots - self.view_offset)\n            offset = - offset\n        else:\n            return\n\n        self.view_offset -= offset\n        self.update()\n        scroll_bar_idx = self.panel._elements.index(self.scroll_bar)\n        self.scroll_bar.center = (self.scroll_bar.center[0],\n                                  self.scroll_bar.center[1] +\n                                  offset * self.scroll_step_size)\n\n        self.scroll_init_position += offset * self.scroll_step_size\n\n        self.panel.element_offsets[scroll_bar_idx] = (\n            self.scroll_bar, (self.scroll_bar.position - self.panel.position))\n        i_ren.force_render()\n        i_ren.event.abort()\n\n    def update(self):\n        \"\"\" Refresh listbox's content. 
\"\"\"\n view_start = self.view_offset\n view_end = view_start + self.nb_slots\n values_to_show = self.values[view_start:view_end]\n\n # Populate slots according to the view.\n for i, choice in enumerate(values_to_show):\n slot = self.slots[i]\n slot.element = choice\n slot.set_visibility(True)\n if slot.element in self.selected:\n slot.select()\n else:\n slot.deselect()\n\n # Flush remaining slots.\n for slot in self.slots[len(values_to_show):]:\n slot.element = None\n slot.set_visibility(False)\n slot.deselect()\n\n def update_scrollbar(self):\n \"\"\" Change the scroll-bar height when the values\n in the listbox change\n \"\"\"\n self.scroll_bar.set_visibility(True)\n\n self.scroll_bar.height = self.nb_slots * \\\n (self.panel_size[1] - 2 * self.margin) / len(self.values)\n\n self.scroll_step_size = (self.slot_height * self.nb_slots -\n self.scroll_bar.height) \\\n / (len(self.values) - self.nb_slots)\n\n self.panel.update_element(\n self.scroll_bar, self.panel_size - self.scroll_bar.size -\n self.margin)\n\n if len(self.values) <= self.nb_slots:\n self.scroll_bar.set_visibility(False)\n\n def clear_selection(self):\n del self.selected[:]\n\n def select(self, item, multiselect=False, range_select=False):\n \"\"\" Select the item.\n\n Parameters\n ----------\n item: ListBoxItem2D's object\n Item to select.\n multiselect: {True, False}\n If True and multiselection is allowed, the item is added to the\n selection.\n Otherwise, the selection will only contain the provided item unless\n range_select is True.\n range_select: {True, False}\n If True and multiselection is allowed, all items between the last\n selected item and the current one will be added to the selection.\n Otherwise, the selection will only contain the provided item unless\n multi_select is True.\n\n \"\"\"\n selection_idx = self.values.index(item.element)\n if self.multiselection and range_select:\n self.clear_selection()\n step = 1 if selection_idx >= self.last_selection_idx else -1\n for i in range(self.last_selection_idx,\n selection_idx + step,\n step):\n self.selected.append(self.values[i])\n\n elif self.multiselection and multiselect:\n if item.element in self.selected:\n self.selected.remove(item.element)\n else:\n self.selected.append(item.element)\n self.last_selection_idx = selection_idx\n\n else:\n self.clear_selection()\n self.selected.append(item.element)\n self.last_selection_idx = selection_idx\n\n self.on_change() # Call hook.\n self.update()\n\n\nclass ListBoxItem2D(UI):\n \"\"\" The text displayed in a listbox. 
\"\"\"\n\n def __init__(self, list_box, size,\n text_color=(1.0, 0.0, 0.0),\n selected_color=(0.4, 0.4, 0.4),\n unselected_color=(0.9, 0.9, 0.9),\n background_opacity=1.):\n \"\"\" Single ListBox Item\n\n Parameters\n ----------\n list_box : :class:`ListBox`\n The ListBox reference this text belongs to.\n size : tuple of 2 ints\n The size of the listbox item.\n text_color : tuple of 3 floats\n unselected_color : tuple of 3 floats\n selected_color : tuple of 3 floats\n background_opacity : float\n \"\"\"\n super(ListBoxItem2D, self).__init__()\n self._element = None\n self.list_box = list_box\n self.background.resize(size)\n self.background_opacity = background_opacity\n self.selected = False\n self.text_color = text_color\n self.textblock.color = self.text_color\n self.selected_color = selected_color\n self.unselected_color = unselected_color\n self.background.opacity = self.background_opacity\n self.deselect()\n\n def _setup(self):\n \"\"\" Setup this UI component.\n\n Create the ListBoxItem2D with its background (Rectangle2D) and its\n label (TextBlock2D).\n \"\"\"\n self.background = Rectangle2D()\n self.textblock = TextBlock2D(justification=\"left\",\n vertical_justification=\"middle\")\n\n # Add default events listener for this UI component.\n self.add_callback(self.textblock.actor, \"LeftButtonPressEvent\",\n self.left_button_clicked)\n self.add_callback(self.background.actor, \"LeftButtonPressEvent\",\n self.left_button_clicked)\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return self.background.actors + self.textblock.actors\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n self.background.add_to_scene(scene)\n self.textblock.add_to_scene(scene)\n\n def _get_size(self):\n return self.background.size\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n self.textblock.position = coords\n # Center background underneath the text.\n position = coords\n self.background.position = (position[0],\n position[1] - self.background.size[1] / 2.)\n\n def deselect(self):\n self.background.color = self.unselected_color\n self.textblock.bold = False\n self.selected = False\n\n def select(self):\n self.textblock.bold = True\n self.background.color = self.selected_color\n self.selected = True\n\n @property\n def element(self):\n return self._element\n\n @element.setter\n def element(self, element):\n self._element = element\n self.textblock.message = \"\" if self._element is None else str(element)\n\n def left_button_clicked(self, i_ren, _obj, _list_box_item):\n \"\"\" A callback to handle left click for this UI element.\n\n Parameters\n ----------\n i_ren: :class:`CustomInteractorStyle`\n obj: :class:`vtkActor`\n The picked actor\n _list_box_item: :class:`ListBoxItem2D`\n\n \"\"\"\n multiselect = i_ren.event.ctrl_key\n range_select = i_ren.event.shift_key\n self.list_box.select(self, multiselect, range_select)\n i_ren.force_render()\n\n\nclass FileMenu2D(UI):\n \"\"\" A menu to select files in the current folder.\n Can go to new folder, previous folder and select multiple files.\n\n Attributes\n ----------\n extensions: ['extension1', 'extension2', ....]\n To show all files, extensions=[\"*\"] or [\"\"]\n List of extensions to be shown as files.\n listbox : :class: 'ListBox2D'\n Container for 
the menu.\n\n \"\"\"\n\n def __init__(self, directory_path, extensions=None, position=(0, 0),\n size=(100, 300), multiselection=True, reverse_scrolling=False,\n font_size=20, line_spacing=1.4):\n \"\"\"\n\n Parameters\n ----------\n extensions: list(string)\n List of extensions to be shown as files.\n directory_path: string\n Path of the directory where this dialog should open.\n position : (float, float)\n Absolute coordinates (x, y) of the lower-left corner of this\n UI component.\n size : (int, int)\n Width and height in pixels of this UI component.\n multiselection: {True, False}\n Whether multiple values can be selected at once.\n reverse_scrolling: {True, False}\n If True, scrolling up will move the list of files down.\n font_size: int\n The font size in pixels.\n line_spacing: float\n Distance between listbox's items in pixels.\n \"\"\"\n self.font_size = font_size\n self.multiselection = multiselection\n self.reverse_scrolling = reverse_scrolling\n self.line_spacing = line_spacing\n self.extensions = extensions or [\"*\"]\n self.current_directory = directory_path\n self.menu_size = size\n self.directory_contents = []\n\n super(FileMenu2D, self).__init__()\n self.position = position\n self.set_slot_colors()\n\n def _setup(self):\n \"\"\" Setup this UI component.\n Create the ListBox (Panel2D) filled with empty slots (ListBoxItem2D).\n \"\"\"\n self.directory_contents = self.get_all_file_names()\n content_names = [x[0] for x in self.directory_contents]\n self.listbox = ListBox2D(\n values=content_names, multiselection=self.multiselection,\n font_size=self.font_size, line_spacing=self.line_spacing,\n reverse_scrolling=self.reverse_scrolling, size=self.menu_size)\n\n self.add_callback(self.listbox.scroll_bar.actor, \"MouseMoveEvent\",\n self.scroll_callback)\n\n # Handle mouse wheel events on the panel.\n up_event = \"MouseWheelForwardEvent\"\n down_event = \"MouseWheelBackwardEvent\"\n if self.reverse_scrolling:\n up_event, down_event = down_event, up_event # Swap events\n\n self.add_callback(self.listbox.panel.background.actor, up_event,\n self.scroll_callback)\n self.add_callback(self.listbox.panel.background.actor, down_event,\n self.scroll_callback)\n\n # Handle mouse wheel events on the slots.\n for slot in self.listbox.slots:\n self.add_callback(slot.background.actor, up_event,\n self.scroll_callback)\n self.add_callback(slot.background.actor, down_event,\n self.scroll_callback)\n self.add_callback(slot.textblock.actor, up_event,\n self.scroll_callback)\n self.add_callback(slot.textblock.actor, down_event,\n self.scroll_callback)\n slot.add_callback(slot.textblock.actor, \"LeftButtonPressEvent\",\n self.directory_click_callback)\n slot.add_callback(slot.background.actor, \"LeftButtonPressEvent\",\n self.directory_click_callback)\n\n def _get_actors(self):\n \"\"\" Get the actors composing this UI component.\n \"\"\"\n return self.listbox.actors\n\n def resize(self, size):\n pass\n\n def _set_position(self, coords):\n \"\"\" Position the lower-left corner of this UI component.\n\n Parameters\n ----------\n coords: (float, float)\n Absolute pixel coordinates (x, y).\n \"\"\"\n self.listbox.position = coords\n\n def _add_to_scene(self, scene):\n \"\"\" Add all subcomponents or VTK props that compose this UI component.\n\n Parameters\n ----------\n scene : scene\n \"\"\"\n self.listbox.add_to_scene(scene)\n\n def _get_size(self):\n return self.listbox.size\n\n def get_all_file_names(self):\n \"\"\" Gets file and directory names.\n\n Returns\n -------\n all_file_names: list((string, 
{\"directory\", \"file\"}))\n            List of all file and directory names as string.\n        \"\"\"\n        all_file_names = []\n\n        directory_names = self.get_directory_names()\n        for directory_name in directory_names:\n            all_file_names.append((directory_name, \"directory\"))\n\n        file_names = self.get_file_names()\n        for file_name in file_names:\n            all_file_names.append((file_name, \"file\"))\n\n        return all_file_names\n\n    def get_directory_names(self):\n        \"\"\" Finds names of all directories in the current_directory.\n\n        Returns\n        -------\n        directory_names: list(string)\n            List of all directory names as string.\n        \"\"\"\n        # A list of directory names in the current directory\n        directory_names = []\n        for (_, dirnames, _) in os.walk(self.current_directory):\n            directory_names += dirnames\n            break\n        directory_names.sort(key=lambda s: s.lower())\n        directory_names.insert(0, \"../\")\n        return directory_names\n\n    def get_file_names(self):\n        \"\"\" Finds names of all files in the current_directory.\n\n        Returns\n        -------\n        file_names: list(string)\n            List of all file names as string.\n        \"\"\"\n        # A list of file names with extension in the current directory\n        for (_, _, files) in os.walk(self.current_directory):\n            break\n\n        file_names = []\n        if \"*\" in self.extensions or \"\" in self.extensions:\n            file_names = files\n        else:\n            for ext in self.extensions:\n                for file in files:\n                    if file.endswith(\".\" + ext):\n                        file_names.append(file)\n        file_names.sort(key=lambda s: s.lower())\n        return file_names\n\n    def set_slot_colors(self):\n        \"\"\" Sets the text color of the slots based on the type of element\n        they show. Green for directories and blue for files.\n        \"\"\"\n        for idx, slot in enumerate(self.listbox.slots):\n            list_idx = min(self.listbox.view_offset + idx,\n                           len(self.directory_contents)-1)\n            if self.directory_contents[list_idx][1] == \"directory\":\n                slot.textblock.color = (0, 0.6, 0)\n            elif self.directory_contents[list_idx][1] == \"file\":\n                slot.textblock.color = (0, 0, 0.7)\n\n    def scroll_callback(self, i_ren, _obj, _filemenu_item):\n        \"\"\" A callback to handle scroll and change the slot text colors.\n\n        Parameters\n        ----------\n        i_ren: :class:`CustomInteractorStyle`\n        obj: :class:`vtkActor`\n            The picked actor\n        _filemenu_item: :class:`FileMenu2D`\n        \"\"\"\n        self.set_slot_colors()\n        i_ren.force_render()\n        i_ren.event.abort()\n\n    def directory_click_callback(self, i_ren, _obj, listboxitem):\n        \"\"\" A callback to move into a directory if it has been clicked.\n\n        Parameters\n        ----------\n        i_ren: :class:`CustomInteractorStyle`\n        obj: :class:`vtkActor`\n            The picked actor\n        listboxitem: :class:`ListBoxItem2D`\n        \"\"\"\n        if (listboxitem.element, \"directory\") in self.directory_contents:\n            new_directory_path = os.path.join(self.current_directory,\n                                              listboxitem.element)\n            if os.access(new_directory_path, os.R_OK):\n                self.current_directory = new_directory_path\n                self.directory_contents = self.get_all_file_names()\n                content_names = [x[0] for x in self.directory_contents]\n                self.listbox.clear_selection()\n                self.listbox.values = content_names\n                self.listbox.view_offset = 0\n                self.listbox.update()\n                self.listbox.update_scrollbar()\n                self.set_slot_colors()\n        i_ren.force_render()\n        i_ren.event.abort()\n\n\nclass GridUI(UI):\n    \"\"\" Add actors in a grid and interact with them individually.\n    \"\"\"\n\n    def __init__(self,\n                 actors, captions=None, caption_offset=(0, -100, 0),\n                 cell_padding=0,\n                 cell_shape=\"rect\", aspect_ratio=16/9., dim=None,\n                 rotation_speed=1, rotation_axis=(0, 1, 0)):\n\n        # TODO: add rotation axis None by default\n\n        
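# NOTE: `grid` (imported from fury.actor) packs the given actors, with\n        # their optional captions, into one container laid out as a 2D grid;\n        # per-actor interaction callbacks are then attached in `_setup` below.\n        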
self.container = grid(actors, captions=captions,\n caption_offset=caption_offset,\n cell_padding=cell_padding,\n cell_shape=cell_shape,\n aspect_ratio=aspect_ratio, dim=dim)\n self._actors = []\n self._actors_dict = {}\n self.rotation_speed = rotation_speed\n self.rotation_axis = rotation_axis\n\n for item in self.container._items:\n self._actors.append(item._items[0])\n self._actors_dict[item._items[0]] = {'x': -np.inf, 'y': -np.inf}\n\n super(GridUI, self).__init__(position=(0, 0, 0))\n\n def _get_size(self):\n return\n\n @staticmethod\n def left_click_callback(istyle, _obj, _what):\n istyle.trackball_actor.OnLeftButtonDown()\n istyle.force_render()\n istyle.event.abort()\n\n @staticmethod\n def left_release_callback(istyle, _obj, _what):\n\n istyle.trackball_actor.OnLeftButtonUp()\n istyle.force_render()\n istyle.event.abort()\n\n @staticmethod\n def mouse_move_callback(istyle, _obj, _what):\n istyle.trackball_actor.OnMouseMove()\n istyle.force_render()\n istyle.event.abort()\n\n @staticmethod\n def left_click_callback2(istyle, obj, self):\n\n rx, ry, rz = self.rotation_axis\n clockwise_rotation = np.array([self.rotation_speed,\n rx, ry, rz])\n rotate(obj, clockwise_rotation)\n\n istyle.force_render()\n istyle.event.abort()\n\n @staticmethod\n def left_release_callback2(istyle, _obj, _what):\n\n istyle.force_render()\n istyle.event.abort()\n\n @staticmethod\n def mouse_move_callback2(istyle, obj, self):\n\n if self._actors_dict[obj]['y'] == - np.inf:\n\n iren = istyle.GetInteractor()\n event_pos = iren.GetEventPosition()\n self._actors_dict[obj]['y'] = event_pos[1]\n\n else:\n\n iren = istyle.GetInteractor()\n event_pos = iren.GetEventPosition()\n rx, ry, rz = self.rotation_axis\n\n if event_pos[1] >= self._actors_dict[obj]['y']:\n clockwise_rotation = np.array([-self.rotation_speed,\n rx, ry, rz])\n rotate(obj, clockwise_rotation)\n else:\n anti_clockwise_rotation = np.array(\n [self.rotation_speed, rx, ry, rz])\n rotate(obj, anti_clockwise_rotation)\n\n self._actors_dict[obj]['y'] = event_pos[1]\n\n istyle.force_render()\n istyle.event.abort()\n\n ANTICLOCKWISE_ROTATION_Y = np.array([-10, 0, 1, 0])\n CLOCKWISE_ROTATION_Y = np.array([10, 0, 1, 0])\n ANTICLOCKWISE_ROTATION_X = np.array([-10, 1, 0, 0])\n CLOCKWISE_ROTATION_X = np.array([10, 1, 0, 0])\n\n def key_press_callback(self, istyle, obj, _what):\n has_changed = False\n if istyle.event.key == \"Left\":\n has_changed = True\n for a in self._actors:\n rotate(a, self.ANTICLOCKWISE_ROTATION_Y)\n elif istyle.event.key == \"Right\":\n has_changed = True\n for a in self._actors:\n rotate(a, self.CLOCKWISE_ROTATION_Y)\n elif istyle.event.key == \"Up\":\n has_changed = True\n for a in self._actors:\n rotate(a, self.ANTICLOCKWISE_ROTATION_X)\n elif istyle.event.key == \"Down\":\n has_changed = True\n for a in self._actors:\n rotate(a, self.CLOCKWISE_ROTATION_X)\n\n if has_changed:\n istyle.force_render()\n\n def _setup(self):\n \"\"\"Set up this UI component and the events of its actor\n \"\"\"\n # Add default events listener to the VTK actor.\n for actor in self._actors:\n # self.handle_events(actor)\n\n if self.rotation_axis is None:\n self.add_callback(actor, \"LeftButtonPressEvent\",\n self.left_click_callback)\n self.add_callback(actor, \"LeftButtonReleaseEvent\",\n self.left_release_callback)\n self.add_callback(actor, \"MouseMoveEvent\",\n self.mouse_move_callback)\n else:\n self.add_callback(actor, \"LeftButtonPressEvent\",\n self.left_click_callback2)\n # TODO: possibly add this too\n self.add_callback(actor, 
\"LeftButtonReleaseEvent\",\n                                  self.left_release_callback2)\n                self.add_callback(actor, \"MouseMoveEvent\",\n                                  self.mouse_move_callback2)\n\n            # TODO: this is currently not running\n            self.add_callback(actor, \"KeyPressEvent\",\n                              self.key_press_callback)\n        # self.on_key_press = self.key_press_callback2\n\n    def _get_actors(self):\n        \"\"\"Get the actors composing this UI component.\"\"\"\n        return self._actors\n\n    def _add_to_scene(self, scene):\n        \"\"\"Add all subcomponents or VTK props that compose this UI component.\n\n        Parameters\n        ----------\n        scene : scene\n\n        \"\"\"\n        self.container.add_to_scene(scene)\n\n    def resize(self, size):\n        \"\"\"Resize the grid.\n\n        Parameters\n        ----------\n        size : (float, float)\n            Grid size (width, height) in pixels.\n\n        \"\"\"\n        # Resizing the grid is not supported yet.\n        pass\n\n    def _set_position(self, coords):\n        \"\"\" Position the lower-left corner of this UI component.\n\n        Parameters\n        ----------\n        coords: (float, float)\n            Absolute pixel coordinates (x, y).\n        \"\"\"\n        # coords = (0, 0, 0)\n        pass\n        # self.actor.SetPosition(*coords)\n        # self.container.SetPosition(*coords)\n" ]
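# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It shows how a
# couple of the widgets defined above are typically wired into a scene, and
# it assumes only the standard FURY entry points (`fury.window.Scene` and
# `fury.window.ShowManager`), which live outside this file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from fury import window

    # A ring slider that reports its value whenever the handle moves.
    ring = RingSlider2D(center=(300, 300), initial_value=90)
    ring.on_change = lambda slider: print("ring value:", slider.value)

    # A two-option checkbox that reports the currently checked labels.
    boxes = Checkbox(labels=["show axes", "wireframe"], position=(50, 400))
    boxes.on_change = lambda cb: print("checked:", cb.checked)

    scene = window.Scene()
    show_manager = window.ShowManager(scene, size=(600, 600),
                                      title="FURY UI sketch")
    # Scene.add dispatches UI elements to their own `add_to_scene` method.
    scene.add(ring, boxes)
    show_manager.start()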
[ [ "numpy.array", "numpy.sin", "numpy.asarray", "numpy.rad2deg", "numpy.any", "numpy.arctan2", "numpy.cos", "numpy.issubdtype" ] ]
Romero027/OmniNet
[ "c1cda1738c80925e5468b3ffc7aae2153bcd9e62" ]
[ "libs/omninet/omninet.py" ]
[ "#\n# Copyright 2019 Subhojeet Pramanik, Aman Husain, Priyanka Agrawal\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ======================================================================\n\"\"\"\nAuthors: Subhojeet Pramanik\n\nOmniNet API\n\n\"\"\"\nimport torch\nimport os\nimport torch.nn as nn\n\nfrom .peripherals import *\nfrom .util import *\nfrom .cnp import CNP\n\nclass OmniNet(nn.Module):\n\n def __init__(self, config=None, gpu_id=-1,dropout=None):\n super(OmniNet, self).__init__()\n if config is None:\n cc, pc, d = self.__defaultconf__()\n else:\n cc, pc, d = config\n if dropout is not None:\n cc['dropout']=dropout\n pc['dropout']=dropout\n self.gpu_id = gpu_id\n tasks = {'PENN': pc['penn_output_classes'], 'HMDB':pc['hmdb_output_classes'],\n 'IMAGE_CAPTION':pc['english_language_output_vocab'],'VQA':pc['vqa_output_vocab']}\n self.cnp = CNP(tasks,conf=cc,domains=d, gpu_id=gpu_id)\n \n self.image_input_perph = ImageInputPeripheral(output_dim=cc['input_dim'],\n dropout=pc['dropout'],freeze_layers=True)\n self.english_language_perph = LanguagePeripheral(vocab_size=pc['english_language_input_vocab'],\n embed_dim=pc['english_language_input_embed'],\n output_dim=cc['input_dim'],\n lang='en',\n gpu_id=gpu_id,dropout=pc['dropout'])\n self.german_language_perph = LanguagePeripheral(vocab_size=pc['german_language_input_vocab'],\n embed_dim=pc['german_language_input_embed'],\n output_dim=cc['input_dim'],\n lang='de',\n gpu_id=gpu_id)\n def reset(self,batch_size):\n self.cnp.reset(batch_size)\n\n def encode_videos(self,videos,domain='IMAGE'):\n video_encodings = self.image_input_perph.encode(videos)\n # print(f'Video encoding output tensor shape is {video_encodings.size()}')\n print(f'Video encoding output tensor size is {get_tensor_size(video_encodings):.3f}')\n self.cnp.encode(video_encodings,domain=domain)\n\n def encode_images(self,images,domain='IMAGE'):\n image_encodings = self.image_input_perph.encode(images)\n print(f'Image encoding output tensor shape is {image_encodings.size()}')\n print(f'Image encoding output tensor size is {get_tensor_size(image_encodings):.3f}')\n self.cnp.encode(image_encodings,domain=domain)\n \n def encode_englishtexts(self,texts,domain='ENGLISH'):\n sent_encodings,input_pad_mask=self.english_language_perph.embed_sentences(texts)\n print(f'Text encoding output tensor shape is {sent_encodings.size()}')\n print(f'Text encoding output tensor size is {get_tensor_size(sent_encodings):.3f}')\n self.cnp.encode(sent_encodings, pad_mask=input_pad_mask, domain=domain)\n \n def decode_from_targets(self,task,targets,target_pad_mask=None):\n return self.cnp.decode(task, targets=targets,pad_mask=target_pad_mask)\n \n def decode_greedy(self,task, num_steps):\n return self.cnp.decode(task, targets=None, num_steps=num_steps)\n\n def save(self, checkpoint_dir, iterations):\n save_dir = os.path.join(checkpoint_dir, str(iterations))\n try:\n os.stat(save_dir)\n except:\n os.mkdir(save_dir)\n torch.save(self.state_dict(), os.path.join(save_dir, 'model.pth'))\n 
print('Model saved, iterations: %d' % iterations)\n\n def restore(self, checkpoint_dir, iterations):\n save_dir = os.path.join(checkpoint_dir, str(iterations), 'model.pth')\n pretrained_dict = torch.load(save_dir)\n model_dict = self.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if\n (k in model_dict) and (model_dict[k].shape == pretrained_dict[k].shape)}\n self.load_state_dict(pretrained_dict, strict=False)\n print('Restored existing model with iterations: %d' % (iterations))\n \n def restore_file(self, file):\n pretrained_dict = torch.load(file)\n model_dict = self.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if\n (k in model_dict) and (model_dict[k].shape == pretrained_dict[k].shape)}\n self.load_state_dict(pretrained_dict, strict=False)\n \n @staticmethod\n def __defaultconf__():\n \"\"\"\n The default configuration as specified in the original paper\n\n \"\"\"\n\n cnp_conf = {\n 'input_dim':512,\n 'control_dim':32,\n 'output_dim':512,\n 'spatial_dim':512,\n 'temporal_dim':512,\n 'temporal_n_layers':6,\n 'temporal_n_heads':8,\n 'temporal_d_k':64,\n 'temporal_d_v':64,\n 'temporal_hidden_dim':2048,\n 'decoder_dim':512,\n 'decoder_n_layers':6,\n 'decoder_n_heads':8,\n 'decoder_d_k':64,\n 'decoder_d_v':64,\n 'decoder_hidden_dim':2048,\n 'max_seq_len':500,\n 'output_embedding_dim':300,\n 'dropout':0.1}\n perph_conf = {\n 'german_language_input_vocab': 25000,\n 'german_language_input_embed': 300,\n 'english_language_input_vocab': 25000,\n 'english_language_input_embed': 300,\n 'english_language_output_vocab': 25000,\n 'german_language_output_vocab': 25000,\n 'dropout': 0.1,\n 'vqa_output_vocab':3500,\n 'hmdb_output_classes':52,\n 'penn_output_classes':48\n }\n\n domains = ['ENGLISH','GERMAN','IMAGE']\n\n return cnp_conf, perph_conf, domains\n" ]
[ [ "torch.load" ] ]
joannetruong/habitat-lab
[ "33654923dc733f5fcea23aea6391034c3f694a67" ]
[ "habitat_baselines/rl/ppo/ppo.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\nfrom torch import nn as nn\nfrom torch import optim as optim\n\nfrom habitat.utils import profiling_wrapper\nfrom habitat_baselines.common.rollout_storage import RolloutStorage\nfrom habitat_baselines.rl.ppo.policy import Policy\n\nEPS_PPO = 1e-5\n\n\nclass PPO(nn.Module):\n def __init__(\n self,\n actor_critic: Policy,\n clip_param: float,\n ppo_epoch: int,\n num_mini_batch: int,\n value_loss_coef: float,\n entropy_coef: float,\n lr: Optional[float] = None,\n eps: Optional[float] = None,\n max_grad_norm: Optional[float] = None,\n use_clipped_value_loss: bool = True,\n use_normalized_advantage: bool = True,\n ) -> None:\n\n super().__init__()\n\n self.actor_critic = actor_critic\n\n self.clip_param = clip_param\n self.ppo_epoch = ppo_epoch\n self.num_mini_batch = num_mini_batch\n\n self.value_loss_coef = value_loss_coef\n self.entropy_coef = entropy_coef\n\n self.max_grad_norm = max_grad_norm\n self.use_clipped_value_loss = use_clipped_value_loss\n\n self.optimizer = optim.Adam(\n list(filter(lambda p: p.requires_grad, actor_critic.parameters())),\n lr=lr,\n eps=eps,\n )\n self.device = next(actor_critic.parameters()).device\n self.use_normalized_advantage = use_normalized_advantage\n\n def forward(self, *x):\n raise NotImplementedError\n\n def get_advantages(self, rollouts: RolloutStorage) -> Tensor:\n advantages = (\n rollouts.buffers[\"returns\"][:-1] # type: ignore\n - rollouts.buffers[\"value_preds\"][:-1]\n )\n if not self.use_normalized_advantage:\n return advantages\n\n return (advantages - advantages.mean()) / (advantages.std() + EPS_PPO)\n\n def update(self, rollouts: RolloutStorage) -> Tuple[float, float, float]:\n advantages = self.get_advantages(rollouts)\n\n value_loss_epoch = 0.0\n action_loss_epoch = 0.0\n dist_entropy_epoch = 0.0\n\n for _e in range(self.ppo_epoch):\n profiling_wrapper.range_push(\"PPO.update epoch\")\n data_generator = rollouts.recurrent_generator(\n advantages, self.num_mini_batch\n )\n\n for batch in data_generator:\n (\n values,\n action_log_probs,\n dist_entropy,\n _,\n ) = self._evaluate_actions(\n batch[\"observations\"],\n batch[\"recurrent_hidden_states\"],\n batch[\"prev_actions\"],\n batch[\"masks\"],\n batch[\"actions\"],\n )\n\n ratio = torch.exp(action_log_probs - batch[\"action_log_probs\"])\n surr1 = ratio * batch[\"advantages\"]\n surr2 = (\n torch.clamp(\n ratio, 1.0 - self.clip_param, 1.0 + self.clip_param\n )\n * batch[\"advantages\"]\n )\n action_loss = -(torch.min(surr1, surr2).mean())\n\n if self.use_clipped_value_loss:\n value_pred_clipped = batch[\"value_preds\"] + (\n values - batch[\"value_preds\"]\n ).clamp(-self.clip_param, self.clip_param)\n value_losses = (values - batch[\"returns\"]).pow(2)\n value_losses_clipped = (\n value_pred_clipped - batch[\"returns\"]\n ).pow(2)\n value_loss = 0.5 * torch.max(\n value_losses, value_losses_clipped\n )\n else:\n value_loss = 0.5 * (batch[\"returns\"] - values).pow(2)\n\n value_loss = value_loss.mean()\n dist_entropy = dist_entropy.mean()\n\n self.optimizer.zero_grad()\n total_loss = (\n value_loss * self.value_loss_coef\n + action_loss\n - dist_entropy * self.entropy_coef\n )\n\n self.before_backward(total_loss)\n total_loss.backward()\n self.after_backward(total_loss)\n\n self.before_step()\n 
self.optimizer.step()\n self.after_step()\n\n value_loss_epoch += value_loss.item()\n action_loss_epoch += action_loss.item()\n dist_entropy_epoch += dist_entropy.item()\n\n profiling_wrapper.range_pop() # PPO.update epoch\n\n num_updates = self.ppo_epoch * self.num_mini_batch\n\n value_loss_epoch /= num_updates\n action_loss_epoch /= num_updates\n dist_entropy_epoch /= num_updates\n\n return value_loss_epoch, action_loss_epoch, dist_entropy_epoch\n\n def _evaluate_actions(\n self, observations, rnn_hidden_states, prev_actions, masks, action\n ):\n r\"\"\"Internal method that calls Policy.evaluate_actions. This is used instead of calling\n that method directly so that the call can be overridden through inheritance.\n \"\"\"\n return self.actor_critic.evaluate_actions(\n observations, rnn_hidden_states, prev_actions, masks, action\n )\n\n def before_backward(self, loss: Tensor) -> None:\n pass\n\n def after_backward(self, loss: Tensor) -> None:\n pass\n\n def before_step(self) -> None:\n nn.utils.clip_grad_norm_(\n self.actor_critic.parameters(), self.max_grad_norm\n )\n\n def after_step(self) -> None:\n pass\n" ]
[ [ "torch.exp", "torch.min", "torch.clamp", "torch.max" ] ]
slayoo/pyodesys
[ "8e1afb195dadf6c6f8e765873bc9dd0fae067c39" ]
[ "pyodesys/integrators.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nThis module is for demonstration purposes only and the integrators here\nare not meant for production use. Consider them provisional, i.e., API here\nmay break without prior deprecation.\n\"\"\"\n\nimport math\nimport warnings\n\nimport numpy as np\nfrom .util import import_\n\nlu_factor, lu_solve = import_('scipy.linalg', 'lu_factor', 'lu_solve')\n\n\nclass RK4_example_integrator:\n \"\"\"\n This is an example of how to implement a custom integrator.\n It uses fixed step size and is usually not useful for real problems.\n \"\"\"\n\n with_jacobian = False\n\n @staticmethod\n def integrate_adaptive(rhs, jac, y0, x0, xend, dx0, **kwargs):\n if kwargs:\n warnings.warn(\"Ignoring keyword-argumtents: %s\" % ', '.join(kwargs.keys()))\n xspan = xend - x0\n n = int(math.ceil(xspan/dx0))\n yout = [y0[:]]\n xout = [x0]\n k = [np.empty(len(y0)) for _ in range(4)]\n for i in range(0, n+1):\n x, y = xout[-1], yout[-1]\n h = min(dx0, xend-x)\n rhs(x, y, k[0])\n rhs(x + h/2, y + h/2*k[0], k[1])\n rhs(x + h/2, y + h/2*k[1], k[2])\n rhs(x + h, y + h*k[2], k[3])\n yout.append(y + h/6 * (k[0] + 2*k[1] + 2*k[2] + k[3]))\n xout.append(x+h)\n return np.array(xout), np.array(yout), {'nfev': n*4}\n\n @staticmethod\n def integrate_predefined(rhs, jac, y0, xout, **kwargs):\n if kwargs:\n warnings.warn(\"Ignoring keyword-argumtents: %s\" % ', '.join(kwargs.keys()))\n x_old = xout[0]\n yout = [y0[:]]\n k = [np.empty(len(y0)) for _ in range(4)]\n for i, x in enumerate(xout[1:], 1):\n y = yout[-1]\n h = x - x_old\n rhs(x_old, y, k[0])\n rhs(x_old + h/2, y + h/2*k[0], k[1])\n rhs(x_old + h/2, y + h/2*k[1], k[2])\n rhs(x_old + h, y + h*k[2], k[3])\n yout.append(y + h/6 * (k[0] + 2*k[1] + 2*k[2] + k[3]))\n x_old = x\n return np.array(yout), {'nfev': (len(xout)-1)*4}\n\n\nclass EulerForward_example_integrator:\n\n with_jacobian = False\n integrate_adaptive = None\n\n @staticmethod\n def integrate_predefined(rhs, jac, y0, xout, **kwargs):\n if kwargs:\n warnings.warn(\"Ignoring keyword-argumtents: %s\" % ', '.join(kwargs.keys()))\n x_old = xout[0]\n yout = [y0[:]]\n f = np.empty(len(y0))\n for i, x in enumerate(xout[1:], 1):\n y = yout[-1]\n h = x - x_old\n rhs(x_old, y, f)\n yout.append(y + h*f)\n x_old = x\n return np.array(yout), {'nfev': (len(xout)-1)}\n\n\nclass Midpoint_example_integrator:\n\n with_jacobian = False\n integrate_adaptive = None\n\n @staticmethod\n def integrate_predefined(rhs, jac, y0, xout, **kwargs):\n if kwargs:\n warnings.warn(\"Ignoring keyword-argumtents: %s\" % ', '.join(kwargs.keys()))\n x_old = xout[0]\n yout = [y0[:]]\n f = np.empty(len(y0))\n for i, x in enumerate(xout[1:], 1):\n y = yout[-1]\n h = x - x_old\n rhs(x_old, y, f)\n dy_efw = h*f\n rhs(x_old + h/2, y + dy_efw/2, f)\n yout.append(y + h*f)\n x_old = x\n return np.array(yout), {'nfev': (len(xout)-1)}\n\n\nclass EulerBackward_example_integrator:\n\n with_jacobian = True\n integrate_adaptive = None\n\n @staticmethod\n def integrate_predefined(rhs, jac, y0, xout, **kwargs):\n if kwargs:\n warnings.warn(\"Ignoring keyword-argumtents: %s\" % ', '.join(kwargs.keys()))\n x_old = xout[0]\n yout = [y0[:]]\n f = np.empty(len(y0))\n j = np.empty((len(y0), len(y0)))\n I = np.eye(len(y0))\n for i, x in enumerate(xout[1:], 1):\n y = yout[-1]\n h = x - x_old\n jac(x_old, y, j)\n lu_piv = lu_factor(h*j - I)\n rhs(x, y, f)\n ynew = y + f*h\n norm_delta_ynew = float('inf')\n while norm_delta_ynew > 1e-12:\n rhs(x, ynew, f)\n delta_ynew = lu_solve(lu_piv, ynew - y - f*h)\n ynew += delta_ynew\n norm_delta_ynew = 
np.sqrt(np.sum(np.square(delta_ynew)))\n\n yout.append(ynew)\n x_old = x\n return np.array(yout), {'nfev': (len(xout)-1)}\n\n\nclass Trapezoidal_example_integrator:\n\n with_jacobian = True\n integrate_adaptive = None\n\n @staticmethod\n def integrate_predefined(rhs, jac, y0, xout, **kwargs):\n if kwargs:\n warnings.warn(\"Ignoring keyword-argumtents: %s\" % ', '.join(kwargs.keys()))\n x_old = xout[0]\n yout = [y0[:]]\n f = np.empty(len(y0))\n j = np.empty((len(y0), len(y0)))\n I = np.eye(len(y0))\n for i, x in enumerate(xout[1:], 1):\n y = yout[-1]\n h = x - x_old\n jac(x_old, y, j)\n lu_piv = lu_factor(h*j - I)\n rhs(x, y, f)\n euler_fw_dy = f*h\n ynew = y + euler_fw_dy\n norm_delta_ynew = float('inf')\n while norm_delta_ynew > 1e-12:\n rhs(x, ynew, f)\n delta_ynew = lu_solve(lu_piv, ynew - y - f*h)\n ynew += delta_ynew\n norm_delta_ynew = np.sqrt(np.sum(np.square(delta_ynew)))\n\n yout.append((ynew + y + euler_fw_dy)/2)\n x_old = x\n return np.array(yout), {'nfev': (len(xout)-1)}\n" ]
[ [ "numpy.square", "numpy.array" ] ]
pgmadonia/codeastro_project
[ "4d1d15a2d1eef67b07a8c5ab4f81e5aa1efb601c" ]
[ "build/lib/edgedec/picture.py" ]
[ "\"\"\"\nThis module recognizes shapes in pictures\n\"\"\"\nimport numpy as np\nimport sys\nnp.set_printoptions(threshold=sys.maxsize)\nimport matplotlib.image as img\nimport matplotlib.pyplot as plt\n\n# sample user interaction idea\n# img = library.image('pic1.png')\n# img_contour = img.draw_contours()\n\nclass Picture:\n \"\"\"\n Runs through ``orbitize`` methods in a standardized way.\n\n Args:\n input_data: Either a relative path to data file or astropy.table.Table object\n in the orbitize format. See ``orbitize.read_input``\n sampler_str (str): algorithm to use for orbit computation. \"MCMC\" for\n Markov Chain Monte Carlo, \"OFTI\" for Orbits for the Impatient\n num_secondary_bodies (int): number of secondary bodies in the system.\n Should be at least 1.\n system_mass (float): mean total mass of the system [M_sol]\n plx (float): mean parallax of the system [mas]\n mass_err (float, optional): uncertainty on ``system_mass`` [M_sol]\n plx_err (float, optional): uncertainty on ``plx`` [mas]\n lnlike (str, optional): name of function in ``orbitize.lnlike`` that will\n be used to compute likelihood. (default=\"chi2_lnlike\")\n system_kwargs (dict, optional): ``restrict_angle_ranges``, ``ref_tau_epoch``,\n ``results`` for ``orbitize.system.System``.\n mcmc_kwargs (dict, optional): ``num_temps``, ``num_walkers``, and ``num_threads``\n kwargs for ``orbitize.sampler.MCMC``\n\n Written: Sarah Blunt, 2018\n \"\"\"\n\n def __init__(self, file_name, threshold = 0.1):\n \"\"\"class __init__ method \n\n Note:\n Do not include the `self` parameter in the ``Args`` section.\n\n Args:\n file_name (str): The name of the image file to be loaded as a Picture object.\n threshold (float): Threshold value to determine whether or not an edge is present. This is passed as an attribute and then called in by the highlight_edges_grad method. Default value is 0.1 .\n\n Attributes:\n file_name (str): file name of the loaded image\n image (np.array): [R G B] values of each pixel in the image\n contours (np.array): copy of the [R G B] values of each pixel in the image, will be used to draw the detected edge over the original image\n height (int): height of the image in px \n width (int): width of the image in px\n edges (np.array): array of zeros with same dimensions as the image. 
whenever an edge is found, the \"color difference\" value is stored in the corresponding pixel \n threshold (float): threshold to the \"color difference\" to determine the presence of an edge\n alpha (bool): True if the loaded image has an alpha channel, False otherwise\n \"\"\"\n self.file_name = file_name \n self.image = img.imread(file_name) # numpy array of r-g-b values\n self.contours = img.imread(file_name) # image copy for including highligthed edges\n self.height= len(self.image)\n self.width = len(self.image[0])\n self.edges = np.zeros((self.height, self.width)) # numpy array with 1s as edges\n self.threshold = threshold\n\n if len(self.image[0][0]) == 4:\n self.alpha = True\n else:\n self.alpha = False\n \n def __len__(self):\n \"\"\"\n Special len method \n \n Returns:\n int: total number of pixels \n \"\"\"\n return self.height * self.width\n\n def __str__(self):\n \"\"\"\n Special str method \n \n Returns:\n str: string with info on filename and image size \n \"\"\"\n return f'File name: {self.file_name}; width: {self.width}px, height: {self.height}px'\n\n def __del__(self):\n \"\"\"\n Special del method \n \n It deletes a picture object and prints a report of the deletion\n \"\"\"\n print(f'I just deleted {self.file_name}')\n \n \n def assess_difference(self, pixel_a, pixel_b):\n \"\"\"\n This function checks if two adjacent pixels have the exact same RGB value\n \n Args:\n pixel_a (float, list): [r,g,b] values for pixel A \n pixel_b (float, list): [r,g,b] values for pixel B\n\n Returns:\n bool: True if the two pixel have the same RGB values \n \"\"\"\n \n return np.array_equal(pixel_a, pixel_b)\n\n def horizontal_scan(self, row_index):\n \"\"\"\n This function performs a linear scan over a given row\n \n Args:\n row_index (int): index of row to scan in self.image \n \"\"\"\n for i in range(1, self.width):\n # compare each pixel to the previous one\n if not self.assess_difference(self.image[row_index][i-1], self.image[row_index][i]):\n self.edges[row_index][i] = 1\n \n def vertical_scan(self, col_index):\n '''\n This function performs a linear scan over a given column\n Args:\n col_index - index of column in self.image to scan\n '''\n \n for i in range(1, self.height):\n # compare each pixel to the previous one\n if not self.assess_difference(self.image[i-1][col_index], self.image[i][col_index]):\n self.edges[i][col_index] = 1\n \n def find_edges(self):\n '''\n ...\n '''\n for i in range(self.width):\n self.vertical_scan(i)\n \n for i in range(self.height):\n self.horizontal_scan(i)\n\n def highlight_edges(self): \n '''\n ''' \n if self.alpha:\n for i in range(self.height): \n for j in range(self.width): \n if self.edges[i][j] == 0:\n # print(self.contours[i][j])\n self.contours[i][j] = [0, 0, 0, 1]\n else:\n # print(self.contours[i][j])\n self.contours[i][j] = [1, 1, 1, 1]\n else: \n for i in range(self.height): \n for j in range(self.width): \n if self.edges[i][j] == 0:\n # print(self.contours[i][j])\n self.contours[i][j] = [0, 0, 0]\n else:\n # print(self.contours[i][j])\n self.contours[i][j] = [1, 1, 1]\n\n\n def assess_gradient(self, pixel_a, pixel_b):\n '''\n This function checks if two adjacent pixels have the exact same RGB value\n \n Args:\n pixel_a - list: [r,g,b] values for pixel A \n pixel_b - list: [r,g,b] values for pixel B\n '''\n diff = np.abs(pixel_a - pixel_b).sum()\n # grad = diff / self.scale \n\n return diff\n\n def horizontal_scan_grad(self, row_index):\n '''\n This function performs a linear scan over a given row\n \n Args:\n row_index - index of row in 
self.image to scan\n '''\n \n for i in range(1, self.width):\n # compare each pixel to the previous one\n hor_grad = self.assess_gradient(self.image[row_index][i-1], self.image[row_index][i])\n self.edges[row_index][i] = max(self.edges[row_index][i], hor_grad)\n \n def vertical_scan_grad(self, col_index):\n '''\n This function performs a linear scan over a given column\n Args:\n col_index - index of column in self.image to scan\n '''\n \n for i in range(1, self.height):\n # compare each pixel to the previous one\n vert_grad = self.assess_gradient(self.image[i-1][col_index], self.image[i][col_index])\n self.edges[i][col_index] = max(self.edges[i][col_index], vert_grad)\n\n def find_edges_grad(self):\n '''\n ...\n '''\n for i in range(self.width):\n self.vertical_scan_grad(i)\n \n for i in range(self.height):\n self.horizontal_scan_grad(i)\n\n def highlight_edges_grad(self): \n '''\n ''' \n if self.alpha:\n for i in range(self.height): \n for j in range(self.width): \n if self.edges[i][j] < self.threshold:\n # print(self.contours[i][j])\n self.contours[i][j] = [0, 0, 0, 1] #drawing black\n else:\n # print(self.contours[i][j])\n # if self.contours[i][j]\n self.contours[i][j] = [1, 1, 1, 1] #drawing the edge \n else: \n for i in range(self.height): \n for j in range(self.width): \n if self.edges[i][j] < self.threshold:\n # print(self.contours[i][j])\n self.contours[i][j] = [0, 0, 0]\n else:\n # print(self.contours[i][j])\n self.contours[i][j] = [1, 1, 1]\n\n\n def check_num_edges(self):\n '''\n '''\n tot_edges = 0\n for row in self.edges:\n tot_edges += sum(row)\n return tot_edges\n\n\n def paint_contours(self, grad = False):\n print('Working on the following image:')\n print(self)\n if grad: \n self.find_edges_grad()\n print(f'I found {self.check_num_edges()} edges.')\n self.highlight_edges_grad()\n else:\n self.find_edges() \n print(f'I found {self.check_num_edges()} edges.')\n self.highlight_edges() \n \n plt.imshow(self.contours)\n \n \n# for th in np.linspace(0.1, 1., 10, endpoint=True):\n# print(f'Threshold is {th}')\n# image_to_process = Picture('pic6.png', threshold = th)\n\n\nimage_to_process = Picture('pic6.png')\n\n\nimage_to_process.paint_contours(grad=True)\nplt.show()" ]
[ [ "numpy.array_equal", "numpy.zeros", "numpy.set_printoptions", "matplotlib.image.imread", "numpy.abs", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow" ] ]
viethuong12/NLP
[ "c3d1f1cc8b1eb2c64302a88cfd2223c1b9823a45" ]
[ "classifier/prepare_data.py" ]
[ "from sklearn.model_selection import train_test_split\nimport os\n\n\ndef save_fasttext_format(X_data, y_data, output_file, prefix='__lb__'):\n with open(output_file, 'w', encoding='utf8') as fp:\n for x, y in zip(X_data, y_data):\n fp.write(prefix + y + ' ' + x + '\\n')\n\n\nXs = []\nys = []\n\ndata_dir = \"../data_crawler/data\"\n\nfor file in os.listdir(data_dir):\n if file.endswith('.txt'):\n label = file[:-4]\n for line in open(os.path.join(data_dir, file), encoding='utf8').readlines():\n Xs.append(line.strip())\n ys.append(label)\n\nX_train, X_test, y_train, y_test = train_test_split(Xs, ys, test_size=0.2, random_state=42)\n\nsave_fasttext_format(X_train, y_train, 'data/train.txt')\nsave_fasttext_format(X_test, y_test, 'data/test.txt')\n" ]
[ [ "sklearn.model_selection.train_test_split" ] ]
jieralice13/forte
[ "4b5bb810c3cdd99de2fa3096e327e50caff68d7f" ]
[ "forte/processors/srl_predictor.py" ]
[ "# Copyright 2019 The Forte Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nfrom typing import Dict, List, Tuple, Optional\n\nimport texar.torch as tx\nimport torch\n\nfrom forte.common.configuration import Config\nfrom forte.common.resources import Resources\nfrom forte.data.data_pack import DataPack\nfrom forte.data.span import Span\nfrom forte.data.types import DataRequest\nfrom forte.models.srl.model import LabeledSpanGraphNetwork\nfrom forte.processors.base.batch_processor import FixedSizeBatchProcessor\nfrom ft.onto.base_ontology import (\n Token, Sentence, PredicateLink, PredicateMention, PredicateArgument)\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\n \"SRLPredictor\",\n]\n\nPrediction = List[\n Tuple[Span, List[Tuple[Span, str]]]\n]\n\n\nclass SRLPredictor(FixedSizeBatchProcessor):\n \"\"\"\n An Semantic Role labeler trained according to `He, Luheng, et al.\n \"Jointly predicting predicates and arguments in neural semantic role\n labeling.\" <https://aclweb.org/anthology/P18-2058>`_.\n \"\"\"\n\n word_vocab: tx.data.Vocab\n char_vocab: tx.data.Vocab\n model: LabeledSpanGraphNetwork\n\n def __init__(self):\n super().__init__()\n self.device = torch.device(\n torch.cuda.current_device() if torch.cuda.is_available() else 'cpu')\n\n def initialize(self,\n resources: Resources,\n configs: Optional[Config]):\n super().initialize(resources, configs)\n\n model_dir = configs.storage_path if configs is not None else None\n logger.info(\"restoring SRL model from %s\", model_dir)\n\n # initialize the batcher\n if configs:\n self._batcher.initialize(configs.batcher)\n\n self.word_vocab = tx.data.Vocab(\n os.path.join(model_dir, \"embeddings/word_vocab.english.txt\"))\n self.char_vocab = tx.data.Vocab(\n os.path.join(model_dir, \"embeddings/char_vocab.english.txt\"))\n model_hparams = LabeledSpanGraphNetwork.default_hparams()\n model_hparams[\"context_embeddings\"][\"path\"] = os.path.join(\n model_dir, model_hparams[\"context_embeddings\"][\"path\"])\n model_hparams[\"head_embeddings\"][\"path\"] = os.path.join(\n model_dir, model_hparams[\"head_embeddings\"][\"path\"])\n self.model = LabeledSpanGraphNetwork(\n self.word_vocab, self.char_vocab, model_hparams)\n self.model.load_state_dict(torch.load(\n os.path.join(model_dir, \"pretrained/model.pt\"),\n map_location=self.device))\n self.model.eval()\n\n @staticmethod\n def _define_context():\n return Sentence\n\n @staticmethod\n def _define_input_info() -> DataRequest:\n input_info: DataRequest = {Token: []}\n return input_info\n\n def predict(self, data_batch: Dict) -> Dict[str, List[Prediction]]:\n text: List[List[str]] = [\n sentence.tolist() for sentence in data_batch[\"Token\"][\"text\"]]\n text_ids, length = tx.data.padded_batch([\n self.word_vocab.map_tokens_to_ids_py(sentence)\n for sentence in text])\n text_ids = torch.from_numpy(text_ids).to(device=self.device)\n length = torch.tensor(length, dtype=torch.long, device=self.device)\n batch_size = len(text)\n batch 
= tx.data.Batch(batch_size, text=text, text_ids=text_ids,\n length=length, srl=[[]] * batch_size)\n self.model = self.model.to(self.device)\n batch_srl_spans = self.model.decode(batch)\n\n # Convert predictions into annotations.\n batch_predictions: List[Prediction] = []\n for idx, srl_spans in enumerate(batch_srl_spans):\n word_spans = data_batch[\"Token\"][\"span\"][idx]\n predictions: Prediction = []\n for pred_idx, pred_args in srl_spans.items():\n begin, end = word_spans[pred_idx]\n # TODO cannot create annotation here.\n # Need to convert from Numpy numbers to int.\n pred_span = Span(begin.item(), end.item())\n arguments = []\n for arg in pred_args:\n begin = word_spans[arg.start][0].item()\n end = word_spans[arg.end][1].item()\n arg_annotation = Span(begin, end)\n arguments.append((arg_annotation, arg.label))\n predictions.append((pred_span, arguments))\n batch_predictions.append(predictions)\n return {\"predictions\": batch_predictions}\n\n def pack(self, data_pack: DataPack,\n inputs: Dict[str, List[Prediction]]) -> None:\n batch_predictions = inputs[\"predictions\"]\n for predictions in batch_predictions:\n for pred_span, arg_result in predictions:\n\n pred = PredicateMention(data_pack, pred_span.begin,\n pred_span.end)\n\n for arg_span, label in arg_result:\n arg = PredicateArgument(\n data_pack, arg_span.begin, arg_span.end\n )\n link = PredicateLink(data_pack, pred, arg)\n link.arg_type = label\n\n @classmethod\n def default_configs(cls):\n \"\"\"\n This defines a basic config structure\n :return:\n \"\"\"\n configs = super().default_configs()\n configs.update({\n 'storage_path': None,\n \"batcher\": {\n \"batch_size\": 4\n }\n })\n return configs\n" ]
[ [ "torch.cuda.is_available", "torch.tensor", "torch.from_numpy", "torch.cuda.current_device" ] ]
Marco-Sulla/PyTables
[ "c06642ed12b1c99df76feb11f08a37b3e479ffbc" ]
[ "tables/tests/test_numpy.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport tempfile\n\nimport numpy as np\n\nimport tables\nfrom tables import (\n StringCol, BoolCol, FloatCol, ComplexCol, EnumCol,\n Int8Col, UInt8Col, Int16Col, UInt16Col, Int32Col, UInt32Col,\n Int64Col, Float32Col, Float64Col, Time64Col\n)\nfrom tables.tests import common\nfrom tables.tests.common import allequal\nfrom tables.tests.common import unittest\nfrom tables.tests.common import PyTablesTestCase as TestCase\n\n\ntypecodes = ['b', 'h', 'i', 'l', 'q', 'f', 'd']\n# UInt64 checking disabled on win platforms\n# because this type is not supported\nif sys.platform != 'win32':\n typecodes += ['B', 'H', 'I', 'L', 'Q', 'F', 'D']\nelse:\n typecodes += ['B', 'H', 'I', 'L', 'F', 'D']\ntypecodes += ['b1'] # boolean\n\nif hasattr(tables, 'Float16Atom'):\n typecodes.append('e')\nif hasattr(tables, 'Float96Atom') or hasattr(tables, 'Float128Atom'):\n typecodes.append('g')\nif hasattr(tables, 'Complex192Atom') or hasattr(tables, 'Conplex256Atom'):\n typecodes.append('G')\n\nbyteorder = {'little': '<', 'big': '>'}[sys.byteorder]\n\n\nclass BasicTestCase(TestCase):\n \"\"\"Basic test for all the supported typecodes present in NumPy.\n\n All of them are included on PyTables.\n\n \"\"\"\n endiancheck = 0\n\n def WriteRead(self, testArray):\n if common.verbose:\n print('\\n', '-=' * 30)\n print(\"Running test for array with typecode '%s'\" %\n testArray.dtype.char, end=' ')\n print(\"for class check:\", self.title)\n\n # Create an instance of HDF5 Table\n self.h5fname = tempfile.mktemp(\".h5\")\n try:\n with tables.open_file(self.h5fname, mode=\"w\") as self.h5file:\n self.root = self.h5file.root\n\n # Create the array under root and name 'somearray'\n a = testArray\n self.h5file.create_array(self.root, 'somearray', a,\n \"Some array\")\n\n # Re-open the file in read-only mode\n with tables.open_file(self.h5fname, mode=\"r\") as self.h5file:\n self.root = self.h5file.root\n\n # Read the saved array\n b = self.root.somearray.read()\n\n # For cases that read returns a python type instead of a\n # numpy type\n if not hasattr(b, \"shape\"):\n b = np.np.array(b, dtype=a.dtype.str)\n\n # Compare them. 
They should be equal.\n # if not allequal(a,b, \"numpy\") and common.verbose:\n if common.verbose:\n print(\"Array written:\", a)\n print(\"Array written shape:\", a.shape)\n print(\"Array written itemsize:\", a.itemsize)\n print(\"Array written type:\", a.dtype.char)\n print(\"Array read:\", b)\n print(\"Array read shape:\", b.shape)\n print(\"Array read itemsize:\", b.itemsize)\n print(\"Array read type:\", b.dtype.char)\n\n type_ = self.root.somearray.atom.type\n\n # Check strictly the array equality\n self.assertEqual(type(a), type(b))\n self.assertEqual(a.shape, b.shape)\n self.assertEqual(a.shape, self.root.somearray.shape)\n self.assertEqual(a.dtype, b.dtype)\n if a.dtype.char[0] == \"S\":\n self.assertEqual(type_, \"string\")\n else:\n self.assertEqual(a.dtype.base.name, type_)\n\n self.assertTrue(allequal(a, b, \"numpy\"))\n finally:\n # Then, delete the file\n if os.path.exists(self.h5fname):\n os.remove(self.h5fname)\n\n def test00_char(self):\n \"\"\"Data integrity during recovery (character objects)\"\"\"\n\n a = np.array(self.tupleChar, 'S'+str(len(self.tupleChar)))\n self.WriteRead(a)\n\n def test01_char_nc(self):\n \"\"\"Data integrity during recovery (non-contiguous character objects)\"\"\"\n\n a = np.array(self.tupleChar, 'S'+str(len(self.tupleChar)))\n if a.shape == ():\n b = a # We cannot use the indexing notation\n else:\n b = a[::2]\n # Ensure that this numpy string is non-contiguous\n if a.shape[0] > 2:\n self.assertEqual(b.flags['CONTIGUOUS'], False)\n self.WriteRead(b)\n\n def test02_types(self):\n \"\"\"Data integrity during recovery (numerical types)\"\"\"\n\n for typecode in typecodes:\n if self.tupleInt.shape:\n a = self.tupleInt.astype(typecode)\n else:\n # shape is the empty tuple ()\n a = np.array(self.tupleInt, dtype=typecode)\n self.WriteRead(a)\n\n def test03_types_nc(self):\n \"\"\"Data integrity during recovery (non-contiguous numerical types)\"\"\"\n\n for typecode in typecodes:\n if self.tupleInt.shape:\n a = self.tupleInt.astype(typecode)\n else:\n # shape is the empty tuple ()\n a = np.array(self.tupleInt, dtype=typecode)\n\n # This should not be tested for the rank-0 case\n if len(a.shape) == 0:\n raise unittest.SkipTest\n b = a[::2]\n\n # Ensure that this array is non-contiguous (for non-trivial case)\n if a.shape[0] > 2:\n self.assertEqual(b.flags['CONTIGUOUS'], False)\n self.WriteRead(b)\n\n\nclass Basic0DOneTestCase(BasicTestCase):\n # Rank-0 case\n title = \"Rank-0 case 1\"\n tupleInt = np.array(3)\n tupleChar = \"4\"\n\n\nclass Basic0DTwoTestCase(BasicTestCase):\n # Rank-0 case\n title = \"Rank-0 case 2\"\n tupleInt = np.array(33)\n tupleChar = \"44\"\n\n\nclass Basic1DOneTestCase(BasicTestCase):\n # 1D case\n title = \"Rank-1 case 1\"\n tupleInt = np.array((3,))\n tupleChar = (\"a\",)\n\n\nclass Basic1DTwoTestCase(BasicTestCase):\n # 1D case\n title = \"Rank-1 case 2\"\n tupleInt = np.array((0, 4))\n tupleChar = (\"aaa\",)\n\n\nclass Basic1DThreeTestCase(BasicTestCase):\n # 1D case\n title = \"Rank-1 case 3\"\n tupleInt = np.array((3, 4, 5))\n tupleChar = (\"aaaa\", \"bbb\",)\n\n\nclass Basic2DTestCase(BasicTestCase):\n # 2D case\n title = \"Rank-2 case 1\"\n # tupleInt = reshape(np.array(np.arange((4)**2)), (4,)*2)\n tupleInt = np.ones((4,)*2)\n tupleChar = [[\"aaa\", \"ddddd\"], [\"d\", \"ss\"], [\"s\", \"tt\"]]\n\n\nclass Basic10DTestCase(BasicTestCase):\n # 10D case\n title = \"Rank-10 case 1\"\n # tupleInt = reshape(np.array(np.arange((2)**10)), (2,)*10)\n tupleInt = np.ones((2,)*10)\n # tupleChar = 
reshape(np.array([1],dtype=\"S1\"),(1,)*10)\n # The next tuple consumes far more time, so this\n # test should be run in common.heavy mode.\n tupleChar = np.array(tupleInt, dtype=\"S1\")\n\n\n# class Basic32DTestCase(BasicTestCase):\n# # 32D case (maximum)\n# tupleInt = reshape(np.array((22,)), (1,)*32)\n# # Strings seems to be very slow with somewhat large dimensions\n# # This should not be run unless the numarray people address this problem\n# # F. Alted 2006-01-04\n# tupleChar = np.array(tupleInt, dtype=\"S1\")\n\n\nclass GroupsArrayTestCase(common.TempFileMixin, TestCase):\n \"\"\"This test class checks combinations of arrays with groups.\n\n It also uses arrays ranks which ranges until 10.\n\n \"\"\"\n\n def test00_iterativeGroups(self):\n \"\"\"Checking combinations of arrays with groups\n\n It also uses arrays ranks which ranges until 10.\n\n \"\"\"\n\n if common.verbose:\n print('\\n', '-=' * 30)\n print(\"Running %s.test00_iterativeGroups...\" %\n self.__class__.__name__)\n\n # Get the root group\n group = self.h5file.root\n\n i = 1\n for typecode in typecodes:\n # Create an array of typecode, with incrementally bigger ranges\n a = np.ones((2,) * i, typecode)\n # Save it on the HDF5 file\n dsetname = 'array_' + typecode\n if common.verbose:\n print(\"Creating dataset:\", group._g_join(dsetname))\n self.h5file.create_array(group, dsetname, a, \"Large array\")\n # Create a new group\n group = self.h5file.create_group(group, 'group' + str(i))\n # increment the range for next iteration\n i += 1\n\n self._reopen()\n\n # Get the root group\n group = self.h5file.root\n\n # Get the metadata on the previosly saved arrays\n for i in range(1, len(typecodes)):\n # Create an array for later comparison\n a = np.ones((2,) * i, typecodes[i - 1])\n # Get the dset object hanging from group\n dset = getattr(group, 'array_' + typecodes[i-1])\n # Get the actual array\n b = dset.read()\n if not allequal(a, b, \"numpy\") and common.verbose:\n print(\"Array a original. Shape: ==>\", a.shape)\n print(\"Array a original. Data: ==>\", a)\n print(\"Info from dataset:\", dset._v_pathname)\n print(\" shape ==>\", dset.shape, end=' ')\n print(\" dtype ==> %s\" % dset.dtype)\n print(\"Array b read from file. Shape: ==>\", b.shape, end=' ')\n print(\". Type ==> %s\" % b.dtype.char)\n\n self.assertEqual(a.shape, b.shape)\n if np.dtype('l').itemsize == 4:\n if (a.dtype.char == \"i\" or a.dtype.char == \"l\"):\n # Special expection. We have no way to distinguish between\n # \"l\" and \"i\" typecode, and we can consider them the same\n # to all practical effects\n self.assertIn(b.dtype.char, (\"l\", \"i\"))\n elif (a.dtype.char == \"I\" or a.dtype.char == \"L\"):\n # Special expection. We have no way to distinguish between\n # \"L\" and \"I\" typecode, and we can consider them the same\n # to all practical effects\n self.assertIn(b.dtype.char, (\"L\", \"I\"))\n else:\n self.assertTrue(allequal(a, b, \"numpy\"))\n elif np.dtype('l').itemsize == 8:\n if (a.dtype.char == \"q\" or a.dtype.char == \"l\"):\n # Special expection. We have no way to distinguish between\n # \"q\" and \"l\" typecode in 64-bit platforms, and we can\n # consider them the same to all practical effects\n self.assertIn(b.dtype.char, (\"l\", \"q\"))\n elif (a.dtype.char == \"Q\" or a.dtype.char == \"L\"):\n # Special expection. 
We have no way to distinguish between\n # \"Q\" and \"L\" typecode in 64-bit platforms, and we can\n # consider them the same to all practical effects\n self.assertIn(b.dtype.char, (\"L\", \"Q\"))\n else:\n self.assertTrue(allequal(a, b, \"numpy\"))\n\n # Iterate over the next group\n group = getattr(group, 'group' + str(i))\n\n def test01_largeRankArrays(self):\n \"\"\"Checking creation of large rank arrays (0 < rank <= 32)\n\n It also uses arrays ranks which ranges until maxrank.\n\n \"\"\"\n\n # maximum level of recursivity (deepest group level) achieved:\n # maxrank = 32 (for a effective maximum rank of 32)\n # This limit is due to a limit in the HDF5 library.\n minrank = 1\n maxrank = 32\n\n if common.verbose:\n print('\\n', '-=' * 30)\n print(\"Running %s.test01_largeRankArrays...\" %\n self.__class__.__name__)\n print(\"Maximum rank for tested arrays:\", maxrank)\n\n group = self.h5file.root\n if common.verbose:\n print(\"Rank array writing progress: \", end=' ')\n for rank in range(minrank, maxrank + 1):\n # Create an array of integers, with incrementally bigger ranges\n a = np.ones((1,) * rank, 'i')\n if common.verbose:\n print(\"%3d,\" % (rank), end=' ')\n self.h5file.create_array(group, \"array\", a, \"Rank: %s\" % rank)\n group = self.h5file.create_group(group, 'group' + str(rank))\n\n # Flush the buffers\n self.h5file.flush()\n\n self._reopen()\n\n group = self.h5file.root\n if common.verbose:\n print()\n print(\"Rank array reading progress: \")\n # Get the metadata on the previosly saved arrays\n for rank in range(minrank, maxrank + 1):\n # Create an array for later comparison\n a = np.ones((1,) * rank, 'i')\n # Get the actual array\n b = group.array.read()\n if common.verbose:\n print(\"%3d,\" % (rank), end=' ')\n if not a.tolist() == b.tolist() and common.verbose:\n dset = group.array\n print(\"Info from dataset:\", dset._v_pathname)\n print(\" Shape: ==>\", dset.shape, end=' ')\n print(\" typecode ==> %c\" % dset.typecode)\n print(\"Array b read from file. Shape: ==>\", b.shape, end=' ')\n print(\". Type ==> %c\" % b.dtype.char)\n self.assertEqual(a.shape, b.shape)\n if a.dtype.char == \"i\":\n # Special expection. 
We have no way to distinguish between\n # \"l\" and \"i\" typecode, and we can consider them the same\n # to all practical effects\n self.assertIn(b.dtype.char, (\"l\", \"i\"))\n else:\n self.assertEqual(a.dtype.char, b.dtype.char)\n\n self.assertEqual(a, b)\n\n # Iterate over the next group\n group = self.h5file.get_node(group, 'group' + str(rank))\n\n if common.verbose:\n print() # This flush the stdout buffer\n\n\n# Test Record class\nclass Record(tables.IsDescription):\n var1 = StringCol(itemsize=4, dflt=b\"abcd\", pos=0)\n var2 = StringCol(itemsize=1, dflt=b\"a\", pos=1)\n var3 = BoolCol(dflt=1)\n var4 = Int8Col(dflt=1)\n var5 = UInt8Col(dflt=1)\n var6 = Int16Col(dflt=1)\n var7 = UInt16Col(dflt=1)\n var8 = Int32Col(dflt=1)\n var9 = UInt32Col(dflt=1)\n var10 = Int64Col(dflt=1)\n var11 = Float32Col(dflt=1.0)\n var12 = Float64Col(dflt=1.0)\n var13 = ComplexCol(itemsize=8, dflt=(1.+0.j))\n var14 = ComplexCol(itemsize=16, dflt=(1.+0.j))\n if hasattr(tables, 'Float16Col'):\n var15 = tables.Float16Col(dflt=1.0)\n if hasattr(tables, 'Float96Col'):\n var16 = tables.Float96Col(dflt=1.0)\n if hasattr(tables, 'Float128Col'):\n var17 = tables.Float128Col(dflt=1.0)\n if hasattr(tables, 'Complex196Col'):\n var18 = tables.ComplexCol(itemsize=24, dflt=(1.+0.j))\n if hasattr(tables, 'Complex256Col'):\n var19 = tables.ComplexCol(itemsize=32, dflt=(1.+0.j))\n\n\nclass TableReadTestCase(common.TempFileMixin, TestCase):\n nrows = 100\n\n def setUp(self):\n super(TableReadTestCase, self).setUp()\n\n # Create an instance of an HDF5 Table\n table = self.h5file.create_table(self.h5file.root, 'table', Record)\n for i in range(self.nrows):\n table.row.append() # Fill 100 rows with default values\n\n self._reopen(mode='a')\n\n def test01_readTableChar(self):\n \"\"\"Checking column conversion into NumPy in read().\n\n Char flavor\n\n \"\"\"\n\n table = self.h5file.root.table\n table.flavor = \"numpy\"\n for colname in table.colnames:\n numcol = table.read(field=colname)\n typecol = table.coltypes[colname]\n itemsizecol = table.description._v_dtypes[colname].base.itemsize\n nctypecode = numcol.dtype.char\n if typecol == \"string\":\n if itemsizecol > 1:\n orignumcol = np.array(['abcd']*self.nrows, dtype='S4')\n else:\n orignumcol = np.array(['a']*self.nrows, dtype='S1')\n if common.verbose:\n print(\"Typecode of NumPy column read:\", nctypecode)\n print(\"Should look like:\", 'c')\n print(\"Itemsize of column:\", itemsizecol)\n print(\"Shape of NumPy column read:\", numcol.shape)\n print(\"Should look like:\", orignumcol.shape)\n print(\"First 3 elements of read col:\", numcol[:3])\n # Check that both NumPy objects are equal\n self.assertTrue(allequal(numcol, orignumcol, \"numpy\"))\n\n def test01_readTableNum(self):\n \"\"\"Checking column conversion into NumPy in read().\n\n NumPy flavor\n\n \"\"\"\n\n table = self.h5file.root.table\n table.flavor = \"numpy\"\n for colname in table.colnames:\n numcol = table.read(field=colname)\n typecol = table.coltypes[colname]\n #nctypecode = np.typeNA[numcol.dtype.char[0]]\n nctypecode = np.sctypeDict[numcol.dtype.char[0]]\n if typecol != \"string\":\n if common.verbose:\n print(\"Typecode of NumPy column read:\", nctypecode)\n print(\"Should look like:\", typecol)\n orignumcol = np.ones(shape=self.nrows, dtype=numcol.dtype.char)\n # Check that both NumPy objects are equal\n self.assertTrue(allequal(numcol, orignumcol, \"numpy\"))\n\n def test02_readCoordsChar(self):\n \"\"\"Column conversion into NumPy in readCoords().\n\n Chars\n\n \"\"\"\n\n table = 
self.h5file.root.table\n table.flavor = \"numpy\"\n coords = [1, 2, 3]\n self.nrows = len(coords)\n for colname in table.colnames:\n numcol = table.read_coordinates(coords, field=colname)\n typecol = table.coltypes[colname]\n itemsizecol = table.description._v_dtypes[colname].base.itemsize\n nctypecode = numcol.dtype.char\n if typecol == \"string\":\n if itemsizecol > 1:\n orignumcol = np.array(['abcd']*self.nrows, dtype='S4')\n else:\n orignumcol = np.array(['a']*self.nrows, dtype='S1')\n if common.verbose:\n print(\"Typecode of NumPy column read:\", nctypecode)\n print(\"Should look like:\", 'c')\n print(\"Itemsize of column:\", itemsizecol)\n print(\"Shape of NumPy column read:\", numcol.shape)\n print(\"Should look like:\", orignumcol.shape)\n print(\"First 3 elements of read col:\", numcol[:3])\n # Check that both NumPy objects are equal\n self.assertTrue(allequal(numcol, orignumcol, \"numpy\"))\n\n def test02_readCoordsNum(self):\n \"\"\"Column conversion into NumPy in read_coordinates().\n\n NumPy.\n\n \"\"\"\n\n table = self.h5file.root.table\n table.flavor = \"numpy\"\n coords = [1, 2, 3]\n self.nrows = len(coords)\n for colname in table.colnames:\n numcol = table.read_coordinates(coords, field=colname)\n typecol = table.coltypes[colname]\n type_ = numcol.dtype.type\n if typecol != \"string\":\n if typecol == \"int64\":\n return\n if common.verbose:\n print(\"Type of read NumPy column:\", type_)\n print(\"Should look like:\", typecol)\n orignumcol = np.ones(shape=self.nrows, dtype=numcol.dtype.char)\n # Check that both NumPy objects are equal\n self.assertTrue(allequal(numcol, orignumcol, \"numpy\"))\n\n def test03_getIndexNumPy(self):\n \"\"\"Getting table rows specified as NumPy scalar integers.\"\"\"\n\n table = self.h5file.root.table\n coords = np.array([1, 2, 3], dtype='int8')\n for colname in table.colnames:\n numcol = [table[coord][colname] for coord in coords]\n typecol = table.coltypes[colname]\n if typecol != \"string\":\n if typecol == \"int64\":\n return\n numcol = np.array(numcol, typecol)\n if common.verbose:\n type_ = numcol.dtype.type\n print(\"Type of read NumPy column:\", type_)\n print(\"Should look like:\", typecol)\n orignumcol = np.ones(shape=len(numcol),\n dtype=numcol.dtype.char)\n # Check that both NumPy objects are equal\n self.assertTrue(allequal(numcol, orignumcol, \"numpy\"))\n\n def test04_setIndexNumPy(self):\n \"\"\"Setting table rows specified as NumPy integers.\"\"\"\n\n self._reopen(mode='a')\n table = self.h5file.root.table\n table.flavor = \"numpy\"\n coords = np.array([1, 2, 3], dtype='int8')\n # Modify row 1\n # From PyTables 2.0 on, assignments to records can be done\n # only as tuples (see http://projects.scipy.org/scipy/numpy/ticket/315)\n # table[coords[0]] = [\"aasa\",\"x\"]+[232]*12\n\n n = len(Record.columns) - 2\n\n table[coords[0]] = tuple([\"aasa\", \"x\"]+[232]*n) # XXX\n # record = list(table[coords[0]])\n record = table.read(coords[0], coords[0] + 1)\n if common.verbose:\n print(\"Original row:\\n\"\n \"['aasa', 'x', True, -24, 232, 232, 232, 232, 232L, \"\n \"232, 232.0, 232.0, (232 + 0j), (232+0j), 232.0, \"\n \"(232+0j)]\\n\")\n print(\"Read row:\\n\", record)\n self.assertEqual(record['var1'], b'aasa')\n self.assertEqual(record['var2'], b'x')\n self.assertEqual(record['var3'], True)\n self.assertEqual(record['var4'], -24)\n self.assertEqual(record['var7'], 232)\n\n\n# The declaration of the nested table:\nclass Info(tables.IsDescription):\n _v_pos = 3\n Name = StringCol(itemsize=2)\n Value = 
ComplexCol(itemsize=16)\n\n\nclass TestTDescr(tables.IsDescription):\n\n \"\"\"A description that has several nested columns.\"\"\"\n\n x = Int32Col(dflt=0, shape=2, pos=0) # 0\n y = FloatCol(dflt=1, shape=(2, 2))\n z = UInt8Col(dflt=1)\n z3 = EnumCol({'r': 4, 'g': 2, 'b': 1}, 'r', 'int32', shape=2)\n color = StringCol(itemsize=4, dflt=b\"ab\", pos=2)\n info = Info()\n\n class Info(tables.IsDescription): # 1\n _v_pos = 1\n name = StringCol(itemsize=2)\n value = ComplexCol(itemsize=16, pos=0) # 0\n y2 = FloatCol(pos=1) # 1\n z2 = UInt8Col()\n\n class Info2(tables.IsDescription):\n y3 = Time64Col(shape=2)\n name = StringCol(itemsize=2)\n value = ComplexCol(itemsize=16, shape=2)\n\n\nclass TableNativeFlavorTestCase(common.TempFileMixin, TestCase):\n nrows = 100\n\n def setUp(self):\n super(TableNativeFlavorTestCase, self).setUp()\n\n # Create an instance of an HDF5 Table\n table = self.h5file.create_table(self.h5file.root, 'table', TestTDescr,\n expectedrows=self.nrows)\n table.flavor = \"numpy\"\n for i in range(self.nrows):\n table.row.append() # Fill 100 rows with default values\n table.flush()\n\n def test01a_basicTableRead(self):\n \"\"\"Checking the return of a NumPy in read().\"\"\"\n\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table[:]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the value of some columns\n # A flat column\n col = table.cols.x[:3]\n self.assertIsInstance(col, np.ndarray)\n npcol = np.zeros((3, 2), dtype=\"int32\")\n self.assertTrue(allequal(col, npcol, \"numpy\"))\n\n # A nested column\n col = table.cols.Info[:3]\n self.assertIsInstance(col, np.ndarray)\n dtype = [('value', 'c16'),\n ('y2', 'f8'),\n ('Info2',\n [('name', 'S2'),\n ('value', 'c16', (2,)),\n ('y3', 'f8', (2,))]),\n ('name', 'S2'),\n ('z2', 'u1')]\n npcol = np.zeros((3,), dtype=dtype)\n self.assertEqual(col.dtype.descr, npcol.dtype.descr)\n if common.verbose:\n print(\"col-->\", col)\n print(\"npcol-->\", npcol)\n\n # A copy() is needed in case the buffer can be in different segments\n self.assertEqual(bytes(col.copy().data), bytes(npcol.data))\n\n def test01b_basicTableRead(self):\n \"\"\"Checking the return of a NumPy in read() (strided version).\"\"\"\n\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table[::3]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the value of some columns\n # A flat column\n col = table.cols.x[:9:3]\n self.assertIsInstance(col, np.ndarray)\n npcol = np.zeros((3, 2), dtype=\"int32\")\n self.assertTrue(allequal(col, npcol, \"numpy\"))\n\n # A nested column\n col = table.cols.Info[:9:3]\n self.assertIsInstance(col, np.ndarray)\n dtype = [('value', '%sc16' % byteorder),\n ('y2', '%sf8' % byteorder),\n ('Info2',\n [('name', '|S2'),\n ('value', '%sc16' % byteorder, (2,)),\n ('y3', '%sf8' % byteorder, (2,))]),\n ('name', '|S2'),\n ('z2', '|u1')]\n npcol = np.zeros((3,), dtype=dtype)\n self.assertEqual(col.dtype.descr, npcol.dtype.descr)\n if common.verbose:\n print(\"col-->\", col)\n print(\"npcol-->\", npcol)\n\n # A copy() is needed in case the buffer can be in different 
segments\n self.assertEqual(bytes(col.copy().data), bytes(npcol.data))\n\n def test02_getWhereList(self):\n \"\"\"Checking the return of NumPy in get_where_list method.\"\"\"\n\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table.get_where_list('z == 1')\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check that all columns have been selected\n self.assertEqual(len(data), 100)\n\n # Finally, check that the contents are ok\n self.assertTrue(allequal(data, np.arange(100, dtype=\"i8\"), \"numpy\"))\n\n def test03a_readWhere(self):\n \"\"\"Checking the return of NumPy in read_where method (strings).\"\"\"\n\n table = self.h5file.root.table\n table.cols.color.create_index()\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table.read_where('color == b\"ab\"')\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check that all columns have been selected\n self.assertEqual(len(data), self.nrows)\n\n def test03b_readWhere(self):\n \"\"\"Checking the return of NumPy in read_where method (numeric).\"\"\"\n\n table = self.h5file.root.table\n table.cols.z.create_index()\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table.read_where('z == 0')\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check that all columns have been selected\n self.assertEqual(len(data), 0)\n\n def test04a_createTable(self):\n \"\"\"Checking the Table creation from a numpy recarray.\"\"\"\n\n dtype = [('value', '%sc16' % byteorder),\n ('y2', '%sf8' % byteorder),\n ('Info2',\n [('name', '|S2'),\n ('value', '%sc16' % byteorder, (2,)),\n ('y3', '%sf8' % byteorder, (2,))]),\n ('name', '|S2'),\n ('z2', '|u1')]\n npdata = np.zeros((3,), dtype=dtype)\n table = self.h5file.create_table(self.h5file.root, 'table2', npdata)\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table2\n data = table[:]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, npdata.dtype.descr)\n if common.verbose:\n print(\"npdata-->\", npdata)\n print(\"data-->\", data)\n\n # A copy() is needed in case the buffer would be in different segments\n self.assertEqual(bytes(data.copy().data), bytes(npdata.data))\n\n def test04b_appendTable(self):\n \"\"\"Checking appending a numpy recarray.\"\"\"\n\n table = self.h5file.root.table\n npdata = table[3:6]\n table.append(npdata)\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table[-3:]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"Last 3 elements of read:\", data[-3:])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n 
self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, npdata.dtype.descr)\n if common.verbose:\n print(\"npdata-->\", npdata)\n print(\"data-->\", data)\n\n # A copy() is needed in case the buffer would be in different segments\n self.assertEqual(bytes(data.copy().data), bytes(npdata.data))\n\n def test05a_assignColumn(self):\n \"\"\"Checking assigning to a column.\"\"\"\n\n table = self.h5file.root.table\n table.cols.z[:] = np.zeros((100,), dtype='u1')\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table.cols.z[:]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check that all columns have been selected\n self.assertEqual(len(data), 100)\n\n # Finally, check that the contents are ok\n self.assertTrue(allequal(data, np.zeros((100,), dtype=\"u1\"), \"numpy\"))\n\n def test05b_modifyingColumns(self):\n \"\"\"Checking modifying several columns at once.\"\"\"\n\n table = self.h5file.root.table\n xcol = np.ones((3, 2), 'int32')\n ycol = np.zeros((3, 2, 2), 'float64')\n zcol = np.zeros((3,), 'uint8')\n table.modify_columns(3, 6, 1, [xcol, ycol, zcol], ['x', 'y', 'z'])\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table.cols.y[3:6]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, ycol.dtype.descr)\n if common.verbose:\n print(\"ycol-->\", ycol)\n print(\"data-->\", data)\n\n # A copy() is needed in case the buffer would be in different segments\n self.assertEqual(data.copy().data, ycol.data)\n\n def test05c_modifyingColumns(self):\n \"\"\"Checking modifying several columns using a single numpy buffer.\"\"\"\n\n table = self.h5file.root.table\n dtype = [('x', 'i4', (2,)), ('y', 'f8', (2, 2)), ('z', 'u1')]\n nparray = np.zeros((3,), dtype=dtype)\n table.modify_columns(3, 6, 1, nparray, ['x', 'y', 'z'])\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n ycol = np.zeros((3, 2, 2), 'float64')\n data = table.cols.y[3:6]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, ycol.dtype.descr)\n if common.verbose:\n print(\"ycol-->\", ycol)\n print(\"data-->\", data)\n\n # A copy() is needed in case the buffer would be in different segments\n self.assertEqual(data.copy().data, ycol.data)\n\n def test06a_assignNestedColumn(self):\n \"\"\"Checking assigning a nested column (using modify_column).\"\"\"\n\n table = self.h5file.root.table\n dtype = [('value', '%sc16' % byteorder),\n ('y2', '%sf8' % byteorder),\n ('Info2',\n [('name', '|S2'),\n ('value', '%sc16' % byteorder, (2,)),\n ('y3', '%sf8' % byteorder, (2,))]),\n ('name', '|S2'),\n ('z2', '|u1')]\n npdata = np.zeros((3,), dtype=dtype)\n data = 
table.cols.Info[3:6]\n table.modify_column(3, 6, 1, column=npdata, colname='Info')\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table.cols.Info[3:6]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, npdata.dtype.descr)\n if common.verbose:\n print(\"npdata-->\", npdata)\n print(\"data-->\", data)\n\n # A copy() is needed in case the buffer would be in different segments\n self.assertEqual(bytes(data.copy().data), bytes(npdata.data))\n\n def test06b_assignNestedColumn(self):\n \"\"\"Checking assigning a nested column (using the .cols accessor).\"\"\"\n\n table = self.h5file.root.table\n dtype = [('value', '%sc16' % byteorder),\n ('y2', '%sf8' % byteorder),\n ('Info2',\n [('name', '|S2'),\n ('value', '%sc16' % byteorder, (2,)),\n ('y3', '%sf8' % byteorder, (2,))]),\n ('name', '|S2'),\n ('z2', '|u1')]\n npdata = np.zeros((3,), dtype=dtype)\n #self.assertRaises(NotImplementedError,\n # table.cols.Info.__setitem__, slice(3,6,1), npdata)\n table.cols.Info[3:6] = npdata\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n data = table.cols.Info[3:6]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, npdata.dtype.descr)\n if common.verbose:\n print(\"npdata-->\", npdata)\n print(\"data-->\", data)\n\n # A copy() is needed in case the buffer would be in different segments\n self.assertEqual(bytes(data.copy().data), bytes(npdata.data))\n\n def test07a_modifyingRows(self):\n \"\"\"Checking modifying several rows at once (using modify_rows).\"\"\"\n\n table = self.h5file.root.table\n\n # Read a chunk of the table\n chunk = table[0:3]\n\n # Modify it somewhat\n chunk['y'][:] = -1\n table.modify_rows(3, 6, 1, rows=chunk)\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n ycol = np.zeros((3, 2, 2), 'float64')-1\n data = table.cols.y[3:6]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, ycol.dtype.descr)\n if common.verbose:\n print(\"ycol-->\", ycol)\n print(\"data-->\", data)\n self.assertTrue(allequal(ycol, data, \"numpy\"))\n\n def test07b_modifyingRows(self):\n \"\"\"Checking modifying several rows at once (using cols accessor).\"\"\"\n\n table = self.h5file.root.table\n\n # Read a chunk of the table\n chunk = table[0:3]\n\n # Modify it somewhat\n chunk['y'][:] = -1\n table.cols[3:6] = chunk\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n\n # Check that some column has been actually modified\n ycol = np.zeros((3, 2, 2), 'float64')-1\n data = table.cols.y[3:6]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", 
data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, ycol.dtype.descr)\n if common.verbose:\n print(\"ycol-->\", ycol)\n print(\"data-->\", data)\n self.assertTrue(allequal(ycol, data, \"numpy\"))\n\n def test08a_modifyingRows(self):\n \"\"\"Checking modifying just one row at once (using modify_rows).\"\"\"\n\n table = self.h5file.root.table\n\n # Read a chunk of the table\n chunk = table[3:4]\n\n # Modify it somewhat\n chunk['y'][:] = -1\n table.modify_rows(6, 7, 1, chunk)\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n\n # Check that some column has been actually modified\n ycol = np.zeros((2, 2), 'float64')-1\n data = table.cols.y[6]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, ycol.dtype.descr)\n if common.verbose:\n print(\"ycol-->\", ycol)\n print(\"data-->\", data)\n self.assertTrue(allequal(ycol, data, \"numpy\"))\n\n def test08b_modifyingRows(self):\n \"\"\"Checking modifying just one row at once (using cols accessor).\"\"\"\n\n table = self.h5file.root.table\n\n # Read a chunk of the table\n chunk = table[3:4]\n\n # Modify it somewhat\n chunk['y'][:] = -1\n table.cols[6] = chunk\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n\n # Check that some column has been actually modified\n ycol = np.zeros((2, 2), 'float64')-1\n data = table.cols.y[6]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n print(\"Length of the data read:\", len(data))\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, ycol.dtype.descr)\n if common.verbose:\n print(\"ycol-->\", ycol)\n print(\"data-->\", data)\n self.assertTrue(allequal(ycol, data, \"numpy\"))\n\n def test09a_getStrings(self):\n \"\"\"Checking the return of string columns with spaces.\"\"\"\n\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n rdata = table.get_where_list('color == b\"ab\"')\n data = table.read_coordinates(rdata)\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check that all columns have been selected\n self.assertEqual(len(data), 100)\n\n # Finally, check that the contents are ok\n for idata in data['color']:\n self.assertEqual(idata, np.array(\"ab\", dtype=\"|S4\"))\n\n def test09b_getStrings(self):\n \"\"\"Checking the return of string columns with spaces.\n\n (modify)\n\n \"\"\"\n\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n for i in range(50):\n table.cols.color[i] = \"a \"\n table.flush()\n data = table[:]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n\n # 
Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check that all columns have been selected\n self.assertEqual(len(data), 100)\n\n # Finally, check that the contents are ok\n for i in range(100):\n idata = data['color'][i]\n if i >= 50:\n self.assertEqual(idata, np.array(\"ab\", dtype=\"|S4\"))\n else:\n self.assertEqual(idata, np.array(\"a \", dtype=\"|S4\"))\n\n def test09c_getStrings(self):\n \"\"\"Checking the return of string columns with spaces.\n\n (append)\n\n \"\"\"\n\n if self.close:\n self._reopen(mode='a')\n table = self.h5file.root.table\n row = table.row\n for i in range(50):\n row[\"color\"] = \"a \" # note the trailing spaces\n row.append()\n table.flush()\n if self.close:\n self.h5file.close()\n self.h5file = tables.open_file(self.h5fname, \"a\")\n data = self.h5file.root.table[:]\n if common.verbose:\n print(\"Type of read:\", type(data))\n print(\"Description of the record:\", data.dtype.descr)\n print(\"First 3 elements of read:\", data[:3])\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check that all columns have been selected\n self.assertEqual(len(data), 150)\n\n # Finally, check that the contents are ok\n for i in range(150):\n idata = data['color'][i]\n if i < 100:\n self.assertEqual(idata, np.array(\"ab\", dtype=\"|S4\"))\n else:\n self.assertEqual(idata, np.array(\"a \", dtype=\"|S4\"))\n\n\nclass TableNativeFlavorOpenTestCase(TableNativeFlavorTestCase):\n close = False\n\n\nclass TableNativeFlavorCloseTestCase(TableNativeFlavorTestCase):\n close = True\n\n\nclass AttributesTestCase(common.TempFileMixin, TestCase):\n def setUp(self):\n super(AttributesTestCase, self).setUp()\n\n # Create an instance of an HDF5 Table\n self.h5file.create_group(self.h5file.root, 'group')\n\n def test01_writeAttribute(self):\n \"\"\"Checking the creation of a numpy attribute.\"\"\"\n\n group = self.h5file.root.group\n g_attrs = group._v_attrs\n g_attrs.numpy1 = np.zeros((1, 1), dtype='int16')\n if self.close:\n self._reopen(mode='a')\n group = self.h5file.root.group\n g_attrs = group._v_attrs\n\n # Check that we can retrieve a numpy object\n data = g_attrs.numpy1\n npcomp = np.zeros((1, 1), dtype='int16')\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, npcomp.dtype.descr)\n if common.verbose:\n print(\"npcomp-->\", npcomp)\n print(\"data-->\", data)\n self.assertTrue(allequal(npcomp, data, \"numpy\"))\n\n def test02_updateAttribute(self):\n \"\"\"Checking the modification of a numpy attribute.\"\"\"\n\n group = self.h5file.root.group\n g_attrs = group._v_attrs\n g_attrs.numpy1 = np.zeros((1, 2), dtype='int16')\n if self.close:\n self._reopen(mode='a')\n group = self.h5file.root.group\n g_attrs = group._v_attrs\n\n # Update this attribute\n g_attrs.numpy1 = np.ones((1, 2), dtype='int16')\n\n # Check that we can retrieve a numpy object\n data = g_attrs.numpy1\n npcomp = np.ones((1, 2), dtype='int16')\n\n # Check that both NumPy objects are equal\n self.assertIsInstance(data, np.ndarray)\n\n # Check the type\n self.assertEqual(data.dtype.descr, npcomp.dtype.descr)\n if common.verbose:\n print(\"npcomp-->\", npcomp)\n print(\"data-->\", data)\n self.assertTrue(allequal(npcomp, data, \"numpy\"))\n\n\nclass AttributesOpenTestCase(AttributesTestCase):\n close = 0\n\n\nclass AttributesCloseTestCase(AttributesTestCase):\n close = 1\n\n\nclass StrlenTestCase(common.TempFileMixin, TestCase):\n def setUp(self):\n 
super(StrlenTestCase, self).setUp()\n\n # Create an instance of an HDF5 Table\n group = self.h5file.create_group(self.h5file.root, 'group')\n tablelayout = {'Text': StringCol(itemsize=1000), }\n self.table = self.h5file.create_table(group, 'table', tablelayout)\n self.table.flavor = 'numpy'\n row = self.table.row\n row['Text'] = 'Hello Francesc!' # XXX: check unicode --> bytes\n row.append()\n row['Text'] = 'Hola Francesc!' # XXX: check unicode --> bytes\n row.append()\n self.table.flush()\n\n def test01(self):\n \"\"\"Checking the lengths of strings (read field).\"\"\"\n\n if self.close:\n self._reopen(mode='a')\n self.table = self.h5file.root.group.table\n\n # Get both strings\n str1 = self.table.col('Text')[0]\n str2 = self.table.col('Text')[1]\n if common.verbose:\n print(\"string1-->\", str1)\n print(\"string2-->\", str2)\n\n # Check that both NumPy objects are equal\n self.assertEqual(len(str1), len(b'Hello Francesc!'))\n self.assertEqual(len(str2), len(b'Hola Francesc!'))\n self.assertEqual(str1, b'Hello Francesc!')\n self.assertEqual(str2, b'Hola Francesc!')\n\n def test02(self):\n \"\"\"Checking the lengths of strings (read recarray).\"\"\"\n\n if self.close:\n self._reopen(mode='a')\n self.table = self.h5file.root.group.table\n\n # Get both strings\n str1 = self.table[:]['Text'][0]\n str2 = self.table[:]['Text'][1]\n\n # Check that both NumPy objects are equal\n self.assertEqual(len(str1), len(b'Hello Francesc!'))\n self.assertEqual(len(str2), len(b'Hola Francesc!'))\n self.assertEqual(str1, b'Hello Francesc!')\n self.assertEqual(str2, b'Hola Francesc!')\n\n def test03(self):\n \"\"\"Checking the lengths of strings (read recarray, row by row).\"\"\"\n\n if self.close:\n self._reopen(mode='a')\n self.table = self.h5file.root.group.table\n\n # Get both strings\n str1 = self.table[0]['Text']\n str2 = self.table[1]['Text']\n\n # Check that both NumPy objects are equal\n self.assertEqual(len(str1), len(b'Hello Francesc!'))\n self.assertEqual(len(str2), len(b'Hola Francesc!'))\n self.assertEqual(str1, b'Hello Francesc!')\n self.assertEqual(str2, b'Hola Francesc!')\n\n\nclass StrlenOpenTestCase(StrlenTestCase):\n close = 0\n\n\nclass StrlenCloseTestCase(StrlenTestCase):\n close = 1\n\n\ndef suite():\n theSuite = unittest.TestSuite()\n niter = 1\n\n # theSuite.addTest(unittest.makeSuite(StrlenOpenTestCase))\n # theSuite.addTest(unittest.makeSuite(Basic0DOneTestCase))\n # theSuite.addTest(unittest.makeSuite(GroupsArrayTestCase))\n for i in range(niter):\n theSuite.addTest(unittest.makeSuite(Basic0DOneTestCase))\n theSuite.addTest(unittest.makeSuite(Basic0DTwoTestCase))\n theSuite.addTest(unittest.makeSuite(Basic1DOneTestCase))\n theSuite.addTest(unittest.makeSuite(Basic1DTwoTestCase))\n theSuite.addTest(unittest.makeSuite(Basic1DThreeTestCase))\n theSuite.addTest(unittest.makeSuite(Basic2DTestCase))\n theSuite.addTest(unittest.makeSuite(GroupsArrayTestCase))\n theSuite.addTest(unittest.makeSuite(TableReadTestCase))\n theSuite.addTest(unittest.makeSuite(TableNativeFlavorOpenTestCase))\n theSuite.addTest(unittest.makeSuite(TableNativeFlavorCloseTestCase))\n theSuite.addTest(unittest.makeSuite(AttributesOpenTestCase))\n theSuite.addTest(unittest.makeSuite(AttributesCloseTestCase))\n theSuite.addTest(unittest.makeSuite(StrlenOpenTestCase))\n theSuite.addTest(unittest.makeSuite(StrlenCloseTestCase))\n if common.heavy:\n theSuite.addTest(unittest.makeSuite(Basic10DTestCase))\n # The 32 dimensions case takes forever to run!!\n # theSuite.addTest(unittest.makeSuite(Basic32DTestCase))\n return 
theSuite\n\n\nif __name__ == '__main__':\n common.parse_argv(sys.argv)\n common.print_versions()\n unittest.main(defaultTest='suite')\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.ones", "numpy.np.array", "numpy.arange", "numpy.dtype" ] ]
shiwj16/raa-drl
[ "0fc19546ac3186b2c60785a363a9f5d581ac08b9" ]
[ "RAA-DuelingDQN/src/anderson_alpha.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\nimport torch\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\nclass RAA(object):\r\n def __init__(self, num_critics, use_restart, reg=0.1):\r\n self.size = num_critics\r\n self.reg = reg # regularization\r\n self.use_restart = use_restart\r\n self.count = 0\r\n self.interval = 5000\r\n self.errors = torch.zeros(self.interval).to(device)\r\n self.opt_error = torch.tensor(0.).to(device)\r\n\r\n def calculate(self, Qs, F_Qs):\r\n Qs = Qs.t()\r\n F_Qs = F_Qs.t()\r\n delta_Qs = F_Qs - Qs\r\n cur_size = Qs.size(1)\r\n\r\n del_mat = delta_Qs.t().mm(delta_Qs)\r\n alpha = del_mat / torch.abs(torch.mean(del_mat))\r\n alpha += self.reg * torch.eye(cur_size).to(device)\r\n\r\n alpha = torch.sum(alpha.inverse(), 1)\r\n alpha = torch.unsqueeze(alpha / torch.sum(alpha), 1)\r\n\r\n # restart checking\r\n self.count += 1\r\n self.errors[self.count % self.interval] = torch.mean(torch.pow(delta_Qs[:, -1], 2)).detach()\r\n\r\n if self.use_restart:\r\n if self.count % self.interval == 0:\r\n error = torch.mean(self.errors)\r\n if self.count == self.interval:\r\n self.opt_error = error\r\n else:\r\n self.opt_error = torch.min(self.opt_error, error)\r\n\r\n if (self.count > self.interval and error > self.opt_error) or self.count > 100000:\r\n print(error, self.opt_error)\r\n restart = True\r\n self.count = 0\r\n else:\r\n restart = False\r\n else:\r\n restart = False\r\n else:\r\n restart = False\r\n\r\n return alpha, restart\r\n\r\n" ]
[ [ "torch.zeros", "torch.min", "torch.pow", "torch.cuda.is_available", "torch.tensor", "torch.eye", "torch.mean", "torch.sum" ] ]
mountain/numbers
[ "66ff00b57b197788113b51af97bf176451206c75" ]
[ "test/test_game.py" ]
[ "import unittest\n\nimport numpy as np\n\nfrom numx.serengeti import Serengeti\n\n\nclass TestGame(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_probability(self):\n for alpha in np.linspace(0.2, 0.8, 16):\n print(alpha)\n\n self.game = Serengeti({}, alpha=alpha)\n xs, ys = self.game.IX, self.game.IY\n self.game.peaky = 0.0\n self.game.peaky = 0.0\n p = self.game.probability(xs, ys)\n\n r = np.sqrt(xs * xs + ys * ys)\n s = (r > (3 - 0.05)) * (r < (3 + 0.05))\n b = np.mean(s * p) * self.game.size * self.game.size\n self.assertGreaterEqual(b, -1.0)\n self.assertLessEqual(b, 4.0)\n" ]
[ [ "numpy.linspace", "numpy.mean", "numpy.sqrt" ] ]
etiennelndr/analyze_images
[ "edb0f31f9732b062a48fb298a5d8aadb2b0caa6a" ]
[ "src/nnmodels/animalsmodel.py" ]
[ "try:\n from .model import NNModel\n\n from keras.layers import Conv2D, Dropout, MaxPooling2D, Flatten, Dense\n from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img\n from keras.optimizers import Adam\n from keras.losses import categorical_crossentropy\n from keras.metrics import categorical_accuracy\n\n from os.path import realpath\n\n import tensorflow as tf\n import numpy as np\n\n from PIL import Image\nexcept ImportError as err:\n exit(err)\n\n\nclass AnimalsModel(NNModel):\n \"\"\"\n Neural network model for animal classification.\n\n Sources:\n - dogs-vs-cats dataset -> https://www.kaggle.com/c/dogs-vs-cats/data\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialization of the model.\n \"\"\"\n super().__init__('sequential', 'animals', model_name=self.__class__.__name__.lower())\n\n # Input data shape\n self.input_shape = (150, 150, 3)\n # File extensions for data to predict\n self.FILE_EXTENSIONS = [\n \"png\",\n \"jpg\",\n \"jpeg\",\n \"tif\",\n \"tiff\"\n ]\n\n def create_layers(self):\n \"\"\"\n Creates each layer of the model.\n \"\"\"\n base_dir = join(realpath(__file__).split(\"src\")[0], \"datas/dogs_vs_cats\")\n train_dir = join(base_dir, \"training\")\n val_dir = join(base_dir, \"validation\")\n test_dir = join(base_dir, \"testing\")\n\n assert exists(train_dir) == True\n assert exists(val_dir) == True\n assert exists(test_dir) == True\n\n train_datagen = ImageDataGenerator(rescale=1./255,\n rotation_range=40,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n fill_mode='nearest')\n val_datagen = ImageDataGenerator(rescale=1./255)\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n # Generators\n train_generator = train_datagen.flow_from_directory(\n train_dir,\n target_size=self.input_shape[:2],\n batch_size=32,\n class_mode='binary')\n val_generator = val_datagen.flow_from_directory(\n val_dir,\n target_size=self.input_shape[:2],\n batch_size=32,\n class_mode='binary')\n test_generator = test_datagen.flow_from_directory(\n test_dir,\n target_size=self.input_shape[:2],\n batch_size=20,\n class_mode='binary')\n \n # Datas\n self.datas = { \"train_generator\" : train_generator, \"val_generator\" : val_generator, \"test_generator\" : test_generator }\n\n # Add layers to the model\n self.add_layer(Conv2D(32, (3, 3), activation=tf.nn.relu, input_shape=self.input_shape))\n self.add_layer(MaxPooling2D((2, 2)))\n self.add_layer(Conv2D(64, (3, 3), activation=tf.nn.relu))\n self.add_layer(MaxPooling2D((2, 2)))\n self.add_layer(Conv2D(128, (3, 3), activation=tf.nn.relu))\n self.add_layer(MaxPooling2D((2, 2)))\n self.add_layer(Conv2D(128, (3, 3), activation=tf.nn.relu))\n self.add_layer(MaxPooling2D((2, 2)))\n self.add_layer(Flatten())\n self.add_layer(Dropout(0.5))\n self.add_layer(Dense(512, activation=tf.nn.relu))\n self.add_layer(Dense(1, activation=tf.nn.sigmoid))\n\n self.get_model().summary()\n\n def learn(self):\n \"\"\"\n Compiles and fits a model, evaluation is optional.\n \"\"\"\n # Starting the training\n self._training = True\n\n # Number of epochs\n epochs = 100\n # Learning rate\n learning_rate = 1e-3\n # Compiling the model with an optimizer and a loss function\n self._model.compile(optimizer=Adam(lr=learning_rate, decay=learning_rate/epochs),\n loss=categorical_crossentropy,\n metrics=[categorical_accuracy])\n\n # Fitting the model by using our train and validation data\n # It returns the history that can be plot in the future\n if \"train_generator\" in 
self.datas and \"val_generator\" in self.datas:\n # Fit including validation datas\n self._history = self._model.fit_generator(\n self.datas[\"train_generator\"],\n steps_per_epoch = 100,\n epochs = epochs,\n validation_data = self.datas[\"val_generator\"],\n validation_steps = 20)\n elif \"train_generator\" in self.datas:\n # Fit without validation datas\n self._history = self._model.fit_generator(\n self.datas[\"train_generator\"],\n steps_per_epoch = 100,\n epochs = epochs)\n else:\n raise NotImplementedError(\"Unknown data\")\n\n if \"test_generator\" in self.datas:\n # Evaluation of the model\n testLoss, acc_test = self._model.evaluate_generator(self.datas[\"test_generator\"], steps=50, verbose=1)\n print(\"Loss / test: \" + str(testLoss) + \" and accuracy: \" + str(acc_test))\n\n # Training is over\n self._training = False\n\n def loadDataToPredict(self, filename):\n \"\"\"\n Loads data to predict.\n \"\"\"\n # Open the desired picture\n im = Image.open(filename)\n \n # Resize the picture\n im = im.resize(self.input_shape[:2])\n\n # Get the image array\n self.__imgToPredict = np.array(im)\n\n # Be careful -> each pixel value must be a float\n self.__imgToPredict = self.__imgToPredict.astype('float32')\n # Make a copy of this array to show the picture\n img = np.copy(self.__imgToPredict)\n\n # Normalize the image\n self.__imgToPredict /= 255\n\n # Close the file pointer (if possible)\n im.close()\n\n def predictValue(self):\n \"\"\"\n Predicts a value with a given data.\n \"\"\"\n pred = self._model.predict(self.__imgToPredict.reshape((1,) + self.input_shape))\n return \"dog\" if pred[0][0] >= 0.5 else \"cat\"\n" ]
[ [ "numpy.array", "numpy.copy" ] ]
amirDahari1/super-res
[ "2a93a20d65c570a5398caef65957fb612c3581c8" ]
[ "code/Evaluation.py" ]
[ "import BatchMaker\nimport LearnTools\nimport Networks\nimport ImageTools\nimport argparse\nimport torch\nimport numpy as np\nfrom tifffile import imsave, imread\n\n# Parsing arguments:\nparser = argparse.ArgumentParser()\n\nargs = LearnTools.return_args(parser)\n\nprogress_dir, wd, wg = args.directory, args.widthD, args.widthG\nn_res_blocks, pix_distance = args.n_res_blocks, args.pixel_coefficient_distance\nnum_epochs, g_update, n_dims = args.num_epochs, args.g_update, args.n_dims\nsquash, down_sample = args.squash_phases, args.down_sample\nD_dimensions_to_check, scale_f = args.d_dimensions_to_check, args.scale_factor\nsize_to_evaluate, separator = args.volume_size_to_evaluate, args.separator\ng_file_name, super_sample = args.g_image_path, args.super_sampling\nphases_to_low, g_epoch_id = args.phases_low_res_idx, args.g_epoch_id\n\nprogress_main_dir = 'progress/' + progress_dir\n# progress_main_dir = 'progress'\npath_to_g_weights = progress_main_dir + '/g_weights.pth' + g_epoch_id\n# path_to_g_weights = progress_main_dir + '/g_weights_large_slice.pth'\nG_image_path = 'data/' + g_file_name\n# G_image_path = 'data/new_vol_down_sample.tif'\n\nrand_id = str(np.random.randint(10000))\n\nfile_name = 'generated_tif' + rand_id + '.tif'\ncrop_to_cube = False\ninput_with_noise = True\nall_pore_input = False\n\n# crop the edges\ncrop = 4\n\n# Number of GPUs available. Use 0 for CPU mode.\nngpu = 1\n\n# Decide which device we want to run on\ndevice = torch.device(\n \"cuda:0\" if (torch.cuda.is_available() and ngpu > 0) else \"cpu\")\nprint('device is ' + str(device))\n\n# the material indices to low-res:\nto_low_idx = torch.LongTensor(phases_to_low).to(device)\n\n# Number of channels in the training images. For color images this is 3\nif squash:\n if input_with_noise:\n nc_g = 3\n else:\n nc_g = 2\nelse:\n if input_with_noise:\n nc_g = 1 + to_low_idx.size()[0] + 1 # channel for pore plus number of\n # material phases to low res.\n else:\n nc_g = 1 + to_low_idx.size()[0]\n\n# TODO make this more general, to support any number of discriminator phases\nif separator:\n nc_d = 2\nelse:\n nc_d = 3 # three phases for the discriminator input\n\nG_net = Networks.generator(ngpu, wg, nc_g, nc_d, n_res_blocks, n_dims,\n scale_factor=scale_f).to(device)\nG_net.load_state_dict(torch.load(path_to_g_weights, map_location=torch.device(\n device)))\n\n# If the whole network is saved:\n# G_net = torch.load(path_to_g_weights, map_location=torch.device(device))\nG_net.eval()\n\n\ndef crop_to_down_sample(high_res):\n \"\"\"\n If down sample, crops the high resolution image to fit the scale factor.\n \"\"\"\n dims = np.array(high_res.shape)\n crop_dims = []\n for idx in range(len(dims)):\n dim = dims[idx]\n for subtract in range(dim):\n # doing % twice because the number can be 0 from below (%1.6=1.599)\n if np.round((dim - subtract) % scale_f, 5) % scale_f == 0:\n crop_dims.append(dim - subtract)\n break\n return high_res[:crop_dims[0], :crop_dims[1], :crop_dims[2]]\n\n\nwith torch.no_grad(): # save the images\n # 1. 
Start a new run\n # wandb.init(project='SuperRes', name='making large volume',\n # entity='tldr-group')\n\n step_len = int(np.round(128/scale_f, 5))\n overlap = int(step_len/2)\n high_overlap = int(np.round(overlap / 2 * scale_f, 5))\n step = step_len - overlap\n\n BM_G = BatchMaker.\\\n BatchMaker(device=device, to_low_idx=to_low_idx, path=G_image_path,\n sf=scale_f, dims=n_dims, stack=False,\n down_sample=down_sample, low_res=not down_sample,\n rot_and_mir=False, squash=squash, super_sample=super_sample)\n im_3d = BM_G.all_image_batch()\n\n if all_pore_input:\n im_3d[:] = 0\n im_3d[:, 0] = 1\n\n if input_with_noise:\n input_size = im_3d.size()\n # make noise channel and concatenate it to input:\n noise = torch.randn(input_size[0], 1, *input_size[2:],\n device=device, dtype=im_3d.dtype)\n im_3d = torch.cat((im_3d, noise), dim=1)\n\n nz1, nz2, nz3 = size_to_evaluate\n first_img_stack = []\n with torch.no_grad():\n last_ind1 = int(np.ceil((nz1-step_len)/step))\n for i in range(last_ind1 + 1):\n print('large step = ' + str(i))\n if i == last_ind1:\n first_lr_vec = im_3d[..., nz1-step_len:nz1, :, :]\n else:\n first_lr_vec = im_3d[..., i*step:i*step+step_len, :, :]\n second_img_stack = []\n last_ind2 = int(np.ceil((nz2-step_len)/step))\n for j in range(last_ind2 + 1):\n print('middle step = ' + str(j))\n if j == last_ind2:\n second_lr_vec = first_lr_vec[..., :, nz2-step_len:nz2, :]\n else:\n second_lr_vec = first_lr_vec[..., :, j * step:j * step +\n step_len, :]\n third_img_stack = []\n last_ind3 = int(np.ceil((nz3-step_len)/step))\n for k in range(last_ind3 + 1):\n print('small step = ' + str(k))\n if k == last_ind3:\n third_lr_vec = second_lr_vec[..., :, :,\n nz3-step_len:nz3]\n else:\n third_lr_vec = second_lr_vec[..., :, :, k * step:k *\n step + step_len]\n g_output, _ = G_net(third_lr_vec)\n g_output = g_output.detach().cpu()\n g_output = ImageTools.fractions_to_ohe(g_output)\n g_output_grey = ImageTools.one_hot_decoding(\n g_output).astype('int8').squeeze()\n if k == 0: # keep the beginning\n g_output_grey = g_output_grey[:, :, :-high_overlap]\n elif k == last_ind3: # keep the middle+end\n excess_voxels = int(\n ((nz3 - step_len) % step) * scale_f)\n if excess_voxels > 0:\n g_output_grey = g_output_grey[:, :,\n -(high_overlap +\n excess_voxels):]\n else:\n g_output_grey = g_output_grey[:, :, high_overlap:]\n else: # keep the middle\n g_output_grey = g_output_grey[:, :, high_overlap:\n - high_overlap]\n third_img_stack.append(np.int8(g_output_grey))\n res2 = np.concatenate(third_img_stack, axis=2)\n if j == 0:\n res2 = res2[:, :-high_overlap, :]\n elif j == last_ind2:\n excess_voxels = int(((nz2 - step_len) % step) * scale_f)\n if excess_voxels > 0:\n res2 = res2[:, -(high_overlap + excess_voxels):, :]\n else:\n res2 = res2[:, high_overlap:, :]\n else:\n res2 = res2[:, high_overlap:-high_overlap, :]\n second_img_stack.append(res2)\n res1 = np.concatenate(second_img_stack, axis=1)\n if i == 0:\n res1 = res1[:-high_overlap, :, :]\n elif i == last_ind1:\n excess_voxels = int(((nz1 - step_len) % step) * scale_f)\n if excess_voxels > 0:\n res1 = res1[-(high_overlap+excess_voxels):, :, :]\n else:\n res1 = res1[high_overlap:, :, :]\n else:\n res1 = res1[high_overlap:-high_overlap, :, :]\n first_img_stack.append(res1)\n img = np.concatenate(first_img_stack, axis=0)\n img = img[crop:-crop, crop:-crop, crop:-crop]\n low_res = np.squeeze(ImageTools.one_hot_decoding(im_3d.cpu()))\n if all_pore_input:\n imsave(progress_main_dir + '/' + file_name + '_pore', img)\n else:\n imsave(progress_main_dir + '/' 
+ file_name, img)\n\n # also save the low-res input.\n imsave(progress_main_dir + '/' + file_name + 'low_res', low_res)\n" ]
[ [ "numpy.concatenate", "torch.device", "numpy.array", "torch.cat", "numpy.ceil", "numpy.int8", "numpy.round", "torch.no_grad", "torch.cuda.is_available", "torch.LongTensor", "numpy.random.randint", "torch.randn" ] ]
ppppps/SNN_Calibration
[ "1aca56daa5759a28bed6ed31b207c766d745dd51" ]
[ "CIFAR/models/utils.py" ]
[ "import numpy as np\nimport torch.nn as nn\n\n\nclass StraightThrough(nn.Module):\n \"\"\"\n\n \"\"\"\n def __init__(self, channel_num: int = 1):\n super().__init__()\n\n def forward(self, input):\n return input\n\n\nclass AvgPoolConv(nn.Conv2d):\n \"\"\"\n Converting the AvgPool layers to a convolution-wrapped module,\n so that this module can be identified in Spiking-refactor.\n \"\"\"\n def __init__(self, kernel_size=2, stride=2, input_channel=64, padding=0, freeze_avg=True):\n super().__init__(input_channel, input_channel, kernel_size, padding=padding, stride=stride,\n groups=input_channel, bias=False)\n # init the weight to make them equal to 1/k/k\n self.set_weight_to_avg()\n self.freeze = freeze_avg\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, *inputs):\n self.set_weight_to_avg()\n x = super().forward(*inputs)\n return self.relu(x)\n\n def set_weight_to_avg(self):\n self.weight.data.fill_(1).div_(self.kernel_size[0] * self.kernel_size[1])\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, length=0):\n self.length = length\n self.reset()\n\n def reset(self):\n if self.length > 0:\n self.history = []\n else:\n self.count = 0\n self.sum = 0.0\n self.val = 0.0\n self.avg = 0.0\n\n def reduce_update(self, tensor, num=1):\n link.allreduce(tensor)\n self.update(tensor.item(), num=num)\n\n def update(self, val, num=1):\n if self.length > 0:\n # currently assert num==1 to avoid bad usage, refine when there are some explict requirements\n assert num == 1\n self.history.append(val)\n if len(self.history) > self.length:\n del self.history[0]\n\n self.val = self.history[-1]\n self.avg = np.mean(self.history)\n else:\n self.val = val\n self.sum += val*num\n self.count += num\n self.avg = self.sum / self.count\n" ]
[ [ "torch.nn.ReLU", "numpy.mean" ] ]
kaljuvee/openaltdata
[ "9c5d140b56cfd5260fe3cf52b24bb7d467e87cf1" ]
[ "data_collection/altdata_service/news/util/news_util.py" ]
[ "import os.path\r\nimport yaml\r\nimport logging\r\nfrom bs4 import BeautifulSoup\r\nimport data_collection.altdata_service.news.config.sqlalchemy_connector as dbconn\r\nfrom dateutil.parser import parse\r\nimport pandas as pd\r\n\r\nABS_PATH = os.path.dirname(os.path.abspath(__file__))\r\n# CONFIG_PATH = os.path.join(ROOT_DIR, 'configuration.conf')\r\n# RSS_PATH = os.path.join('config', 'altsignals-market-news-rss.yaml')\r\nMARKET_RSS_PATH = os.path.join(ABS_PATH, '../config', 'altsignals-market-news-rss.yaml')\r\nCOMPANY_RSS_PATH = os.path.join(ABS_PATH, '../config', 'altsignals-company-news-rss.yaml')\r\nnews = dbconn.db.Table('news', dbconn.metadata, autoload=True, autoload_with=dbconn.engine)\r\nDEFAULT_SESSION = 'pre-market'\r\nGLOBENEWSIRE = 'GlobeNewswire Inc.'\r\nTRIT = 'TRIT'\r\nCOMPANY_KEY = 'COMPANY_KEY'\r\n\r\n\r\ndef get_news_df():\r\n cols = ['title', 'summary', 'full_text', 'published', 'link', 'contributor', 'subject', 'keyword', 'provider',\r\n 'language', 'ticker', 'senti_score', 'senti_method', 'company', 'sector', 'market_cap', 'ticker_source',\r\n 'trading_session', 'yticker', 'ticker_normal']\r\n return pd.DataFrame(columns=cols)\r\n\r\n\r\ndef get_language(newsitem):\r\n if 'language' in newsitem:\r\n language = newsitem['language']\r\n else:\r\n return None\r\n return language\r\n\r\n\r\ndef get_ticker(news_item):\r\n try:\r\n if 'category' in news_item:\r\n if ':' in news_item['category']:\r\n ticker = news_item['category']\r\n else:\r\n ticker = None # we have no ticker in the article; use TRIT\r\n else:\r\n ticker = None\r\n except Exception as e:\r\n print('Exception in news_util.get_ticker:', e)\r\n ticker = None\r\n return ticker\r\n\r\n\r\ndef get_ticker_source(newsitem):\r\n if 'publisher' in newsitem:\r\n return newsitem['publisher']\r\n else:\r\n return None\r\n\r\n\r\ndef get_keyword(newsitem):\r\n if 'dc_keyword' in newsitem:\r\n return newsitem['dc_keyword']\r\n else:\r\n return None\r\n\r\n\r\ndef get_yticker(newsitem, key, key_type):\r\n ticker = get_ticker(newsitem)\r\n if ticker is not None and get_ticker_source(newsitem) == GLOBENEWSIRE:\r\n list_ticker = ticker.split(':')\r\n return list_ticker[1]\r\n elif ticker is not None and get_ticker_source(newsitem) == TRIT:\r\n list_ticker = ticker.split('.')\r\n return list_ticker[0]\r\n elif key_type == COMPANY_KEY:\r\n return key\r\n else:\r\n return None\r\n\r\n\r\ndef get_exchange(newsitem, key, key_type):\r\n ticker = get_ticker(newsitem)\r\n if ticker is not None and get_ticker_source(newsitem) == GLOBENEWSIRE:\r\n exchange = ticker.split(':')\r\n return exchange[0]\r\n else:\r\n return None\r\n\r\n\r\ndef get_contributor(newsitem):\r\n if 'contributors' in newsitem:\r\n return newsitem['contributors'][0]['name']\r\n else:\r\n return None\r\n\r\n\r\ndef get_company(newsitem):\r\n if 'contributors' in newsitem:\r\n return newsitem['contributors'][0]['name']\r\n else:\r\n return None\r\n\r\n\r\ndef get_trading_session(newsitem):\r\n utc = parse(newsitem['published'])\r\n return DEFAULT_SESSION\r\n\r\n\r\ndef clean_text(raw_html):\r\n cleantext = BeautifulSoup(raw_html, \"lxml\").text\r\n return cleantext\r\n\r\n\r\ndef check_news_item(link):\r\n exists_query = dbconn.connector.select([news.columns.link]).where(news.columns.link == link)\r\n exists = dbconn.conn.execute(exists_query)\r\n return exists.scalar() is not None\r\n\r\n\r\ndef load_market_rss_urls():\r\n with open(MARKET_RSS_PATH) as file:\r\n news_urls = yaml.full_load(file)\r\n logging.info(f\"Checking {len(news_urls.keys())} market RSS 
URLs\")\r\n return news_urls\r\n\r\n\r\ndef load_company_rss_urls():\r\n with open(COMPANY_RSS_PATH) as file:\r\n news_urls = yaml.full_load(file)\r\n logging.info(f\"Checking {len(news_urls.keys())} company RSS URLs\")\r\n return news_urls\r\n" ]
[ [ "pandas.DataFrame" ] ]
mcd4874/NeurIPS_competition
[ "4df1f222929e9824a55c9c4ae6634743391b0fe9" ]
[ "EEG_Lightning/dassl/engine/dg/MLDG_tmp_V1.py" ]
[ "import torch\nfrom torch.nn import functional as F\nimport torch.nn as nn\nfrom dassl.data import DataManager\nfrom dassl.optim import build_optimizer, build_lr_scheduler\nfrom dassl.utils import count_num_param\nfrom dassl.engine import TRAINER_REGISTRY, TrainerX\nfrom dassl.modeling.ops import ReverseGrad\nfrom dassl.engine.trainer_tmp import SimpleNet\nfrom torch.utils.data import Dataset as TorchDataset\nfrom dassl.utils import MetricMeter\nimport learn2learn as l2l\nimport numpy as np\n@TRAINER_REGISTRY.register()\nclass MLDGV1(TrainerX):\n \"\"\"\n Learning to Generalize: Meta-Learning for Domain Generalization\n https://arxiv.org/pdf/1710.03463.pdf\n \"\"\"\n def __init__(self, cfg):\n # self.alpha = cfg.TRAINER.MLDG.alpha\n self.inner_lr = cfg.TRAINER.MLDG.inner_lr\n self.num_test_subject = cfg.TRAINER.MLDG.num_test_subject\n self.num_inner_loop = 10\n self.warm_up = 20\n super().__init__(cfg)\n\n n_domain = cfg.DATALOADER.TRAIN_X.N_DOMAIN\n if n_domain <= 0:\n n_domain = self.dm.num_source_domains\n self.n_domain = n_domain\n batch_size = cfg.DATALOADER.TRAIN_X.BATCH_SIZE\n self.split_batch = batch_size // n_domain\n\n #create a cross entropy loss for whole dataset\n self.ce = nn.CrossEntropyLoss()\n # self.ce_1 = nn.CrossEntropyLoss()\n if cfg.DATASET.TOTAL_CLASS_WEIGHT:\n total_data_class_weight = self.dm.dataset.whole_class_weight\n if total_data_class_weight is not None:\n torch_weight = torch.from_numpy(np.array(total_data_class_weight)).float().to(self.device)\n print(\"torch weight : \",torch_weight)\n self.ce = nn.CrossEntropyLoss(weight=torch_weight)\n\n # create a cross entropy loss for each domain dataset\n self.cce = [nn.CrossEntropyLoss() for _ in range(self.n_domain)]\n if cfg.DATASET.DOMAIN_CLASS_WEIGHT:\n domain_class_weight = self.dm.dataset.domain_class_weight\n for domain, weight in domain_class_weight.items():\n # domain_class_weight[domain] = torch.from_numpy(np.array(weight)).float().to(self.device)\n torch_weight = torch.from_numpy(np.array(weight)).float().to(self.device)\n self.cce[domain] = nn.CrossEntropyLoss(weight=torch_weight)\n\n\n self.val_ce = nn.CrossEntropyLoss()\n\n\n self.candidates = np.arange(self.n_domain)\n index = np.random.permutation(self.candidates)\n self.meta_test_idx = index[:self.num_test_subject]\n self.meta_train_idx = index[self.num_test_subject:]\n\n # self.meta_test_idx = index[0]\n # self.alpha = 0.1 #within_subject\n\n # self.alpha = 1.0 #within_subject_1\n\n\n\n\n\n\n def check_cfg(self, cfg):\n assert cfg.DATALOADER.TRAIN_X.SAMPLER == 'RandomDomainSampler'\n def build_model(self):\n cfg = self.cfg\n\n print('Building Feature')\n self.model = SimpleNet(cfg, cfg.MODEL, self.num_classes, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n\n\n\n print('Building MAML')\n # fast_lr = 0.001\n self.maml = l2l.algorithms.MAML(self.model, lr=self.inner_lr)\n self.maml.to(self.device)\n\n\n self.optim_maml = build_optimizer(self.maml, cfg.OPTIM)\n self.sched_maml = build_lr_scheduler(self.optim_maml, cfg.OPTIM)\n self.register_model('maml', self.maml, self.optim_maml, self.sched_maml)\n\n # self.learner1 = None\n # self.learner2 = None\n\n # self.register_model('model', self.model)\n\n def check_equal_model(self,model1,model2):\n for p1, p2 in zip(model1.parameters(), model2.parameters()):\n if p1.data.ne(p2.data).sum() > 0:\n return False\n return True\n\n def check_equal_grad(self,model1,model2):\n for p1, p2 in zip(model1.parameters(), model2.parameters()):\n 
if p1.grad.data.ne(p2.grad.data).sum() > 0:\n return False\n return True\n\n def check_model_params(self,modelList):\n for model_info in modelList:\n # name = model_info['name']\n # model = model_info['model']\n name = model_info[0]\n model = model_info[1]\n print(name)\n for p in model.parameters():\n print(\"sum params : \",p.data.sum())\n\n\n def forward_backward(self, batch,backprob = True):\n if self.batch_idx == 0:\n index = np.random.permutation(self.candidates)\n self.meta_test_idx = index[:self.num_test_subject]\n self.meta_train_idx = index[self.num_test_subject:]\n print(\"update meta test to be subject : {}\".format(self.meta_test_idx))\n input_x, label_x, domain_x = self.parse_batch_train(batch)\n learner = self.maml.clone()\n\n #check model and maml params\n\n\n\n meta_train_loss = 0.0\n meta_test_loss = 0.0\n\n #clone maml\n\n if backprob:\n\n if self.epoch >= self.warm_up:\n\n # train Domain Specific model\n input_x = torch.split(input_x, self.split_batch, 0)\n label_x = torch.split(label_x, self.split_batch, 0)\n domain_x = torch.split(domain_x, self.split_batch, 0)\n d_x = [d[0].item() for d in domain_x]\n\n\n self.model_zero_grad(['maml'])\n for i in range(self.num_inner_loop):\n domain = np.random.permutation(self.meta_train_idx)[0]\n x,y = input_x[domain],label_x[domain]\n logits = learner(x)\n if self.cfg.DATASET.DOMAIN_CLASS_WEIGHT:\n loss = self.cce[domain](logits, y)\n else:\n loss = self.ce(logits, y)\n meta_train_loss = loss\n learner.adapt(loss)\n\n # for x, y, dy, d in zip(input_x, label_x, domain_x, d_x):\n # if d != self.meta_test_idx:\n # logits =learner(x)\n # if self.cfg.DATASET.DOMAIN_CLASS_WEIGHT:\n # loss = self.cce[d](logits, y)\n # else:\n # loss = self.ce(logits, y)\n # meta_train_loss+= loss\n\n\n\n # meta_train_loss /= len(self.meta_train_idx)\n # print(\"equal model and clone maml 1 \", self.check_equal_model(self.model, learner))\n # print(\"equal model and maml 1 \",self.check_equal_model(self.model,self.maml))\n # print(\"equal maml and clone maml 1 \", self.check_equal_model(self.maml, learner))\n\n # print(\"equal grad model and clone maml 1 \", self.check_equal_grad(self.model, learner))\n # print(\"equal grad model and maml 1 \", self.check_equal_grad(self.model, self.maml))\n # print(\"equal grad maml and clone maml 1 \", self.check_equal_grad(self.model, self.maml))\n\n # self.check_model_params([[\"model\",self.model],[\"maml\",self.maml],[\"clone\",learner]])\n # self.learner1 = learner\n # learner.adapt(meta_train_loss)\n # self.learner2 = learner\n # print(\"equal model and clone maml 2 \", self.check_equal_model(self.model, learner))\n # print(\"equal model and maml 2 \", self.check_equal_model(self.model, self.maml))\n # print(\"equal maml and clone maml 2 \", self.check_equal_model(self.maml, learner))\n\n # print(\"equal grad model and clone maml 2 \", self.check_equal_grad(self.model, learner))\n # print(\"equal grad model and maml 2 \", self.check_equal_grad(self.model, self.maml))\n # print(\"equal grad maml and clone maml 2 \", self.check_equal_grad(self.maml, learner))\n\n # self.check_model_params([[\"model\", self.model], [\"maml\", self.maml], [\"clone\", learner]])\n #meta test stage\n for domain in self.meta_test_idx:\n x, y = input_x[domain], label_x[domain]\n logits = learner(x)\n loss = self.ce(logits,y)\n meta_test_loss +=loss\n meta_test_loss /= len(self.meta_test_idx)\n self.model_backward(meta_test_loss)\n self.model_update(['maml'])\n\n # print(\"equal model and clone maml 3 \", self.check_equal_model(self.model, 
learner))\n # print(\"equal model and maml 3 \", self.check_equal_model(self.model, self.maml))\n # print(\"equal maml and clone maml 3 \", self.check_equal_model(self.maml, learner))\n #\n # print(\"equal grad model and clone maml 3 \", self.check_equal_grad(self.model, learner))\n # print(\"equal grad model and maml 3 \", self.check_equal_grad(self.model, self.maml))\n # print(\"equal grad maml and clone maml 3 \", self.check_equal_grad(self.model, self.maml))\n\n\n # self.check_model_params([[\"model\", self.model], [\"maml\", self.maml], [\"clone\", learner]])\n if (self.batch_idx + 1) == self.num_batches:\n self.update_lr( ['maml'])\n\n\n\n loss_summary = {\n 'meta_train_loss': meta_train_loss.item(),\n 'meta_test_loss': meta_test_loss.item(),\n # 'total_loss': final_loss.item()\n }\n else:\n logits = self.maml(input_x)\n loss = self.ce(logits,label_x)\n self.model_backward_and_update(loss,['maml'])\n if (self.batch_idx + 1) == self.num_batches:\n self.update_lr(['maml'])\n loss_summary = {\n 'general_loss': loss.item()\n }\n else:\n logits = learner(input_x)\n val_loss = self.val_ce(logits,label_x)\n\n # input_x = torch.split(input_x, self.split_batch, 0)\n # label_x = torch.split(label_x, self.split_batch, 0)\n\n # for domain in self.meta_train_idx:\n # x,y = input_x[domain],label_x[domain]\n # logits = self.learner1(x)\n # loss = self.val_ce(logits,y)\n # meta_train_loss+=loss\n #\n # meta_train_loss /= len(self.meta_train_idx)\n # for domain in self.meta_test_idx:\n # x,y = input_x[domain],label_x[domain]\n # logits = self.learner2(x)\n # loss = self.val_ce(logits,y)\n # meta_test_loss+=loss\n # meta_test_loss /= len(self.meta_test_idx)\n #\n # total_loss = meta_train_loss + self.alpha*meta_test_loss\n\n\n loss_summary = {\n \"general_loss\":val_loss.item()\n }\n\n\n\n\n return loss_summary\n\n @torch.no_grad()\n def validate(self):\n \"\"\"A generic testing pipeline.\"\"\"\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n # total_loss = losses.meters['total_loss'].avg\n total_loss = losses.meters['general_loss'].avg\n\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n return [total_loss,losses.dict_results(),results]\n\n\n\n\n def model_inference(self, input):\n logits = self.maml(input)\n probs = F.softmax(logits, dim=1)\n return probs\n\n\n\n\n\n" ]
[ [ "numpy.array", "torch.no_grad", "numpy.random.permutation", "torch.split", "numpy.arange", "torch.nn.functional.softmax", "torch.nn.CrossEntropyLoss" ] ]
FloatTech/AI-Bot
[ "2bdbff2f98c7a2fe9e2c5a81545fd5133d0e1f64" ]
[ "TF2_GPT-2/bot.py" ]
[ "\nimport re\nimport time\nimport queue\nimport logging\nimport threading\nimport collections\nimport json as json_\nimport os\nimport psutil\nimport websocket\n\nimport numpy as np \nimport Api as GPT\n\n\n\n\n\n\nWS_URL = \"ws://127.0.0.1:6700/ws\" # WebSocket 地址\nNICKNAME = [\"BOT\", \"ROBOT\"] # 机器人昵称\nSUPER_USER = [1237545454] # 主人的 QQ 号\n# 日志设置 level=logging.DEBUG -> 日志级别为 DEBUG\nlogging.basicConfig(level=logging.DEBUG, format=\"[void] %(asctime)s - %(levelname)s - %(message)s\")\nlogger = logging.getLogger(__name__)\n\n\nclass Plugin:\n def __init__(self, context: dict):\n self.ws = WS_APP\n self.context = context\n\n def match(self) -> bool:\n return self.on_full_match(\"hello\")\n\n def handle(self):\n self.send_msg(text(\"hello world!\"))\n\n def on_message(self) -> bool:\n return self.context[\"post_type\"] == \"message\"\n\n def on_full_match(self, keyword=\"\") -> bool:\n return self.on_message() and self.context[\"message\"] == keyword\n\n def on_reg_match(self, pattern=\"\") -> bool:\n return self.on_message() and re.search(pattern, self.context[\"message\"])\n\n def only_to_me(self) -> bool:\n flag = False\n for nick in NICKNAME + [f\"[CQ:at,qq={self.context['self_id']}] \"]:\n if self.on_message() and nick in self.context[\"message\"]:\n flag = True\n self.context[\"message\"] = self.context[\"message\"].replace(nick, \"\")\n return flag\n\n def super_user(self) -> bool:\n return self.context[\"user_id\"] in SUPER_USER\n\n def admin_user(self) -> bool:\n return self.super_user() or self.context[\"sender\"][\"role\"] in (\"admin\", \"owner\")\n\n def call_api(self, action: str, params: dict) -> dict:\n echo_num, q = echo.get()\n data = json_.dumps({\"action\": action, \"params\": params, \"echo\": echo_num})\n logger.info(\"发送调用 <- \" + data)\n self.ws.send(data)\n try: # 阻塞至响应或者等待30s超时\n return q.get(timeout=30)\n except queue.Empty:\n logger.error(\"API调用[{echo_num}] 超时......\")\n\n def send_msg(self, *message) -> int:\n # https://github.com/botuniverse/onebot-11/blob/master/api/public.md#send_msg-%E5%8F%91%E9%80%81%E6%B6%88%E6%81%AF\n if \"group_id\" in self.context and self.context[\"group_id\"]:\n return self.send_group_msg(*message)\n else:\n return self.send_private_msg(*message)\n\n def send_private_msg(self, *message) -> int:\n # https://github.com/botuniverse/onebot-11/blob/master/api/public.md#send_private_msg-%E5%8F%91%E9%80%81%E7%A7%81%E8%81%8A%E6%B6%88%E6%81%AF\n params = {\"user_id\": self.context[\"user_id\"], \"message\": message}\n ret = self.call_api(\"send_private_msg\", params)\n return 0 if ret is None or ret[\"status\"] == \"failed\" else ret[\"data\"][\"message_id\"]\n\n def send_group_msg(self, *message) -> int:\n # https://github.com/botuniverse/onebot-11/blob/master/api/public.md#send_group_msg-%E5%8F%91%E9%80%81%E7%BE%A4%E6%B6%88%E6%81%AF\n params = {\"group_id\": self.context[\"group_id\"], \"message\": message}\n ret = self.call_api(\"send_group_msg\", params)\n return 0 if ret is None or ret[\"status\"] == \"failed\" else ret[\"data\"][\"message_id\"]\n\n\n\n\ndef text(string: str) -> dict:\n # https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E7%BA%AF%E6%96%87%E6%9C%AC\n return {\"type\": \"text\", \"data\": {\"text\": string}}\n\n\ndef image(file: str, cache=True) -> dict:\n # https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E5%9B%BE%E7%89%87\n return {\"type\": \"image\", \"data\": {\"file\": file, \"cache\": cache}}\n\n\ndef record(file: str, cache=True) -> dict:\n # 
https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E8%AF%AD%E9%9F%B3\n return {\"type\": \"record\", \"data\": {\"file\": file, \"cache\": cache}}\n\n\ndef at(qq: int) -> dict:\n # https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E6%9F%90%E4%BA%BA\n return {\"type\": \"at\", \"data\": {\"qq\": qq}}\n\n\ndef xml(data: str) -> dict:\n # https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#xml-%E6%B6%88%E6%81%AF\n return {\"type\": \"xml\", \"data\": {\"data\": data}}\n\n\ndef json(data: str) -> dict:\n # https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#json-%E6%B6%88%E6%81%AF\n return {\"type\": \"json\", \"data\": {\"data\": data}}\n\n\ndef music(data: str) -> dict:\n # https://github.com/botuniverse/onebot-11/blob/master/message/segment.md#%E9%9F%B3%E4%B9%90%E5%88%86%E4%BA%AB-\n return {\"type\": \"music\", \"data\": {\"type\": \"qq\", \"id\": data}}\n\n\n\"\"\"\n在下面加入你自定义的插件,自动加载本文件所有的 Plugin 的子类\n只需要写一个 Plugin 的子类,重写 match() 和 handle()\nmatch() 返回 True 则自动回调 handle()\n\"\"\"\n\n\nclass TestPlugin(Plugin):\n def match(self): # 说 hello 则回复\n return self.on_full_match(\"hello\")\n\n def handle(self):\n self.send_msg(at(self.context[\"user_id\"]), text(\"hello world!\"))\n\n\nclass f(Plugin) :\n def match(self):\n return self.on_full_match(\"mua~\")\n \n def handle(self):\n self.send_msg(at(self.context[\"user_id\"]),text(\"恶心🤢\"))\n\n\n\n\nclass ss(Plugin) :\n def match(self) :\n return self.on_full_match(\"沙比\")\n \n def handle(self):\n\n po = np.random.random(1)\n op = np.random.random(1)\n if op > po :\n self.send_msg(at(self.context[\"user_id\"]),text('歪!!骂谁呐!'))\n else :\n self.send_msg(at(self.context[\"user_id\"]),text('草草....草尼🐎🐎(¬︿̫̿¬☆)不理你了'))\n\n\n\n \n \n\n\n\n\n\nclass ADD(Plugin):\n def match(self) :\n return self.only_to_me() and self.on_full_match(\"好慢啊你\")\n\n def handle(self):\n \n self.send_msg(at(self.context[\"user_id\"]),text(\"要不你来试试?!!呜呜呜😭\"))\n\n\n\n\n\nclass SELF(Plugin) :\n def match(self) :\n return self.on_full_match(\"检查身体\")\n\n def handle(self):\n\n info = os.system('ver')\n\n net_work = psutil.cpu_stats()\n \n\n mem = psutil.virtual_memory()\n# 系统总计内存\n All_M = float(mem.total) / 1024 / 1024 / 1024\n# 系统已经使用内存\n use_ing = float(mem.used) / 1024 / 1024 / 1024\n\n# 系统空闲内存\n free = float(mem.free) / 1024 / 1024 / 1024\n\n all_m = '系统总计内存:%d.3GB' % All_M\n Use = '系统已经使用内存:%d.3GB' % use_ing\n Free = '系统空闲内存:%d.3GB' % free\n C_k = 'CPU状态:{}'.format(net_work)\n\n\n\n self.send_msg(text('{}\\n\\n{}\\n\\n{}\\n\\n{}\\n{}'.format(info,all_m,Use,Free,C_k)))\n\n\n\nclass TestPlugin3(Plugin):\n def match(self): # 戳一戳机器人则回复\n return self.context[\"post_type\"] == \"notice\" and self.context[\"sub_type\"] == \"poke\"\\\n and self.context[\"target_id\"] == self.context[\"self_id\"]\n\n def handle(self):\n k = np.random.random(1)\n j = np.random.random(1)\n x = \"请不要戳我 >_<\"\n h = \"歪!!戳我干嘛!!(╯▔皿▔)╯\"\n if k < j :\n self.send_msg(text(x))\n else :\n self.send_msg(text(h))\n \n\n\nclass TPugin(Plugin) :\n def match(self) :\n return self.on_full_match('生成文章')\n \n def handle(self):\n self.send_msg(text('构思中可能需要几分钟,取决于我的小脑袋ε=ε=ε=(~ ̄▽ ̄)~........'))\n\n\n#GPT-2生成文章插件\nclass GeneratePlugin(Plugin) :\n def match(self) :\n \n return self.on_full_match('生成文章')\n \n def handle(self):\n \n\n GPT.sequence_gen(\n model_path = \"C:\\\\Users\\\\xbj0916\\\\Desktop\\\\TF2_GPT-2\\\\TF2_GPT\\\\model\\\\\",\n model_param = 
\"C:\\\\Users\\\\xbj0916\\\\Desktop\\\\TF2_GPT-2\\\\TF2_GPT\\\\model\\\\model_par.json\",\n vocab = \"C:\\\\Users\\\\xbj0916\\\\Desktop\\\\TF2_GPT-2\\\\TF2_GPT\\\\data\\\\bpe_model.model\",\n seq_len = 512,\n temperature = 1,\n top_k = 8,\n top_p = 0.9,\n nucleus_sampling = False, \n context = \"sample context\")#文章题目\n\n \n f = open('s.txt',encoding='utf-8').read()\n self.send_msg(text('哒哒哒~~~生成完成:{}'.format(f)))\n \n #这里是私发可以改为群发\n \n \n\n\n\n\n \n\"\"\"\n在上面自定义你的插\n\"\"\"\n\n\ndef plugin_pool(context: dict):\n # 遍历所有的 Plugin 的子类,执行匹配\n for P in Plugin.__subclasses__():\n plugin = P(context)\n if plugin.match():\n plugin.handle()\n\n\nclass Echo:\n def __init__(self):\n self.echo_num = 0\n self.echo_list = collections.deque(maxlen=20)\n\n def get(self):\n self.echo_num += 1\n q = queue.Queue(maxsize=1)\n self.echo_list.append((self.echo_num, q))\n return self.echo_num, q\n\n def match(self, context: dict):\n for obj in self.echo_list:\n if context[\"echo\"] == obj[0]:\n obj[1].put(context)\n\n\ndef on_message(_, message):\n # https://github.com/botuniverse/onebot-11/blob/master/event/README.md\n context = json_.loads(message)\n if \"echo\" in context:\n logger.debug(\"调用返回 -> \" + message)\n # 响应报文通过队列传递给调用 API 的函数\n echo.match(context)\n elif \"meta_event_type\" in context:\n logger.debug(\"心跳事件 -> \" + message)\n else:\n logger.info(\"收到事件 -> \" + message)\n # 消息事件,开启线程\n t = threading.Thread(target=plugin_pool, args=(context, ))\n t.start()\n\n\nif __name__ == \"__main__\":\n echo = Echo()\n WS_APP = websocket.WebSocketApp(\n WS_URL,\n on_message=on_message,\n on_open=lambda _: logger.debug(\"连接成功......\"),\n on_close=lambda _: logger.debug(\"重连中......\"),\n )\n while True: # 掉线重连\n WS_APP.run_forever()\n time.sleep(5)\n" ]
[ [ "numpy.random.random" ] ]
gaoxinge/taichi
[ "86d403f071b8505858763d4712b37cd71b89db91" ]
[ "python/taichi/examples/simulation/stable_fluid.py" ]
[ "# References:\n# http://developer.download.nvidia.com/books/HTML/gpugems/gpugems_ch38.html\n# https://github.com/PavelDoGreat/WebGL-Fluid-Simulation\n# https://www.bilibili.com/video/BV1ZK411H7Hc?p=4\n# https://github.com/ShaneFX/GAMES201/tree/master/HW01\n\nimport argparse\n\nimport numpy as np\n\nimport taichi as ti\n\n# How to run:\n# `python stable_fluid.py`: use the jacobi iteration to solve the linear system.\n# `python stable_fluid.py -S`: use a sparse matrix to do so.\nparser = argparse.ArgumentParser()\nparser.add_argument('-S',\n '--use-sp-mat',\n action='store_true',\n help='Solve Poisson\\'s equation by using a sparse matrix')\nargs, unknowns = parser.parse_known_args()\n\nres = 512\ndt = 0.03\np_jacobi_iters = 500 # 40 for a quicker but less accurate result\nf_strength = 10000.0\ncurl_strength = 0\ntime_c = 2\nmaxfps = 60\ndye_decay = 1 - 1 / (maxfps * time_c)\nforce_radius = res / 2.0\ngravity = True\ndebug = False\npaused = False\nuse_sparse_matrix = False\n\nuse_sparse_matrix = args.use_sp_mat\n\nif use_sparse_matrix:\n ti.init(arch=ti.x64)\n print('Using sparse matrix')\nelse:\n ti.init(arch=ti.gpu)\n print('Using jacobi iteration')\n\n_velocities = ti.Vector.field(2, float, shape=(res, res))\n_new_velocities = ti.Vector.field(2, float, shape=(res, res))\nvelocity_divs = ti.field(float, shape=(res, res))\nvelocity_curls = ti.field(float, shape=(res, res))\n_pressures = ti.field(float, shape=(res, res))\n_new_pressures = ti.field(float, shape=(res, res))\n_dye_buffer = ti.Vector.field(3, float, shape=(res, res))\n_new_dye_buffer = ti.Vector.field(3, float, shape=(res, res))\n\n\nclass TexPair:\n def __init__(self, cur, nxt):\n self.cur = cur\n self.nxt = nxt\n\n def swap(self):\n self.cur, self.nxt = self.nxt, self.cur\n\n\nvelocities_pair = TexPair(_velocities, _new_velocities)\npressures_pair = TexPair(_pressures, _new_pressures)\ndyes_pair = TexPair(_dye_buffer, _new_dye_buffer)\n\nif use_sparse_matrix:\n # use a sparse matrix to solve Poisson's pressure equation.\n @ti.kernel\n def fill_laplacian_matrix(A: ti.types.sparse_matrix_builder()):\n for i, j in ti.ndrange(res, res):\n row = i * res + j\n center = 0.0\n if j != 0:\n A[row, row - 1] += -1.0\n center += 1.0\n if j != res - 1:\n A[row, row + 1] += -1.0\n center += 1.0\n if i != 0:\n A[row, row - res] += -1.0\n center += 1.0\n if i != res - 1:\n A[row, row + res] += -1.0\n center += 1.0\n A[row, row] += center\n\n N = res * res\n K = ti.linalg.SparseMatrixBuilder(N, N, max_num_triplets=N * 6)\n b = ti.field(ti.f32, shape=N)\n\n fill_laplacian_matrix(K)\n L = K.build()\n solver = ti.linalg.SparseSolver(solver_type=\"LLT\")\n solver.analyze_pattern(L)\n solver.factorize(L)\n\n\[email protected]\ndef sample(qf, u, v):\n I = ti.Vector([int(u), int(v)])\n I = max(0, min(res - 1, I))\n return qf[I]\n\n\[email protected]\ndef lerp(vl, vr, frac):\n # frac: [0.0, 1.0]\n return vl + frac * (vr - vl)\n\n\[email protected]\ndef bilerp(vf, p):\n u, v = p\n s, t = u - 0.5, v - 0.5\n # floor\n iu, iv = ti.floor(s), ti.floor(t)\n # fract\n fu, fv = s - iu, t - iv\n a = sample(vf, iu, iv)\n b = sample(vf, iu + 1, iv)\n c = sample(vf, iu, iv + 1)\n d = sample(vf, iu + 1, iv + 1)\n return lerp(lerp(a, b, fu), lerp(c, d, fu), fv)\n\n\n# 3rd order Runge-Kutta\[email protected]\ndef backtrace(vf: ti.template(), p, dt: ti.template()):\n v1 = bilerp(vf, p)\n p1 = p - 0.5 * dt * v1\n v2 = bilerp(vf, p1)\n p2 = p - 0.75 * dt * v2\n v3 = bilerp(vf, p2)\n p -= dt * ((2 / 9) * v1 + (1 / 3) * v2 + (4 / 9) * v3)\n return p\n\n\[email 
protected]\ndef advect(vf: ti.template(), qf: ti.template(), new_qf: ti.template()):\n for i, j in vf:\n p = ti.Vector([i, j]) + 0.5\n p = backtrace(vf, p, dt)\n new_qf[i, j] = bilerp(qf, p) * dye_decay\n\n\[email protected]\ndef apply_impulse(vf: ti.template(), dyef: ti.template(),\n imp_data: ti.types.ndarray()):\n g_dir = -ti.Vector([0, 9.8]) * 300\n for i, j in vf:\n omx, omy = imp_data[2], imp_data[3]\n mdir = ti.Vector([imp_data[0], imp_data[1]])\n dx, dy = (i + 0.5 - omx), (j + 0.5 - omy)\n d2 = dx * dx + dy * dy\n # dv = F * dt\n factor = ti.exp(-d2 / force_radius)\n\n dc = dyef[i, j]\n a = dc.norm()\n\n momentum = (mdir * f_strength * factor + g_dir * a / (1 + a)) * dt\n\n v = vf[i, j]\n vf[i, j] = v + momentum\n # add dye\n if mdir.norm() > 0.5:\n dc += ti.exp(-d2 * (4 / (res / 15)**2)) * ti.Vector(\n [imp_data[4], imp_data[5], imp_data[6]])\n\n dyef[i, j] = dc\n\n\[email protected]\ndef divergence(vf: ti.template()):\n for i, j in vf:\n vl = sample(vf, i - 1, j)\n vr = sample(vf, i + 1, j)\n vb = sample(vf, i, j - 1)\n vt = sample(vf, i, j + 1)\n vc = sample(vf, i, j)\n if i == 0:\n vl.x = -vc.x\n if i == res - 1:\n vr.x = -vc.x\n if j == 0:\n vb.y = -vc.y\n if j == res - 1:\n vt.y = -vc.y\n velocity_divs[i, j] = (vr.x - vl.x + vt.y - vb.y) * 0.5\n\n\[email protected]\ndef vorticity(vf: ti.template()):\n for i, j in vf:\n vl = sample(vf, i - 1, j)\n vr = sample(vf, i + 1, j)\n vb = sample(vf, i, j - 1)\n vt = sample(vf, i, j + 1)\n velocity_curls[i, j] = (vr.y - vl.y - vt.x + vb.x) * 0.5\n\n\[email protected]\ndef pressure_jacobi(pf: ti.template(), new_pf: ti.template()):\n for i, j in pf:\n pl = sample(pf, i - 1, j)\n pr = sample(pf, i + 1, j)\n pb = sample(pf, i, j - 1)\n pt = sample(pf, i, j + 1)\n div = velocity_divs[i, j]\n new_pf[i, j] = (pl + pr + pb + pt - div) * 0.25\n\n\[email protected]\ndef subtract_gradient(vf: ti.template(), pf: ti.template()):\n for i, j in vf:\n pl = sample(pf, i - 1, j)\n pr = sample(pf, i + 1, j)\n pb = sample(pf, i, j - 1)\n pt = sample(pf, i, j + 1)\n vf[i, j] -= 0.5 * ti.Vector([pr - pl, pt - pb])\n\n\[email protected]\ndef enhance_vorticity(vf: ti.template(), cf: ti.template()):\n # anti-physics visual enhancement...\n for i, j in vf:\n cl = sample(cf, i - 1, j)\n cr = sample(cf, i + 1, j)\n cb = sample(cf, i, j - 1)\n ct = sample(cf, i, j + 1)\n cc = sample(cf, i, j)\n force = ti.Vector([abs(ct) - abs(cb),\n abs(cl) - abs(cr)]).normalized(1e-3)\n force *= curl_strength * cc\n vf[i, j] = min(max(vf[i, j] + force * dt, -1e3), 1e3)\n\n\[email protected]\ndef copy_divergence(div_in: ti.template(), div_out: ti.template()):\n for I in ti.grouped(div_in):\n div_out[I[0] * res + I[1]] = -div_in[I]\n\n\[email protected]\ndef apply_pressure(p_in: ti.types.ndarray(), p_out: ti.template()):\n for I in ti.grouped(p_out):\n p_out[I] = p_in[I[0] * res + I[1]]\n\n\ndef solve_pressure_sp_mat():\n copy_divergence(velocity_divs, b)\n x = solver.solve(b)\n apply_pressure(x, pressures_pair.cur)\n\n\ndef solve_pressure_jacobi():\n for _ in range(p_jacobi_iters):\n pressure_jacobi(pressures_pair.cur, pressures_pair.nxt)\n pressures_pair.swap()\n\n\ndef step(mouse_data):\n advect(velocities_pair.cur, velocities_pair.cur, velocities_pair.nxt)\n advect(velocities_pair.cur, dyes_pair.cur, dyes_pair.nxt)\n velocities_pair.swap()\n dyes_pair.swap()\n\n apply_impulse(velocities_pair.cur, dyes_pair.cur, mouse_data)\n\n divergence(velocities_pair.cur)\n\n if curl_strength:\n vorticity(velocities_pair.cur)\n enhance_vorticity(velocities_pair.cur, velocity_curls)\n\n if 
use_sparse_matrix:\n solve_pressure_sp_mat()\n else:\n solve_pressure_jacobi()\n\n subtract_gradient(velocities_pair.cur, pressures_pair.cur)\n\n if debug:\n divergence(velocities_pair.cur)\n div_s = np.sum(velocity_divs.to_numpy())\n print(f'divergence={div_s}')\n\n\nclass MouseDataGen(object):\n def __init__(self):\n self.prev_mouse = None\n self.prev_color = None\n\n def __call__(self, gui):\n # [0:2]: normalized delta direction\n # [2:4]: current mouse xy\n # [4:7]: color\n mouse_data = np.zeros(8, dtype=np.float32)\n if gui.is_pressed(ti.GUI.LMB):\n mxy = np.array(gui.get_cursor_pos(), dtype=np.float32) * res\n if self.prev_mouse is None:\n self.prev_mouse = mxy\n # Set lower bound to 0.3 to prevent too dark colors\n self.prev_color = (np.random.rand(3) * 0.7) + 0.3\n else:\n mdir = mxy - self.prev_mouse\n mdir = mdir / (np.linalg.norm(mdir) + 1e-5)\n mouse_data[0], mouse_data[1] = mdir[0], mdir[1]\n mouse_data[2], mouse_data[3] = mxy[0], mxy[1]\n mouse_data[4:7] = self.prev_color\n self.prev_mouse = mxy\n else:\n self.prev_mouse = None\n self.prev_color = None\n return mouse_data\n\n\ndef reset():\n velocities_pair.cur.fill(0)\n pressures_pair.cur.fill(0)\n dyes_pair.cur.fill(0)\n\n\nvisualize_d = True #visualize dye (default)\nvisualize_v = False #visualize velocity\nvisualize_c = False #visualize curl\n\ngui = ti.GUI('Stable Fluid', (res, res))\nmd_gen = MouseDataGen()\n\nwhile gui.running:\n if gui.get_event(ti.GUI.PRESS):\n e = gui.event\n if e.key == ti.GUI.ESCAPE:\n break\n elif e.key == 'r':\n paused = False\n reset()\n elif e.key == 's':\n if curl_strength:\n curl_strength = 0\n else:\n curl_strength = 7\n elif e.key == 'g':\n gravity = not gravity\n elif e.key == 'v':\n visualize_v = True\n visualize_c = False\n visualize_d = False\n elif e.key == 'd':\n visualize_d = True\n visualize_v = False\n visualize_c = False\n elif e.key == 'c':\n visualize_c = True\n visualize_d = False\n visualize_v = False\n elif e.key == 'p':\n paused = not paused\n elif e.key == 'd':\n debug = not debug\n\n # Debug divergence:\n # print(max((abs(velocity_divs.to_numpy().reshape(-1)))))\n\n if not paused:\n mouse_data = md_gen(gui)\n step(mouse_data)\n if visualize_c:\n vorticity(velocities_pair.cur)\n gui.set_image(velocity_curls.to_numpy() * 0.03 + 0.5)\n elif visualize_d:\n gui.set_image(dyes_pair.cur)\n elif visualize_v:\n gui.set_image(velocities_pair.cur.to_numpy() * 0.01 + 0.5)\n gui.show()\n" ]
[ [ "numpy.linalg.norm", "numpy.random.rand", "numpy.zeros" ] ]
fanyang587/NestedNet
[ "8479ff62d26f5d110277185397ca29bb536cde56" ]
[ "datasets/S3DIS2.py" ]
[ "#\n#\n# 0=========================0\n# | Kernel Point CNN |\n# 0=========================0\n#\n#\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Handle S3DIS dataset in a class\n#\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Hugues THOMAS - 11/06/2018\n#\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Imports and global variables\n# \\**********************************/\n#\n\n# Basic libs\nimport json\nimport tensorflow as tf\nimport numpy as np\nimport time\nimport pickle\nfrom sklearn.neighbors import KDTree\nimport os\n\n# PLY reader\nfrom utils.ply import read_ply, write_ply\n\n# OS functions\nfrom os import makedirs, listdir\nfrom os.path import exists, join, isfile, isdir\n\n# Dataset parent class\nfrom datasets.common_org import Dataset\n\n# Subsampling extension\nimport cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Utility functions\n# \\***********************/\n#\n\ndef grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):\n \"\"\"\n CPP wrapper for a grid subsampling (method = barycenter for points and features\n :param points: (N, 3) matrix of input points\n :param features: optional (N, d) matrix of features (floating number)\n :param labels: optional (N,) matrix of integer labels\n :param sampleDl: parameter defining the size of grid voxels\n :param verbose: 1 to display\n :return: subsampled points, with features and/or labels depending of the input\n \"\"\"\n\n if (features is None) and (labels is None):\n return cpp_subsampling.compute(points, sampleDl=sampleDl, verbose=verbose)\n elif (labels is None):\n return cpp_subsampling.compute(points, features=features, sampleDl=sampleDl, verbose=verbose)\n elif (features is None):\n return cpp_subsampling.compute(points, classes=labels, sampleDl=sampleDl, verbose=verbose)\n else:\n return cpp_subsampling.compute(points, features=features, classes=labels, sampleDl=sampleDl, verbose=verbose)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n#\n# Class Definition\n# \\***************/\n#\n\n\nclass S3DISDataset(Dataset):\n \"\"\"\n Class to handle S3DIS dataset for segmentation task.\n \"\"\"\n\n # Initiation methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def __init__(self, input_threads=8):\n Dataset.__init__(self, 'S3DIS')\n\n ###########################\n # Object classes parameters\n ###########################\n\n # Dict from labels to names\n self.label_to_names = {0: 'ceiling',\n 1: 'floor',\n 2: 'wall',\n 3: 'beam',\n 4: 'column',\n 5: 'window',\n 6: 'door',\n 7: 'chair',\n 8: 'table',\n 9: 'bookcase',\n 10: 'sofa',\n 11: 'board',\n 12: 'clutter'}\n\n # Initiate a bunch of variables concerning class labels\n self.init_labels()\n\n # List of classes ignored during training (can be empty)\n self.ignored_labels = np.array([])\n\n ####################\n # Dataset parameters\n ####################\n\n # Type of task conducted on this dataset\n self.network_model = 'cloud_segmentation'\n\n # Number of input threads\n self.num_threads = input_threads\n\n 
##########################\n # Parameters for the files\n ##########################\n\n # Path of the folder containing ply files\n self.path = '../../Data/Stanford3dDataset_v1.2'\n\n # Path of the training files\n self.train_path = 'original_ply'\n\n # List of files to process\n ply_path = join(self.path, self.train_path)\n\n # Proportion of validation scenes\n self.cloud_names = ['Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6']\n self.all_splits = [0, 1, 2, 3, 4, 5]\n self.validation_split = 2\n\n # List of training files\n self.train_files = [join(ply_path, f + '.ply') for f in self.cloud_names]\n\n ###################\n # Prepare ply files\n ###################\n\n self.prepare_S3DIS_ply()\n\n def prepare_S3DIS_ply(self):\n\n print('\\nPreparing ply files')\n t0 = time.time()\n\n # Folder for the ply files\n ply_path = join(self.path, self.train_path)\n if not exists(ply_path):\n makedirs(ply_path)\n\n for cloud_name in self.cloud_names:\n\n # Pass if the cloud has already been computed\n cloud_file = join(ply_path, cloud_name + '.ply')\n if exists(cloud_file):\n continue\n\n # Get rooms of the current cloud\n cloud_folder = join(self.path, cloud_name)\n room_folders = [join(cloud_folder, room) for room in listdir(cloud_folder) if isdir(join(cloud_folder, room))]\n\n # Initiate containers\n cloud_points = np.empty((0, 3), dtype=np.float32)\n cloud_colors = np.empty((0, 3), dtype=np.uint8)\n cloud_classes = np.empty((0, 1), dtype=np.int32)\n\n # Loop over rooms\n for i, room_folder in enumerate(room_folders):\n\n print('Cloud %s - Room %d/%d : %s' % (cloud_name, i+1, len(room_folders), room_folder.split('\\\\')[-1]))\n\n for object_name in listdir(join(room_folder, 'Annotations')):\n\n if object_name[-4:] == '.txt':\n\n # Text file containing point of the object\n object_file = join(room_folder, 'Annotations', object_name)\n\n # Object class and ID\n tmp = object_name[:-4].split('_')[0]\n if tmp in self.name_to_label:\n object_class = self.name_to_label[tmp]\n elif tmp in ['stairs']:\n object_class = self.name_to_label['clutter']\n else:\n raise ValueError('Unknown object name: ' + str(tmp))\n\n # Read object points and colors\n with open(object_file, 'r') as f:\n object_data = np.array([[float(x) for x in line.split()] for line in f])\n\n # Stack all data\n cloud_points = np.vstack((cloud_points, object_data[:, 0:3].astype(np.float32)))\n cloud_colors = np.vstack((cloud_colors, object_data[:, 3:6].astype(np.uint8)))\n object_classes = np.full((object_data.shape[0], 1), object_class, dtype=np.int32)\n cloud_classes = np.vstack((cloud_classes, object_classes))\n\n # Save as ply\n write_ply(cloud_file,\n (cloud_points, cloud_colors, cloud_classes),\n ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])\n\n print('Done in {:.1f}s'.format(time.time() - t0))\n\n def load_subsampled_clouds(self, subsampling_parameter):\n \"\"\"\n Presubsample point clouds and load into memory (Load KDTree for neighbors searches\n \"\"\"\n\n if 0 < subsampling_parameter <= 0.01:\n raise ValueError('subsampling_parameter too low (should be over 1 cm')\n\n # Create path for files\n tree_path = join(self.path, 'input_{:.3f}'.format(subsampling_parameter))\n if not exists(tree_path):\n makedirs(tree_path)\n\n # Initiate containers\n self.input_trees = {'training': [], 'validation': []}\n self.input_colors = {'training': [], 'validation': []}\n self.input_labels = {'training': [], 'validation': []}\n\n for i, file_path in enumerate(self.train_files):\n\n # Restart timer\n t0 = time.time()\n\n # get cloud 
name and split\n cloud_name = file_path.split('/')[-1][:-4]\n if self.all_splits[i] == self.validation_split:\n cloud_split = 'validation'\n else:\n cloud_split = 'training'\n\n # Name of the input files\n KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))\n sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))\n\n # Check if inputs have already been computed\n if isfile(KDTree_file):\n print('\\nFound KDTree for cloud {:s}, subsampled at {:.3f}'.format(cloud_name, subsampling_parameter))\n\n # read ply with data\n data = read_ply(sub_ply_file)\n sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T\n sub_labels = data['class']\n\n # Read pkl with search tree\n with open(KDTree_file, 'rb') as f:\n search_tree = pickle.load(f)\n\n else:\n print('\\nPreparing KDTree for cloud {:s}, subsampled at {:.3f}'.format(cloud_name, subsampling_parameter))\n\n # Read ply file\n data = read_ply(file_path)\n points = np.vstack((data['x'], data['y'], data['z'])).T\n colors = np.vstack((data['red'], data['green'], data['blue'])).T\n labels = data['class']\n\n # Subsample cloud\n sub_points, sub_colors, sub_labels = grid_subsampling(points,\n features=colors,\n labels=labels,\n sampleDl=subsampling_parameter)\n\n # Rescale float color and squeeze label\n sub_colors = sub_colors / 255\n sub_labels = np.squeeze(sub_labels)\n\n # Get chosen neighborhoods\n search_tree = KDTree(sub_points, leaf_size=50)\n\n # Save KDTree\n with open(KDTree_file, 'wb') as f:\n pickle.dump(search_tree, f)\n\n # Save ply\n write_ply(sub_ply_file,\n [sub_points, sub_colors, sub_labels],\n ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])\n\n # Fill data containers\n self.input_trees[cloud_split] += [search_tree]\n self.input_colors[cloud_split] += [sub_colors]\n self.input_labels[cloud_split] += [sub_labels]\n\n size = sub_colors.shape[0] * 4 * 7\n print('{:.1f} MB loaded in {:.1f}s'.format(size * 1e-6, time.time() - t0))\n\n print('\\nPreparing reprojection indices for testing')\n\n # Get number of clouds\n self.num_training = len(self.input_trees['training'])\n self.num_validation = len(self.input_trees['validation'])\n\n # Get validation and test reprojection indices\n self.validation_proj = []\n self.validation_labels = []\n i_val = 0\n for i, file_path in enumerate(self.train_files):\n\n # Restart timer\n t0 = time.time()\n\n # Get info on this cloud\n cloud_name = file_path.split('/')[-1][:-4]\n\n # Validation projection and labels\n if self.all_splits[i] == self.validation_split:\n proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))\n if isfile(proj_file):\n with open(proj_file, 'rb') as f:\n proj_inds, labels = pickle.load(f)\n else:\n data = read_ply(file_path)\n points = np.vstack((data['x'], data['y'], data['z'])).T\n labels = data['class']\n\n # Compute projection inds\n proj_inds = np.squeeze(self.input_trees['validation'][i_val].query(points, return_distance=False))\n proj_inds = proj_inds.astype(np.int32)\n\n # Save\n with open(proj_file, 'wb') as f:\n pickle.dump([proj_inds, labels], f)\n\n self.validation_proj += [proj_inds]\n self.validation_labels += [labels]\n i_val += 1\n print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0))\n\n print()\n\n return\n\n # Utility methods\n # ------------------------------------------------------------------------------------------------------------------\n\n\n def get_batch_gen(self, split, config):\n \"\"\"\n A function defining the batch generator for each split. 
Should return the generator, the generated types and\n generated shapes\n :param split: string in \"training\", \"validation\" or \"test\"\n :param config: configuration file\n :return: gen_func, gen_types, gen_shapes\n \"\"\"\n\n ############\n # Parameters\n ############\n\n # Initiate parameters depending on the chosen split\n if split == 'training':\n\n # First compute the number of point we want to pick in each cloud and for each class\n epoch_n = config.epoch_steps * config.batch_num\n random_pick_n = None\n\n elif split == 'validation':\n\n # First compute the number of point we want to pick in each cloud and for each class\n epoch_n = config.validation_size * config.batch_num\n\n elif split == 'test':\n\n # First compute the number of point we want to pick in each cloud and for each class\n epoch_n = config.validation_size * config.batch_num\n\n elif split == 'ERF':\n\n # First compute the number of point we want to pick in each cloud and for each class\n epoch_n = 1000000\n self.batch_limit = 1\n np.random.seed(42)\n\n else:\n raise ValueError('Split argument in data generator should be \"training\", \"validation\" or \"test\"')\n\n # Initiate potentials for regular generation\n if not hasattr(self, 'potentials'):\n self.potentials = {}\n self.min_potentials = {}\n\n # Reset potentials\n self.potentials[split] = []\n self.min_potentials[split] = []\n data_split = split\n if split == 'ERF':\n data_split = 'validation'\n for i, tree in enumerate(self.input_trees[data_split]):\n self.potentials[split] += [np.random.rand(tree.data.shape[0]) * 1e-3]\n self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))]\n\n ##########################\n # Def generators functions\n ##########################\n\n def get_random_epoch_inds():\n\n # Initiate container for indices\n all_epoch_inds = np.zeros((2, 0), dtype=np.int32)\n\n # Choose random points of each class for each cloud\n for cloud_ind, cloud_labels in enumerate(self.input_labels[split]):\n epoch_indices = np.empty((0,), dtype=np.int32)\n for label_ind, label in enumerate(self.label_values):\n if label not in self.ignored_labels:\n\n label_indices = np.where(np.equal(cloud_labels, label))[0]\n if len(label_indices) <= random_pick_n:\n epoch_indices = np.hstack((epoch_indices, label_indices))\n elif len(label_indices) < 50 * random_pick_n:\n new_randoms = np.random.choice(label_indices, size=random_pick_n, replace=False)\n epoch_indices = np.hstack((epoch_indices, new_randoms.astype(np.int32)))\n else:\n rand_inds = []\n while len(rand_inds) < random_pick_n:\n rand_inds = np.unique(np.random.choice(label_indices, size=5 * random_pick_n, replace=True))\n epoch_indices = np.hstack((epoch_indices, rand_inds[:random_pick_n].astype(np.int32)))\n\n # Stack those indices with the cloud index\n epoch_indices = np.vstack((np.full(epoch_indices.shape, cloud_ind, dtype=np.int32), epoch_indices))\n\n # Update the global indice container\n all_epoch_inds = np.hstack((all_epoch_inds, epoch_indices))\n\n return all_epoch_inds\n\n def random_balanced_gen():\n\n # First choose the point we are going to look at for this epoch\n # *************************************************************\n\n # This generator cannot be used on test split\n if split == 'training':\n all_epoch_inds = get_random_epoch_inds()\n elif split == 'validation':\n all_epoch_inds = get_random_epoch_inds()\n else:\n raise ValueError('generator to be defined for test split.')\n\n # Now create batches\n # ******************\n\n # Initiate concatanation lists\n p_list 
= []\n c_list = []\n pl_list = []\n pi_list = []\n ci_list = []\n\n batch_n = 0\n\n # Generator loop\n for i, rand_i in enumerate(np.random.permutation(all_epoch_inds.shape[1])):\n\n cloud_ind = all_epoch_inds[0, rand_i]\n point_ind = all_epoch_inds[1, rand_i]\n\n # Get points from tree structure\n points = np.array(self.input_trees[split][cloud_ind].data, copy=False)\n\n # Center point of input region\n center_point = points[point_ind, :].reshape(1, -1)\n\n # Add noise to the center point\n noise = np.random.normal(scale=config.in_radius/10, size=center_point.shape)\n pick_point = center_point + noise.astype(center_point.dtype)\n\n # Indices of points in input region\n input_inds = self.input_trees[split][cloud_ind].query_radius(pick_point,\n r=config.in_radius)[0]\n\n # Number collected\n n = input_inds.shape[0]\n\n # Safe check for very dense areas\n if n > self.batch_limit:\n input_inds = np.random.choice(input_inds, size=int(self.batch_limit)-1, replace=False)\n n = input_inds.shape[0]\n\n # Collect points and colors\n input_points = (points[input_inds] - pick_point).astype(np.float32)\n input_colors = self.input_colors[split][cloud_ind][input_inds]\n input_labels = self.input_labels[split][cloud_ind][input_inds]\n input_labels = np.array([self.label_to_idx[l] for l in input_labels])\n\n # In case batch is full, yield it and reset it\n if batch_n + n > self.batch_limit and batch_n > 0:\n yield (np.concatenate(p_list, axis=0),\n np.concatenate(c_list, axis=0),\n np.concatenate(pl_list, axis=0),\n np.array([tp.shape[0] for tp in p_list]),\n np.concatenate(pi_list, axis=0),\n np.array(ci_list, dtype=np.int32))\n\n p_list = []\n c_list = []\n pl_list = []\n pi_list = []\n ci_list = []\n batch_n = 0\n\n # Add data to current batch\n if n > 0:\n p_list += [input_points]\n c_list += [np.hstack((input_colors, input_points + pick_point))]\n pl_list += [input_labels]\n pi_list += [input_inds]\n ci_list += [cloud_ind]\n\n # Update batch size\n batch_n += n\n\n if batch_n > 0:\n yield (np.concatenate(p_list, axis=0),\n np.concatenate(c_list, axis=0),\n np.concatenate(pl_list, axis=0),\n np.array([tp.shape[0] for tp in p_list]),\n np.concatenate(pi_list, axis=0),\n np.array(ci_list, dtype=np.int32))\n\n def spatially_regular_gen():\n\n # Initiate concatanation lists\n p_list = []\n c_list = []\n pl_list = []\n pi_list = []\n ci_list = []\n\n batch_n = 0\n\n # Generator loop\n for i in range(epoch_n):\n\n\n # Choose a random cloud\n cloud_ind = int(np.argmin(self.min_potentials[split]))\n\n # Choose point ind as minimum of potentials\n point_ind = np.argmin(self.potentials[split][cloud_ind])\n\n # Get points from tree structure\n points = np.array(self.input_trees[data_split][cloud_ind].data, copy=False)\n\n # Center point of input region\n center_point = points[point_ind, :].reshape(1, -1)\n\n # Add noise to the center point\n if split != 'ERF':\n noise = np.random.normal(scale=config.in_radius/10, size=center_point.shape)\n pick_point = center_point + noise.astype(center_point.dtype)\n else:\n pick_point = center_point\n\n # Indices of points in input region\n input_inds = self.input_trees[data_split][cloud_ind].query_radius(pick_point,\n r=config.in_radius)[0]\n\n # Number collected\n n = input_inds.shape[0]\n\n # Update potentials (Tuckey weights)\n if split != 'ERF':\n dists = np.sum(np.square((points[input_inds] - pick_point).astype(np.float32)), axis=1)\n tukeys = np.square(1 - dists / np.square(config.in_radius))\n tukeys[dists > np.square(config.in_radius)] = 0\n 
self.potentials[split][cloud_ind][input_inds] += tukeys\n self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind]))\n\n # Safe check for very dense areas\n if n > self.batch_limit:\n input_inds = np.random.choice(input_inds, size=int(self.batch_limit)-1, replace=False)\n n = input_inds.shape[0]\n\n # Collect points and colors\n input_points = (points[input_inds] - pick_point).astype(np.float32)\n input_colors = self.input_colors[data_split][cloud_ind][input_inds]\n if split in ['test', 'ERF']:\n input_labels = np.zeros(input_points.shape[0])\n else:\n input_labels = self.input_labels[data_split][cloud_ind][input_inds]\n input_labels = np.array([self.label_to_idx[l] for l in input_labels])\n\n # In case batch is full, yield it and reset it\n if batch_n + n > self.batch_limit and batch_n > 0:\n\n yield (np.concatenate(p_list, axis=0),\n np.concatenate(c_list, axis=0),\n np.concatenate(pl_list, axis=0),\n np.array([tp.shape[0] for tp in p_list]),\n np.concatenate(pi_list, axis=0),\n np.array(ci_list, dtype=np.int32))\n\n p_list = []\n c_list = []\n pl_list = []\n pi_list = []\n ci_list = []\n batch_n = 0\n\n # Add data to current batch\n if n > 0:\n p_list += [input_points]\n c_list += [np.hstack((input_colors, input_points + pick_point))]\n pl_list += [input_labels]\n pi_list += [input_inds]\n ci_list += [cloud_ind]\n\n # Update batch size\n batch_n += n\n\n if batch_n > 0:\n yield (np.concatenate(p_list, axis=0),\n np.concatenate(c_list, axis=0),\n np.concatenate(pl_list, axis=0),\n np.array([tp.shape[0] for tp in p_list]),\n np.concatenate(pi_list, axis=0),\n np.array(ci_list, dtype=np.int32))\n\n ###################\n # Choose generators\n ###################\n\n # Define the generator that should be used for this split\n if split == 'training':\n gen_func = spatially_regular_gen\n\n elif split == 'validation':\n gen_func = spatially_regular_gen\n\n elif split in ['test', 'ERF']:\n gen_func = spatially_regular_gen\n\n else:\n raise ValueError('Split argument in data generator should be \"training\", \"validation\" or \"test\"')\n\n # Define generated types and shapes\n gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32)\n gen_shapes = ([None, 3], [None, 6], [None], [None], [None], [None])\n\n return gen_func, gen_types, gen_shapes\n\n def get_tf_mapping(self, config):\n\n # Returned mapping function\n def tf_map(stacked_points, stacked_colors, point_labels, stacks_lengths, point_inds, cloud_inds):\n \"\"\"\n [None, 3], [None, 3], [None], [None]\n \"\"\"\n\n # Get batch indice for each point\n batch_inds = self.tf_get_batch_inds(stacks_lengths)\n\n # Augment input points\n stacked_points, scales, rots = self.tf_augment_input(stacked_points,\n batch_inds,\n config)\n\n # First add a column of 1 as feature for the network to be able to learn 3D shapes\n stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)\n\n # Get coordinates and colors\n stacked_original_coordinates = stacked_colors[:, 3:]\n stacked_colors = stacked_colors[:, :3]\n\n # Augmentation : randomly drop colors\n if config.in_features_dim in [4, 5]:\n num_batches = batch_inds[-1] + 1\n s = tf.cast(tf.less(tf.random_uniform((num_batches,)), config.augment_color), tf.float32)\n stacked_s = tf.gather(s, batch_inds)\n stacked_colors = stacked_colors * tf.expand_dims(stacked_s, axis=1)\n\n # Then use positions or not\n if config.in_features_dim == 1:\n pass\n elif config.in_features_dim == 2:\n stacked_features = tf.concat((stacked_features, 
stacked_original_coordinates[:, 2:]), axis=1)\n elif config.in_features_dim == 3:\n stacked_features = stacked_colors\n elif config.in_features_dim == 4:\n stacked_features = tf.concat((stacked_features, stacked_colors), axis=1)\n elif config.in_features_dim == 5:\n stacked_features = tf.concat((stacked_features, stacked_colors, stacked_original_coordinates[:, 2:]), axis=1)\n elif config.in_features_dim == 7:\n stacked_features = tf.concat((stacked_features, stacked_colors, stacked_points), axis=1)\n else:\n raise ValueError('Only accepted input dimensions are 1, 3, 4 and 7 (without and with rgb/xyz)')\n\n # Get the whole input list\n input_list = self.tf_segmentation_inputs(config,\n stacked_points,\n stacked_features,\n point_labels,\n stacks_lengths,\n batch_inds)\n\n # Add scale and rotation for testing\n input_list += [scales, rots]\n input_list += [point_inds, cloud_inds]\n\n return input_list\n\n return tf_map\n\n def load_evaluation_points(self, file_path):\n \"\"\"\n Load points (from test or validation split) on which the metrics should be evaluated\n \"\"\"\n\n # Get original points\n data = read_ply(file_path)\n return np.vstack((data['x'], data['y'], data['z'])).T\n\n\n\n # Debug methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def check_input_pipeline_timing(self, config):\n\n # Create a session for running Ops on the Graph.\n cProto = tf.ConfigProto()\n cProto.gpu_options.allow_growth = True\n self.sess = tf.Session(config=cProto)\n\n # Init variables\n self.sess.run(tf.global_variables_initializer())\n\n # Initialise iterator with train data\n self.sess.run(self.train_init_op)\n\n # Run some epochs\n n_b = config.batch_num\n t0 = time.time()\n mean_dt = np.zeros(2)\n last_display = t0\n epoch = 0\n training_step = 0\n while epoch < 100:\n\n try:\n # Run one step of the model.\n t = [time.time()]\n ops = self.flat_inputs\n\n # Get next inputs\n np_flat_inputs = self.sess.run(ops)\n t += [time.time()]\n\n # Restructure flatten inputs\n points = np_flat_inputs[:config.num_layers]\n neighbors = np_flat_inputs[config.num_layers:2 * config.num_layers]\n batches = np_flat_inputs[-7]\n n_b = 0.99 * n_b + 0.01 * batches.shape[0]\n t += [time.time()]\n\n # Average timing\n mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))\n\n # Console display\n if (t[-1] - last_display) > 1.0:\n last_display = t[-1]\n message = 'Step {:08d} : timings {:4.2f} {:4.2f} - {:d} x {:d} => b = {:.1f}'\n print(message.format(training_step,\n 1000 * mean_dt[0],\n 1000 * mean_dt[1],\n neighbors[0].shape[0],\n neighbors[0].shape[1],\n n_b))\n\n training_step += 1\n\n except tf.errors.OutOfRangeError:\n print('End of train dataset')\n self.sess.run(self.train_init_op)\n epoch += 1\n\n return\n\n def check_input_pipeline_batches(self, config):\n\n # Create a session for running Ops on the Graph.\n cProto = tf.ConfigProto()\n cProto.gpu_options.allow_growth = True\n self.sess = tf.Session(config=cProto)\n\n # Init variables\n self.sess.run(tf.global_variables_initializer())\n\n # Initialise iterator with train data\n self.sess.run(self.train_init_op)\n\n # Run some epochs\n mean_b = 0\n min_b = 1000000\n max_b = 0\n t0 = time.time()\n mean_dt = np.zeros(2)\n last_display = t0\n epoch = 0\n training_step = 0\n while epoch < 100:\n\n try:\n # Run one step of the model.\n t = [time.time()]\n ops = self.flat_inputs\n\n # Get next inputs\n np_flat_inputs = self.sess.run(ops)\n t += [time.time()]\n\n # Restructure 
flatten inputs\n points = np_flat_inputs[:config.num_layers]\n neighbors = np_flat_inputs[config.num_layers:2 * config.num_layers]\n batches = np_flat_inputs[-7]\n\n max_ind = np.max(batches)\n batches_len = [np.sum(b < max_ind-0.5) for b in batches]\n\n for b_l in batches_len:\n mean_b = 0.99 * mean_b + 0.01 * b_l\n max_b = max(max_b, np.max(batches_len))\n min_b = min(min_b, np.min(batches_len))\n\n print('{:d} < {:.1f} < {:d} /'.format(min_b, mean_b, max_b),\n self.training_batch_limit,\n batches_len)\n\n t += [time.time()]\n\n # Average timing\n mean_dt = 0.01 * mean_dt + 0.99 * (np.array(t[1:]) - np.array(t[:-1]))\n\n training_step += 1\n\n except tf.errors.OutOfRangeError:\n print('End of train dataset')\n self.sess.run(self.train_init_op)\n epoch += 1\n\n return\n\n def check_input_pipeline_neighbors(self, config):\n\n # Create a session for running Ops on the Graph.\n cProto = tf.ConfigProto()\n cProto.gpu_options.allow_growth = True\n self.sess = tf.Session(config=cProto)\n\n # Init variables\n self.sess.run(tf.global_variables_initializer())\n\n # Initialise iterator with train data\n self.sess.run(self.train_init_op)\n\n # Run some epochs\n hist_n = 500\n neighb_hists = np.zeros((config.num_layers, hist_n), dtype=np.int32)\n t0 = time.time()\n mean_dt = np.zeros(2)\n last_display = t0\n epoch = 0\n training_step = 0\n while epoch < 100:\n\n try:\n # Run one step of the model.\n t = [time.time()]\n ops = self.flat_inputs\n\n # Get next inputs\n np_flat_inputs = self.sess.run(ops)\n t += [time.time()]\n\n # Restructure flatten inputs\n points = np_flat_inputs[:config.num_layers]\n neighbors = np_flat_inputs[config.num_layers:2 * config.num_layers]\n batches = np_flat_inputs[-7]\n\n for neighb_mat in neighbors:\n print(neighb_mat.shape)\n\n counts = [np.sum(neighb_mat < neighb_mat.shape[0], axis=1) for neighb_mat in neighbors]\n hists = [np.bincount(c, minlength=hist_n) for c in counts]\n\n neighb_hists += np.vstack(hists)\n\n print('***********************')\n dispstr = ''\n fmt_l = len(str(int(np.max(neighb_hists)))) + 1\n for neighb_hist in neighb_hists:\n for v in neighb_hist:\n dispstr += '{num:{fill}{width}}'.format(num=v, fill=' ', width=fmt_l)\n dispstr += '\\n'\n print(dispstr)\n print('***********************')\n\n t += [time.time()]\n\n # Average timing\n mean_dt = 0.01 * mean_dt + 0.99 * (np.array(t[1:]) - np.array(t[:-1]))\n\n training_step += 1\n\n except tf.errors.OutOfRangeError:\n print('End of train dataset')\n self.sess.run(self.train_init_op)\n epoch += 1\n\n return\n\n def check_input_pipeline_colors(self, config):\n\n # Create a session for running Ops on the Graph.\n cProto = tf.ConfigProto()\n cProto.gpu_options.allow_growth = True\n self.sess = tf.Session(config=cProto)\n\n # Init variables\n self.sess.run(tf.global_variables_initializer())\n\n # Initialise iterator with train data\n self.sess.run(self.train_init_op)\n\n # Run some epochs\n t0 = time.time()\n mean_dt = np.zeros(2)\n epoch = 0\n training_step = 0\n while epoch < 100:\n\n try:\n # Run one step of the model.\n t = [time.time()]\n ops = self.flat_inputs\n\n # Get next inputs\n np_flat_inputs = self.sess.run(ops)\n t += [time.time()]\n\n # Restructure flatten inputs\n stacked_points = np_flat_inputs[:config.num_layers]\n stacked_colors = np_flat_inputs[-9]\n batches = np_flat_inputs[-7]\n stacked_labels = np_flat_inputs[-5]\n\n # Extract a point cloud and its color to save\n max_ind = np.max(batches)\n for b_i, b in enumerate(batches):\n\n # Eliminate shadow indices\n b = b[b < max_ind-0.5]\n\n 
# Get points and colors (only for the concerned parts)\n points = stacked_points[0][b]\n colors = stacked_colors[b]\n labels = stacked_labels[b]\n\n write_ply('S3DIS_input_{:d}.ply'.format(b_i),\n [points, colors[:, 1:4], labels],\n ['x', 'y', 'z', 'red', 'green', 'blue', 'labels'])\n\n a = 1/0\n\n\n\n t += [time.time()]\n\n # Average timing\n mean_dt = 0.01 * mean_dt + 0.99 * (np.array(t[1:]) - np.array(t[:-1]))\n\n training_step += 1\n\n except tf.errors.OutOfRangeError:\n print('End of train dataset')\n self.sess.run(self.train_init_op)\n epoch += 1\n\n return\n\n def check_debug_input(self, config, path):\n\n # Get debug file\n file = join(path, 'all_debug_inputs.pkl')\n with open(file, 'rb') as f1:\n inputs = pickle.load(f1)\n\n #Print inputs\n nl = config.num_layers\n for layer in range(nl):\n\n print('Layer : {:d}'.format(layer))\n\n points = inputs[layer]\n neighbors = inputs[nl + layer]\n pools = inputs[2*nl + layer]\n upsamples = inputs[3*nl + layer]\n\n nan_percentage = 100 * np.sum(np.isnan(points)) / np.prod(points.shape)\n print('Points =>', points.shape, '{:.1f}% NaN'.format(nan_percentage))\n nan_percentage = 100 * np.sum(np.isnan(neighbors)) / np.prod(neighbors.shape)\n print('neighbors =>', neighbors.shape, '{:.1f}% NaN'.format(nan_percentage))\n nan_percentage = 100 * np.sum(np.isnan(pools)) / (np.prod(pools.shape) +1e-6)\n print('pools =>', pools.shape, '{:.1f}% NaN'.format(nan_percentage))\n nan_percentage = 100 * np.sum(np.isnan(upsamples)) / (np.prod(upsamples.shape) +1e-6)\n print('upsamples =>', upsamples.shape, '{:.1f}% NaN'.format(nan_percentage))\n\n ind = 4 * nl\n features = inputs[ind]\n nan_percentage = 100 * np.sum(np.isnan(features)) / np.prod(features.shape)\n print('features =>', features.shape, '{:.1f}% NaN'.format(nan_percentage))\n ind += 1\n batch_weights = inputs[ind]\n ind += 1\n in_batches = inputs[ind]\n max_b = np.max(in_batches)\n print(in_batches.shape)\n in_b_sizes = np.sum(in_batches < max_b - 0.5, axis=-1)\n print('in_batch_sizes =>', in_b_sizes)\n ind += 1\n out_batches = inputs[ind]\n max_b = np.max(out_batches)\n print(out_batches.shape)\n out_b_sizes = np.sum(out_batches < max_b - 0.5, axis=-1)\n print('out_batch_sizes =>', out_b_sizes)\n ind += 1\n point_labels = inputs[ind]\n ind += 1\n if config.dataset.startswith('ShapeNetPart_multi'):\n object_labels = inputs[ind]\n nan_percentage = 100 * np.sum(np.isnan(object_labels)) / np.prod(object_labels.shape)\n print('object_labels =>', object_labels.shape, '{:.1f}% NaN'.format(nan_percentage))\n ind += 1\n augment_scales = inputs[ind]\n ind += 1\n augment_rotations = inputs[ind]\n ind += 1\n\n print('\\npoolings and upsamples nums :\\n')\n\n #Print inputs\n nl = config.num_layers\n for layer in range(nl):\n\n print('\\nLayer : {:d}'.format(layer))\n\n neighbors = inputs[nl + layer]\n pools = inputs[2*nl + layer]\n upsamples = inputs[3*nl + layer]\n\n max_n = np.max(neighbors)\n nums = np.sum(neighbors < max_n - 0.5, axis=-1)\n print('min neighbors =>', np.min(nums))\n\n if np.prod(pools.shape) > 0:\n max_n = np.max(pools)\n nums = np.sum(pools < max_n - 0.5, axis=-1)\n print('min pools =>', np.min(nums))\n\n if np.prod(upsamples.shape) > 0:\n max_n = np.max(upsamples)\n nums = np.sum(upsamples < max_n - 0.5, axis=-1)\n print('min upsamples =>', np.min(nums))\n\n\n print('\\nFinished\\n\\n')" ]
[ [ "numpy.random.rand", "numpy.random.choice", "numpy.argmin", "numpy.min", "tensorflow.global_variables_initializer", "numpy.max", "numpy.random.normal", "numpy.bincount", "numpy.full", "numpy.empty", "tensorflow.concat", "numpy.concatenate", "tensorflow.shape", "tensorflow.random_uniform", "tensorflow.ConfigProto", "numpy.prod", "numpy.vstack", "numpy.square", "numpy.equal", "numpy.array", "numpy.zeros", "tensorflow.expand_dims", "tensorflow.Session", "sklearn.neighbors.KDTree", "numpy.hstack", "numpy.squeeze", "numpy.isnan", "numpy.random.seed", "numpy.sum", "numpy.random.permutation", "tensorflow.gather" ] ]
ZGainsforth/thermopy3
[ "b3a1ddcc92d17ff297a22d97daa645748abfeba2" ]
[ "thermopy/iapws.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom .units import Pressure, Temperature, Enthalpy\nfrom numpy import array, sum, sqrt\n\n\nclass Water(object):\n\n \"\"\"Taken from\n\n The International Association for the Properties of Water and\n Steam. Lucerne, Switzerland. August 2007. Revised Release on the\n IAPWS Industrial Formulation 1997 for the Thermodynamic Properties\n of Water and Steam.\n\n Functions implemented:\n\n Saturation line\n h(p,T) region1, region2 and no warnings yet\n \"\"\"\n\n R = 0.461526 # kJ/(kg K)\n Tc = 647.096 # Triple point temperature (K)\n pc = 22.064 # Triple point pressure (MPa)\n rc = 322 # Triple point density kg/m3\n\n data = {\n 'n': array([0.11670521452767E4,\n - 0.72421316703206E6,\n - 0.17073846940092E2,\n 0.12020824702470E5,\n - 0.32325550322333E7,\n 0.14915108613530E2,\n - 0.48232657361591E4,\n 0.40511340542057E6,\n - 0.23855557567849,\n 0.65017534844798E3], 'd'),\n 'pT_datar1': array([\n [0, -2, 0.14632971213167],\n [0, -1, -0.84548187169114],\n [0, 0, -0.37563603672040E1],\n [0, 1, 0.33855169168385E1],\n [0, 2, -0.95791963387872],\n [0, 3, 0.15772038513228],\n [0, 4, -0.16616417199501E-1],\n [0, 5, 0.81214629983568E-3],\n [1, -9, 0.28319080123804E-3],\n [1, -7, -0.60706301565874E-3],\n [1, -1, -0.18990068218419E-1],\n [1, 0, -0.32529748770505E-1],\n [1, 1, -0.21841717175414E-1],\n [1, 3, -0.52838357969930E-4],\n [2, -3, -0.47184321073267E-3],\n [2, 0, -0.30001780793026E-3],\n [2, 1, 0.47661393906987E-4],\n [2, 3, -0.44141845330846E-5],\n [2, 17, -0.72694996297594E-15],\n [3, -4, -0.31679644845054E-4],\n [3, 0, -0.28270797985312E-5],\n [3, 6, -0.85205128120103E-9],\n [4, -5, -0.22425281908000E-5],\n [4, -2, -0.65171222895601E-6],\n [4, 10, -0.14341729937924E-12],\n [5, -8, -0.40516996860117E-6],\n [8, -11, -0.12734301741641E-8],\n [8, -6, -0.17424871230634E-9],\n [21, -29, -0.68762131295531E-18],\n [23, -31, 0.14478307828521E-19],\n [29, -38, 0.26335781662795E-22],\n [30, -39, -0.11947622640071E-22],\n [31, -40, 0.18228094581404E-23],\n [32, -41, -0.93537087292458E-25]], 'd'),\n 'pT_datar20': array([\n [0, -0.96927686500217E1],\n [1, 0.10086655968018E2],\n [-5, -0.56087911283020E-2],\n [-4, 0.71452738081455E-1],\n [-3, -0.40710498223928],\n [-2, 0.14240819171444E1],\n [-1, -0.43839511319450E1],\n [2, -0.28408632460772],\n [3, 0.21268463753307E-1]], 'd'),\n 'pT_datar2r': array([\n [1, 0, -0.17731742473213E-2],\n [1, 1, -0.17834862292358E-1],\n [1, 2, -0.45996013696365E-1],\n [1, 3, -0.57581259083432E-1],\n [1, 6, -0.50325278727930E-1],\n [2, 1, -0.33032641670203E-4],\n [2, 2, -0.18948987516315E-3],\n [2, 4, -0.39392777243355E-2],\n [2, 7, -0.43797295650573E-1],\n [2, 36, -0.26674547914087E-4],\n [3, 0, 0.20481737692309E-7],\n [3, 1, 0.43870667284435E-6],\n [3, 3, -0.32277677238570E-4],\n [3, 6, -0.15033924542148E-2],\n [3, 35, -0.40668253562649E-1],\n [4, 1, -0.78847309559367E-9],\n [4, 2, 0.12790717852285E-7],\n [4, 3, 0.48225372718507E-6],\n [5, 7, 0.22922076337661E-5],\n [6, 3, -0.16714766451061E-1],\n [6, 16, -0.21171472321355E-2],\n [6, 35, -0.23895741934104E2],\n [7, 0, -0.59059564324270E-1],\n [7, 11, -0.12621808899101E-5],\n [7, 25, -0.38946842435739E-1],\n [8, 8, 0.11256211360459E-1],\n [8, 36, -0.82311340897998E1],\n [9, 13, 0.19809712802088E-7],\n [10, 4, 0.10406965210174E-1],\n [10, 10, -0.10234747095929E-1],\n [10, 14, -0.10018179379511E-8],\n [16, 29, -0.80882908646985E-1],\n [16, 50, 0.10693031879409],\n [18, 57, -0.33662250574171],\n [20, 20, 0.89185845355421E-2],\n [20, 
35, 0.30629316876232E-1],\n [20, 48, -0.42002467698208E-5],\n [21, 21, -0.59056029685639E-2],\n [22, 53, 0.37826947613457E-5],\n [23, 39, -0.12768608934681E-1],\n [24, 26, 0.73087610595061E-2],\n [24, 40, 0.55414715350778E-1],\n [24, 58, -0.94369707241210E-6]], 'd'),\n }\n\n def psat(self, T):\n \"\"\"\n Returns the saturation pressure of water at a given temperature.\n\n Remember that temperature must be between 273.15K (triple point)\n and 647.096K (critical point)\n\n Temperatures in K, Pressures in Pa.\n\n >>> w = Water()\n >>> w.psat(300)\n 3536.5894130130105\n >>> w.psat(130)\n Traceback (most recent call last):\n File \"/usr/lib/python2.5/doctest.py\", line 1228, in __run\n compileflags, 1) in test.globs\n File \"<doctest __main__.Water.psat[2]>\", line 1, in <module>\n w.psat(130)\n File \"iapws.py\", line 153, in psat\n 'No saturation pressure for this temperature')\n ValueError: No saturation pressure for this temperature\n >>> w.psat(700)\n Traceback (most recent call last):\n File \"/usr/lib/python2.5/doctest.py\", line 1228, in __run\n compileflags, 1) in test.globs\n File \"<doctest __main__.Water.psat[3]>\", line 1, in <module>\n w.psat(700)\n File \"iapws.py\", line 146, in psat\n 'No saturation pressure for this temperature')\n ValueError: No saturation pressure for this temperature\n \"\"\"\n if T < 273.15 or T > 647.096:\n raise ValueError(\n 'No saturation pressure for this temperature')\n\n n = self.data['n']\n v = T + n[8] / (T - n[9])\n A = v ** 2 + n[0] * v + n[1]\n B = n[2] * v ** 2 + n[3] * v + n[4]\n C = n[5] * v ** 2 + n[6] * v + n[7]\n return Pressure(((2 * C) / (-B + sqrt(B ** 2 - 4 * A * C))) ** 4).unit('MPa')\n\n def Tsat(self, p):\n \"\"\"\n Returns the saturation temperature of water at a given pressure.\n\n Remember that pressure must be between 0.000611213MPa (triple\n point) and 22.064MPa (critical point)\n\n Temperatures in K, pressures in MPa.\n\n >>> w = Water()\n >>> w.Tsat(100000)\n 372.7559186113...\n >>> w.Tsat(1200000)\n 461.1146416213...\n >>> w.Tsat(100)\n Traceback (most recent call last):\n File \"/usr/lib/python2.5/doctest.py\", line 1228, in __run\n compileflags, 1) in test.globs\n File \"<doctest __main__.Water.Tsat[3]>\", line 1, in <module>\n w.Tsat(100)\n File \"iapws.py\", line 193, in Tsat\n raise ValueError('No saturation temperature for this pressure')\n ValueError: No saturation temperature for this pressure\n >>> w.Tsat(101325)\n 373.12430000048056\n \"\"\"\n\n p = Pressure(p).MPa\n\n if p < 0.000611213 or p > 22.064:\n raise ValueError('No saturation temperature for this pressure')\n\n n = self.data['n']\n beta = p ** 0.25\n E = beta ** 2 + n[2] * beta + n[5]\n F = n[0] * beta ** 2 + n[3] * beta + n[6]\n G = n[1] * beta ** 2 + n[4] * beta + n[7]\n D = (2 * G) / (-F - sqrt(F ** 2 - 4 * E * G))\n\n return Temperature(0.5 * (n[9] + D - sqrt((n[9] + D) ** 2 - 4 * (n[8] + n[9] * D))))\n\n def h(self, p, T):\n \"\"\"\n Returns specific enthalpy (J/kg) for a given pressure (Pa) and\n Temperature (K).\n\n >>> w = Water()\n >>> round(w.h(3000000,300), 6)\n 115.331273\n >>> w.h(3500,300)\n 2549.9114508400203\n\n There are also error codes\n\n Results checked against the reference.\n \"\"\"\n p = Pressure(p).MPa\n\n # region 1 implementation\n if p >= self.psat(T).MPa:\n # Liquid water (pressure over saturation pressure)\n pi = p / 16.53\n tau = 1386 / T\n\n raw_data = self.data['pT_datar1']\n I = raw_data[:, 0]\n J = raw_data[:, 1]\n n = raw_data[:, 2]\n\n return Enthalpy(\n self.R * T * tau * sum((n * (7.1 - pi) ** I) * J * ((tau - 
1.222) ** (J - 1))))\n\n if p < self.psat(T).MPa:\n # steam, pressure under saturation pressure\n pi = p\n tau = 540 / T\n\n raw_data0 = self.data['pT_datar20']\n J0 = raw_data0[:, 0]\n n0 = raw_data0[:, 1]\n\n raw_datar = self.data['pT_datar2r']\n I = raw_datar[:, 0]\n J = raw_datar[:, 1]\n n = raw_datar[:, 2]\n\n g0_tau = sum(n0 * J0 * tau ** (J0 - 1))\n gr_tau = sum(n * pi ** I * J * (tau - 0.5) ** (J - 1))\n\n return Enthalpy(self.R * T * tau * (g0_tau + gr_tau))\n\n def T_ph(self, p, h):\n \"\"\"\n Returns the temperature (K) given the pressure (MPa) and\n specific enthalpy (kJ/kg). Only region 2a implemented\n (p<4MPa) (Reimplement).\n\n >>> w = Water()\n >>> def t_round(l): return round(l[0], 6), round(l[1], 6)\n >>> t_round(w.T_ph(3,500))\n (391.798509, 4.1313215739117...e+21)\n >>> t_round(w.T_ph(3,4000))\n (-14923984.403553, 1010.775766)\n >>> t_round(w.T_ph(0.001,3000))\n (-103213.846234, 534.433241)\n >>> t_round(w.T_ph(3,500))\n (391.798509, 4.1313215739117...e+21)\n >>> t_round(w.T_ph(80,500))\n (378.108626, -6.029123659828...e+28)\n >>> t_round(w.T_ph(80,1500))\n (611.041229, -5.572621155340...e+22)\n \"\"\"\n eta = h / 2500.\n\n raw_data = array([\n [1, 0, 0, -0.23872489924521E3],\n [2, 0, 1, 0.40421188637945E3],\n [3, 0, 2, 0.11349746881718E3],\n [4, 0, 6, -0.58457616048039E1],\n [5, 0, 22, -0.15285482413140E-3],\n [6, 0, 32, -0.10866707695377E-5],\n [7, 1, 0, -0.13391744872602E2],\n [8, 1, 1, 0.43211039183559E2],\n [9, 1, 2, -0.54010067170506E2],\n [10, 1, 3, 0.30535892203916E2],\n [11, 1, 4, -0.65964749423638E1],\n [12, 1, 10, 0.93965400878363E-2],\n [13, 1, 32, 0.11573647505340E-6],\n [14, 2, 10, -0.25858641282073E-4],\n [15, 2, 32, -0.40644363084799E-8],\n [16, 3, 10, 0.66456186191635E-7],\n [17, 3, 32, 0.80670734103027E-10],\n [18, 4, 32, -0.93477771213947E-12],\n [19, 5, 32, 0.58265442020601E-14],\n [20, 6, 32, -0.15020185953503E-16]], 'd')\n\n i = raw_data[:, 0]\n I = raw_data[:, 1]\n J = raw_data[:, 2]\n n = raw_data[:, 3]\n\n raw_data2 = array([\n [1, 0, 0, 0.10898952318288E4],\n [2, 0, 1, 0.84951654495535E3],\n [3, 0, 2, -0.10781748091826E3],\n [4, 0, 3, 0.33153654801263E2],\n [5, 0, 7, -0.74232016790248E1],\n [6, 0, 20, 0.11765048724356E2],\n [7, 1, 0, 0.18445749355790E1],\n [8, 1, 1, -0.41792700549624E1],\n [9, 1, 2, 0.62478196935812E1],\n [10, 1, 3, -0.17344563108114E2],\n [11, 1, 7, -0.20058176862096E3],\n [12, 1, 9, 0.27196065473796E3],\n [13, 1, 11, -0.45511318285818E3],\n [14, 1, 18, 0.30919688604755E4],\n [15, 1, 44, 0.25226640357872E6],\n [16, 2, 0, -0.61707422868339E-2],\n [17, 2, 2, -0.31078046629583],\n [18, 2, 7, 0.11670873077107E2],\n [19, 2, 36, 0.12812798404046E9],\n [20, 2, 38, -0.98554909623276E9],\n [21, 2, 40, 0.28224546973002E10],\n [22, 2, 42, -0.35948971410703E10],\n [23, 2, 44, 0.17227349913197E10],\n [24, 3, 24, -0.13551334240775E5],\n [25, 3, 44, 0.12848734664650E8],\n [26, 4, 12, 0.13865724283226E1],\n [27, 4, 32, 0.23598832556514E6],\n [28, 4, 44, -0.13105236545054E8],\n [29, 5, 32, 0.73999835474766E4],\n [30, 5, 36, -0.55196697030060E6],\n [31, 5, 42, 0.37154085996233E7],\n [32, 6, 34, 0.19127729239660E5],\n [33, 6, 44, -0.41535164835634E6],\n [34, 7, 28, -0.62459855192507E2]], 'd')\n\n eta2 = h / 2000.\n i2 = raw_data2[:, 0]\n I2 = raw_data2[:, 1]\n J2 = raw_data2[:, 2]\n n2 = raw_data2[:, 3]\n\n return (sum(n * p ** I * (eta + 1) ** J), sum(n2 * p ** I2 * (eta2 - 2.1) ** J2))\n" ]
[ [ "numpy.sum", "numpy.array", "numpy.sqrt" ] ]
sekisetsu-method/star-eyes
[ "dc8e7ea17e1f124f0f42550504f032dd3c735591" ]
[ "core/cvt_00014.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf8 -*-\nprint(unicode(\"積雪メソッド\",\"UTF-8\"))\n\n__author__ = \"Arlo Emerson <[email protected]>\"\n__status__ = \"production\"\n__version__ = \"14.0\"\n__date__ = \"17 August 2018\"\n\n#--- LICENSE ------------------------------------------------------------------\n# This code cvt_[version number].py and all software created by Sekisetsu Method and or Arlo Emerson or other designated authors covered by the MIT License.\n\n# MIT License\n\n# Copyright (c) 2017, 2018 Arlo Emerson, Sekisetsu Method\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n#--- THE SEKISETSU METHOD EXPLAINED -------------------------------------------\n'''\nWHAT IT IS\nSekisetsu is a Japanese word for snowfall accumulation. The Sekisetsu Method (積雪メソッド) is a technical analysis tool combining price action geometry (candlesticks) with fluid dynamics to reveal otherwise hidden structure and market participant intention, and displays this information on a price chart. The method can also be termed \"Price Action Fluid Dynamics\". Many terms and ideas from fluid dynamics and chaotic systems are borrowed and used both in the code and as allegory in the training material. Regardless of terminology, the goal of the method is to empower the user to align trades with larger players. \n\nHOW IT WORKS\nOHLC price action data (in the form of a modified candlestick chart) creates the surfaces and boundaries within a control volume tank (CVT). The tank is filled with both heavy and light particles. The particles accumulate in the cavities and recesses of the price action surfaces. The pooling of particles results in three patterns: top-heavy, bottom-heavy and stagnant accumulations. These differences can be viewed directly on the chart, and are further expressed as an \"imbalance ratio histogram\". A standard deviation method is employed to locate relatively stagnant periods of price action. It is these periods of lower volotility coinciding with imbalances in particle accumulation where major entry signals emerge, revealing both the location and trading direction of large players, i.e. market makers. \n\nThe software is open source, highly configurable, and easily lends itself to integration with existing trading systems.\n'''\n\n#--- USAGE --------------------------------------------------------------------\n# • CSV files need to live in the \"csv\" folder at project root. 
\n# • Histograms are created in the \"histograms\" folder at project root.\n# • Simulations (frame by frame PNGs of the simulation) are created in the \"simulations\" folder at project root.\n\n# To run the program: \n# • Use PIP to install all requirements (see requirements.txt).\n# • Add CSV files to the \"csv\" folder at project root.\n# • If running native linux, comment out os.environ['SDL_VIDEODRIVER']='dummy'\n# \t(this is a Windows workaround when running the Ubuntu shell on Windows)\n# • from a shell, run:\n# \t$ python cvt_[version_number].py\n#------------------------------------------------------------------------------\n\n#--- NOTES --------------------------------------------------------------------\n# Project website with how-to and trading ideas: http://www.sekisetsumethod.com\n\n# See the README file for detailed information on usage.\n# See http://www.sekisetsumethod.com for charts, signals and training.\n\n# Thanks to Maarten Baert's excellent physics engine (licensed under LGPL).\n# More info: http://www.maartenbaert.be/extremephysics/\n#------------------------------------------------------------------------------\n\nimport os as os\nos.environ['SDL_VIDEODRIVER']='dummy' # Use this if running the Ubuntu bash on windows\nimport pygame, sys, math, random, csv, glob, subprocess, shutil, heapq, argparse, textwrap\nimport lib.standard_deviation_function as sdef\nimport lib.TextColors as TextColors\nfrom lib.extremephysics import *\nfrom numpy import interp\nfrom PIL import Image, ImageDraw\n\ntarget_dir = \"../csv/\"\nfile_type = '.csv'\nparticle_birth_count = 1280 # should match window width\n\n\n# Particle/fluid simulations occur within a Control Volume Tank. \n# The current settings in this version are tuned to USDJPY 15 and 30 minute chart data.\nclass ControlVolumeTank():\n\n\tdef __init__(self):\n\t\tprint(self.__class__.__name__, __version__)\n\t\tprint(\"Running \" + TextColors.HEADERLEFT3 + TextColors.INVERTED + self.__class__.__name__ + \" \" + \\\n\t\t\tTextColors.ENDC + \" version \" + __version__ + \" of Sekisetsu Method Star Eyes fork.\")\n\n\t\tself.dataset_file = '' # overridden\n\t\tself.save_sequences = True\n\t\tself.particles_birth_count = 0 # overridden\n\t\tself.FRAME_LIMIT = 200 # 200 for production\n\t\tself.render_frames_directory = \"../simulations/\"\n\t\tself.render_histogram_directory = \"../histograms/\"\n\t\tself.code_name = \"star_eyes\"\n\t\tself.permutation_name = __version__\n\t\tself.histogram_animation_directory = self.code_name + \"_\" + __version__ + \"/\" \n\t\tself.PARTICLE_SHAPE_MODE = \"CIRCLE\"\n\t\tself.PARTICLE_DIAMETER = .1\n\t\tself.COEFFICIENT_RESTITUTION = 0.1 #0.01\n\t\tself.FRICTION = 0.1\n\t\tself.DATASET_HIGHEST_INDEX = 0\n\t\tself.DATASET_LOWEST_INDEX = 0\n\t\tself.draw = ImageDraw.Draw\n\n\t\tself.previous_sdev_y = 900\n\t\tself.standard_dev_start_y = 900\n\t\tself.previous_money_flow_y = 900\n\t\tself.previous_sdev_vol_y = 850\n\t\tself.standard_dev_vol_start_y = 850\n\t\tself.previous_sd_mfi_y = 800\n\t\tself.standardDevMFI = 0\n\n\t\tself.FRAME_RATE = 24 \n\t\tself.CANDLESTICK_WIDTH = 1\n\t\tself.new_x_default_value = 10\n\t\tself.CONTAINER_WALLS_WIDTH = 2\n\t\tself.CANDLE_GUTTER = 3\n\t\tself.run = True\n\t\tself.DATASET_LOWEST = 107 # overridden, used for scaling the chart into this game window\n\t\tself.DATASET_HIGHEST = 111 # overridden\n\t\tself.DATASET_VOLUME_HIGHEST = 0 # overridden\n\t\tself.DATASET_VOLUME_LOWEST = 0 # overridden\n\t\tself.price_high = 0\n\t\tself.price_low = 0\n\t\tself.offset_index = 0 # 
used for cycling through the T axis\n\t\tself.truncated_dataset_file_name = \"\"\n\t\tself.PAINTABLE_LIMIT = 1268 # used as a canvas limit so there are some venting gaps on L and R side of chart\n\t\tself.HEIGHT_SCALING_FACTOR = 1.1 # set to 1.2 initially. if things are getting truncated, lower this number to fit more into the screen\n\t\t# note: set to negative number to do interesting head-on particle collisions.\n\n\t\trandom.seed()\n\t\tpygame.display.init()\n\t\tpygame.font.init()\n\t\tself.fpsclock = pygame.time.Clock()\n\t\tself.WINDOW_WIDTH = 1280\n\t\tself.WINDOW_HEIGHT = 720\n\t\tself.surf_window = pygame.display.set_mode((self.WINDOW_WIDTH, self.WINDOW_HEIGHT))\n\t\tself.font = pygame.font.SysFont(\"Sans\", 12)\t\n\t\tself.font_large = pygame.font.SysFont(\"Sans\", 24)\t\n\t\tself.cx = self.WINDOW_WIDTH / 2\n\t\tself.cy = self.WINDOW_HEIGHT / 2\n\t\tself.mouse_x = 0\n\t\tself.mouse_y = 0\n\t\tself.color_static = pygame.Color(52, 30, 162)\n\t\tself.COLOR_STANDARD_DEVIATION = pygame.Color(\"yellow\")\t\t\n\t\tself.COLOR_STANDARD_DEVIATION_VOL = pygame.Color(\"blue\")\t\n\t\tself.COLOR_HEAVY_PARTICLES = pygame.Color(0, 146, 255)\n\t\tself.COLOR_LIGHT_PARTICLES = pygame.Color(255, 0, 255)\n\t\tself.COLOR_HISTOGRAM_UP = (0, 146, 255)\n\t\tself.COLOR_HISTOGRAM_DOWN = (255, 0, 255)\n\t\tself.COLOR_ENTRY_SIGNAL = (0, 255, 100)\n\t\tself.COLOR_MONEY_FLOW_INDEX = pygame.Color(\"green\")\t\t\n\t\tself.MOUSE_HINGE_JOINT = -1.0\n\t\tself.edge_boxes = []\n\t\tself.candlestick_boxes = []\n\t\tself.heavy_particles = []\n\t\tself.light_particles = []\n\t\tself.standard_dev_list = []\n\t\tself.standard_dev_list_vol = []\n\t\tself.mfi = []\n\t\tself.mfi_calc = []\n\t\tself.mfi_standard_dev = []\n\t\tself.new_x = self.new_x_default_value\n\t\tself.index_counter = 0\n\t\tself.verbose = False\n\t\tself.debug = False\n\t\tself.candleIndex = 0\n\t\tself.highlight_sigma = True # can be overridden by passing in -highlight_sigma argument\n\t\tself.sigma_period = 17 # can be overridden by passing in -sigma_period argument\n\t\tself.show_histogram_ratio = True\n\t\tself.show_histogram_standard_dev = False\n\t\tself.show_MFI = False\n\t\tself.histogram_standard_dev_period = 7\n\t\tself.show_histogram_simple_average = False\n\t\tself.histogram_simple_average_period = 9\n\t\tself.sigma_sort_low = 315 # the number of sigma lines to draw\n\t\tself.offset_index_override = 0 # the index of the candle to begin a simulation\n\t\tself.sample_period_size = 0 # override this to e.g. 10, and set the offset_index_override to e.g. 55 \n\t\tself.permutation_index = 0 # the outer loop index, this will be appended to file name, and is useful for running multiple simulations on one dataset in order to observe variances in particle distribution\n\t\tself.candlePlusGutterWidth = (self.CANDLESTICK_WIDTH + self.CANDLE_GUTTER)\n\n\t\thelpMessage = 'See README.md and setup_instructions.md for specifics. 
Here are some commands to try: \\n' + \\\n\t\t\t\"• Standard deviation of price (SD, yellow line) + Volume SD (blue line) + 100 lowest sigma values highlighted in green: \" + TextColors.OKGREEN + 'python cvt_00014.py --sigma_period 17 -hrat 1 -v -ssl 100' + TextColors.ENDC + \"\\n\" + \\\n\t\t\t\"• Price SD + lowest sigma values highlighted in green: \" + TextColors.OKGREEN + 'python cvt_00014.py --sigma_period 23 --highlight_sigma True -v ' + TextColors.ENDC + \"\\n\" + \\\n\t\t\t\"• Price SD + histogram SD of particle distribution: \" + TextColors.OKGREEN + \"python cvt_00014.py --sigma_period 19 -v -hrat False -hsd True -hsdp 34\" + TextColors.ENDC + \"\\n\" + \\\n\t\t\t\"• Price SD + histogram moving average (MA) of particle distribution: \" + TextColors.OKGREEN + \"python cvt_00014.py --sigma_period 17 -v -hrat False -hsa True -hsap 23\" + TextColors.ENDC + \"\\n\" + \\\n\t\t\t\"• Price SD + histogram MA with a larger set of low SD highlighted: \" + TextColors.OKGREEN + \"python cvt_00014.py --sigma_period 34 -v -hrat True -ssl 100\" + TextColors.ENDC + \"\\n\" + \\\n\t\t\t\"• Start at some other index in the dataset (e.g. 120 candles from latest): \" + TextColors.OKGREEN + \"python cvt_00014.py --sigma_period 34 -v -oo 120 -hrat 1\" + TextColors.ENDC + \"\\n\" + \\\n\t\t\t\"• Start at some other index and march forward N candles: \" + TextColors.OKGREEN + \"python cvt_00014.py --sigma_period 34 -v -oo 120 -sps 10 -hrat 1\" + TextColors.ENDC + \"\\n\" + \\\n\t\t\t\"• Run a series of simulations at the same index: \" + TextColors.OKGREEN + \"python cvt_00014.py --sigma_period 23 -v -oo 127 -hrat 1 -ssl 1\" + TextColors.ENDC + \"\\n\" + \\\n\t\t\t\" \"\n\n\t\t\t\n\t\tparser = argparse.ArgumentParser(description=helpMessage, epilog=textwrap.dedent(\"\"), formatter_class=argparse.RawTextHelpFormatter)\n\t\tparser.add_argument('-s', '--highlight_sigma', dest='highlight_sigma', required=False, help=\"Paint lines from low sigma regions to the top of the chart. This helps isolate important areas in the histogram.\")\n\t\tparser.add_argument('-p', '--sigma_period', dest='sigma_period', required=False, help=\"The sigma period used to calculate the standard deviation. Default is 17.\")\n\t\tparser.add_argument('-hrat', '--show_histo_ratio', dest='show_histo_ratio', required=False, help=\"Show the histogram ratio lines.\")\n\t\tparser.add_argument('-hsd', '--show_histo_sd', dest='show_histo_sd', required=False, help=\"Show a standard deviation line of the histogram.\")\n\t\tparser.add_argument('-hsdp', '--histo_sd_period', dest='histo_sd_period', required=False, help=\"Histogram standard deviation period. Default is 7.\")\n\t\tparser.add_argument('-hsa', '--show_histo_simple_average', dest='show_histo_simple_average', required=False, help=\"Show a simple average line of the histogram.\")\n\t\tparser.add_argument('-hsap', '--histo_simple_average_period', dest='histo_simple_average_period', required=False, help=\"Histogram simple average period. Default is 9.\")\n\t\tparser.add_argument('-ssl', '--sigma_sort_low', dest='sigma_sort_low', required=False, help=\"The number of samples to use for highlighting the low points in sigma. Default is 40. Higher numbers will add more lines and include a larger range.\")\n\t\tparser.add_argument('-oo', '--offset_index_override', dest='offset_index_override', required=False, help=\"The index of the current data set to begin at. 
This is helpful if you see a breakout candle somewhere in the past and want to run the simulation with that price being at the far right of the chart.\")\n\t\tparser.add_argument('-sps', '--sample_period_size', dest='sample_period_size', required=False, help=\"The size of the sample set of candles to run a simulation on. Use with offset index override -oo.\")\n\t\tparser.add_argument('-mfi', '--show_mfi', dest='show_mfi', required=False, help=\"Display both MFI over the chart and MFI standard deviation at bottom.\")\n\n\t\tparser.add_argument('-v','--verbose', dest='verbose', action='store_true', help=\"Explain what is being done.\")\n\t\tparser.add_argument('-d','--debug', dest='debug', action='store_true', help=\"Lower level messages for debugging.\")\t\t\n\t\tparser.add_argument('--version', action='version', version='%(prog)s ' + __version__)\n\n\t\targs = parser.parse_args()\n\t\t\n\t\tif args.verbose:\n\t\t\tself.verbose = True\n\n\t\tif args.debug:\n\t\t\tself.debug = True\n\n\t\tif self.string_to_bool(args.highlight_sigma):\n\t\t\tself.highlight_sigma = True\n\n\t\tif self.string_to_bool(args.show_mfi):\n\t\t\tself.show_MFI = True\n\n\t\tif args.show_histo_ratio: \n\t\t\tself.show_histogram_ratio = self.string_to_bool(args.show_histo_ratio)\n\n\t\tif args.sigma_period: \n\t\t\tself.sigma_period = int( args.sigma_period )\n\n\t\tif args.sigma_sort_low: \n\t\t\tself.sigma_sort_low = int( args.sigma_sort_low )\n\n\t\tif self.string_to_bool(args.show_histo_sd): \n\t\t\tself.show_histogram_standard_dev = True\n\n\t\tif args.sample_period_size:\n\t\t\tself.sample_period_size = int(args.sample_period_size)\n\n\t\tif args.histo_sd_period:\n\t\t\tself.histogram_standard_dev_period = int(args.histo_sd_period)\n\n\t\tif self.string_to_bool(args.show_histo_simple_average): \n\t\t\tself.show_histogram_simple_average = True\n\n\t\tif args.histo_simple_average_period:\n\t\t\tself.histogram_simple_average_period = int(args.histo_simple_average_period)\n\n\t\tif args.offset_index_override:\n\t\t\tself.offset_index_override = int(args.offset_index_override)\n\n\t\tif args.debug and args.verbose:\n\t\t\tself.print_debug(\"Running in verbose mode with debug messages.\")\n\t\telif args.debug and not args.verbose:\n\t\t\tself.print_debug(\"Running in debug mode.\")\n\t\telif args.verbose and not args.debug:\n\t\t\tself.print_verbose(\"Running in verbose mode.\")\n\t\n\tdef string_to_bool(self, pArg):\n \t\tif None == pArg:\n \t\t\treturn False\n \t\telif pArg.lower() in (\"y\", \"yes\", \"true\", \"t\", \"1\"):\n \t\t\treturn True\n \t\telse:\n \t\t\treturn False\n\n\tdef set_dataset_file(self, pFileName):\n\t\tself.dataset_file = pFileName\n\n\tdef draw_box(self, x, y, w, h, rot, color):\n\t\tpoints = [[-w / 2.0, -h / 2.0], [w / 2.0, -h / 2.0], [w / 2.0, h / 2.0], [-w / 2.0, h / 2.0]]\n\t\tfor p in points:\n\t\t\tp[:] = [x + p[0] * math.cos(rot) + p[1] * math.sin(rot), y - p[0] * math.sin(rot) + p[1] * math.cos(rot)]\n\t\tpygame.draw.polygon(self.surf_window, color, points, 1)\n\n\tdef draw_circle(self, x, y, d, color):\n\t\tpoints = [[-d / 2.0, -d / 2.0], [d / 2.0, -d / 2.0]]\n\t\tpygame.draw.circle(self.surf_window, color, [x,y], d/2, 1)\n\t\t# circle(Surface, color, pos, radius, width=0) -> Rect\n\n\t# for drawing a progress bar\n\tdef draw_growing_rectangle(self, pInt):\n\t\tpoints = (20,20,50+pInt, 30)\n\t\t# TODO: make this grow automatically\n\t\tpygame.draw.rect(self.surf_window, self.COLOR_STANDARD_DEVIATION, points, 1)\n\n\tdef draw_standard_dev_line(self, 
pCoords):\n\t\tpygame.draw.line(self.surf_window, self.COLOR_STANDARD_DEVIATION, pCoords[0], pCoords[1], 1)\n\n\tdef draw_standard_dev_line_vol(self, pCoords):\n\t\tpygame.draw.line(self.surf_window, self.COLOR_STANDARD_DEVIATION_VOL, pCoords[0], pCoords[1], 1)\n\n\tdef draw_mfi(self, pCoords, pIndex):\n\t\t# self.new_x-candlePlusGutterWidth, self.previous_money_flow_y\n\t\t# self.new_x, self.standard_dev_vol_start_y - newMfCalc\n\t\t# priceHigh, priceLow\n\n\t\ttwoCandles = self.candlePlusGutterWidth * 2\n\n\t\t# if self.mfi[pIndex][1][1] < self.mfi[pIndex-1][1][1] and self.mfi[pIndex-1][1][1] > self.mfi[pIndex-2][1][1]: \n\t\t# # we have spiked up and down\n\t\t# \tpygame.draw.line( self.surf_window, pygame.Color(\"red\"), ( self.candlePlusGutterWidth * pIndex,pCoords[2][0]-20 ), ( self.candlePlusGutterWidth * pIndex,pCoords[2][0] + twoCandles) )\n\t\tpygame.draw.line(self.surf_window, pygame.Color(\"gray\"), pCoords[0], pCoords[1], 1)\n\n\tdef draw_sd_mfi(self, pCoords):\n\t\tpygame.draw.line(self.surf_window, pygame.Color(\"gray\"), pCoords[0], pCoords[1], 1)\n\n\tdef init_dataset(self):\t\n\t\tcsvfile = open(self.dataset_file, 'r')\n\t\tlines = csvfile.readlines() \n\t\t\n\t\trowCount = 0\n\t\tfor row in lines:\n\t\t\trowCount += 1\n\t\t\n\t\ttmpDataSet = []\n\n\t\t# this reverse orders the orig data so we can paint from left to right with it\n\t\tstartIndex = rowCount - self.offset_index - 315\n\n\t\tfor i in range( startIndex, rowCount - self.offset_index ):\n\t\t\ttmpDataSet.append(lines[i])\n\t\t\n\t\tself.dataset = tmpDataSet\n\n\t\ttmpList = []\n\t\ttmpVolList = []\n\t\ttmpCount = 0\n\t\ttmpMFI = []\n\n\t\tfor row in self.dataset:\n\t\t\t# if tmpCount > 0:\t\t\t\n\t\t\t# this is to determine the min/max\n\t\t\t# tmpTruncatedRow = row[1:4] # works for dukascopy\n\t\t\trowList = row.split(\",\")\n\t\t\t# self.print_debug(rowList)\n\t\t\ttmpTruncatedRow = rowList[2:6] # works for metatrader\t\t\t\t\t\n\n\t\t\tif tmpTruncatedRow != []:\n\t\t\t\ttmpList.append( max(tmpTruncatedRow) )\n\t\t\t\ttmpList.append( min(tmpTruncatedRow) )\n\n\t\t\ttmpTruncatedRow = rowList[6:7]\n\t\t\tif tmpTruncatedRow != []:\n\t\t\t\ttmpVolList.append( float( tmpTruncatedRow[0].strip() ) )\n\n\t\tself.DATASET_LOWEST = int( round( float( min(tmpList)) ) ) -1\n\t\tself.DATASET_HIGHEST = int( round( float( max(tmpList)) ) ) +1\n\n\t\tself.DATASET_VOLUME_LOWEST = int( round( float( min(tmpVolList) * self.DATASET_LOWEST ) ) ) -1\n\t\tself.DATASET_VOLUME_HIGHEST = int( round( float( max(tmpVolList) * self.DATASET_HIGHEST ) ) ) +1\n\n\t\tself.DATASET_MFI_HIGHEST = 100 #self.DATASET_HIGHEST * self.DATASET_VOLUME_HIGHEST\n\t\tself.DATASET_MFI_LOWEST = 0 #self.DATASET_LOWEST * self.DATASET_VOLUME_LOWEST\n\n\t\t# firstRowRead = 0\n\t\tfor row in self.dataset:\n\t\t\tself.paint_candle(row) # returns 0 if row volume is empty\n\t\t\tself.candleIndex += 1\n\n\t\tself.print_verbose( str(self.candleIndex) + \" records in data set\" )\n\n\t\tslashLocation = self.dataset_file.rfind('/') \n\t\tdirectory = self.dataset_file[slashLocation+1:]\n\t\tself.truncated_dataset_file_name = directory[:-4] #trim off the '.csv'\n\t\tself.print_verbose( self.truncated_dataset_file_name )\n\n\tdef game_start(self):\n\t\t\n\t\tself.world = ep_world_create()\n\t\tep_world_set_sleeping(self.world, True, 30, 0, 0.002, 0.0001)\n\t\tep_world_set_settings(self.world, 1.0 / 4.0, 20, 10, 0.1, 0.5, 0, 0.5, 1)\n\n\t\tself.init_dataset()\n\n\t\tself.mouseParticleId = self.get_static_body_id()\n\t\tself.MOUSE_HINGE_JOINT = -1.0\n\t\tparticlePosition_X = 
10\n\n\t\t# HOW TO SET THE FRICTIONS...\n\t\t# ep_shape_set_material(global.world,body,shape1,0.5,0.4,0,0);\n\t\t# 0.5: coefficient of restitution.\n\t\t# 0.4: friction.\n\t\t# 0: normal surface velocity.\n\t\t# 0: tangential surface velocity.\n\n\t\t# physics boundaries of the stage, AKA the Control Volume Tank.\n\t\t# MAKE FLOOR\n\t\ttmpW = self.WINDOW_WIDTH - self.CONTAINER_WALLS_WIDTH\n\t\ttmpH = self.CONTAINER_WALLS_WIDTH\n\t\ttmpX = self.WINDOW_WIDTH / 2\n\t\ttmpY = self.WINDOW_HEIGHT - self.CONTAINER_WALLS_WIDTH\n\n\t\t# ep_shape_create_box(world_id, body_id, w, h, x, y, rot, density)\n\t\ttmpBodyId = self.get_static_body_id()\n\t\tself.edge_boxes.append([tmpW, tmpH, tmpX, tmpY, math.radians(0)])\n\t\tshape = ep_shape_create_box(self.world, tmpBodyId, tmpW, tmpH, tmpX, tmpY, math.radians(0), 1)\n\t\tep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)\n\t\tep_shape_set_material(self.world, tmpBodyId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)\n\n\t\t# LEFT WALL\n\t\ttmpW = self.CONTAINER_WALLS_WIDTH\n\t\ttmpH = self.WINDOW_HEIGHT - self.CONTAINER_WALLS_WIDTH\n\t\ttmpX = 0\n\t\ttmpY = self.WINDOW_HEIGHT / 2\n\n\t\ttmpBodyId = self.get_static_body_id()\n\t\tself.edge_boxes.append([tmpW, tmpH, tmpX, tmpY, math.radians(0)])\n\t\tshape = ep_shape_create_box(self.world, tmpBodyId, tmpW, tmpH, tmpX, tmpY, math.radians(0), 1)\n\t\tep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)\n\t\tep_shape_set_material(self.world, tmpBodyId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)\n\n\t\t# RIGHT WALL\n\t\ttmpW = self.CONTAINER_WALLS_WIDTH\n\t\ttmpH = self.WINDOW_HEIGHT - self.CONTAINER_WALLS_WIDTH\n\t\ttmpX = self.WINDOW_WIDTH - self.CONTAINER_WALLS_WIDTH\n\t\ttmpY = self.WINDOW_HEIGHT / 2\n\n\t\ttmpBodyId = self.get_static_body_id()\n\t\tself.edge_boxes.append([tmpW, tmpH, tmpX, tmpY, math.radians(0)])\n\t\tshape = ep_shape_create_box(self.world, tmpBodyId, tmpW, tmpH, tmpX, tmpY, math.radians(0), 1)\n\t\tep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)\n\t\tep_shape_set_material(self.world, tmpBodyId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)\n\n\t\t# MAKE CEILING\n\t\ttmpW = self.WINDOW_WIDTH - self.CONTAINER_WALLS_WIDTH\n\t\ttmpH = self.CONTAINER_WALLS_WIDTH\n\t\ttmpX = self.WINDOW_WIDTH / 2\n\t\ttmpY = self.CONTAINER_WALLS_WIDTH\n\n\t\ttmpBodyId = self.get_static_body_id()\n\t\tself.edge_boxes.append([tmpW, tmpH, tmpX, tmpY, math.radians(0)])\n\t\tshape = ep_shape_create_box(self.world, tmpBodyId, tmpW, tmpH, tmpX, tmpY, math.radians(0), 1)\n\t\tep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)\n\t\tep_shape_set_material(self.world, tmpBodyId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)\n\n\t\t# GENERATE PARTICLES\n\t\tparticleCount = 0\n\t\tfor i in range(0, self.particles_birth_count):\n\n\t\t\t# HEAVY PARTICLES\n\t\t\ttmpId = self.get_dynamic_body_id()\n\t\t\tshape = self.get_particle_shape(tmpId)\n\t\t\tep_shape_set_collision(self.world, tmpId, shape, 1, 1, 0)\n\t\t\tep_shape_set_material(self.world, tmpId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)\n\t\t\tep_body_calculate_mass(self.world, tmpId)\n\n\t\t\tif particlePosition_X >= self.WINDOW_WIDTH:\n\t\t\t\tparticlePosition_X = 0\n\t\t\telse:\n\t\t\t\tparticlePosition_X += 1\n\t\t\ttmpRadian = random.randrange(0,57)\n\t\t\tep_body_set_position(self.world, tmpId, particlePosition_X, 10, math.radians(tmpRadian))\n\t\t\tep_body_set_gravity(self.world, tmpId, 0, 
1.0)\n\t\t\tself.heavy_particles.append(tmpId)\n\t\t\tparticleCount += 1\n\n\t\t\t# LIGHTWEIGHT PARTICLES\n\t\t\ttmpId = self.get_dynamic_body_id()\n\t\t\tshape = self.get_particle_shape(tmpId) \n\t\t\tep_shape_set_collision(self.world, tmpId, shape, 1, 1, 0)\n\t\t\tep_shape_set_material(self.world, tmpId, shape, self.COEFFICIENT_RESTITUTION, self.FRICTION, 0, 0)\n\t\t\tep_body_calculate_mass(self.world, tmpId)\n\t\t\ttmpRadian = random.randrange(0,57)\n\t\t\tep_body_set_position(self.world, tmpId, particlePosition_X, self.WINDOW_HEIGHT-10, math.radians(tmpRadian))\n\t\t\tep_body_set_gravity(self.world, tmpId, 0, -1.0)\n\t\t\tself.light_particles.append(tmpId)\n\t\t\tparticleCount += 1\n\n\tdef get_particle_shape(self, tmpId):\n\t\t# ep_shape_create_circle method API...\n\t\t# shape1 = ep_shape_create_circle(global.world,body,32,0,0,0,1);\n\t\t# 32: the radius of the circle.\n\t\t# 0,0,0: the relative coordinates of the shape (x,y,rot).\n\t\t# 1: the density of the circle (not used for static bodies).\n\n\t\tif self.PARTICLE_SHAPE_MODE == \"CIRCLE\":\n\t\t\treturn ep_shape_create_circle(self.world, tmpId, self.PARTICLE_DIAMETER,0,0,0,1);\n\t\telse: #default square\n\t\t\treturn ep_shape_create_box(self.world, tmpId, self.PARTICLE_DIAMETER, self.PARTICLE_DIAMETER, 0, 0, 0, 1)\n\n\tdef paint_candle(self, pRow):\n\n\t\tif self.new_x >= self.PAINTABLE_LIMIT: # no matter the record count, limit candles to window width\n\t\t\treturn 0\n\n\t\tif pRow == []:\n\t\t\treturn 0\n\n\t\ttimestamp = pRow[0][0]\n\t\tself.print_debug(timestamp)\n\t\t\n\t\t# for dukascopy the rows are 1 thru 4\n\t\t# for metatrader it's 2 through 5\n\t\tpriceOpen = self.interpolate(float(pRow.split(\",\")[2]))\n\t\tpriceHigh = self.interpolate(float(pRow.split(\",\")[3]))\n\t\tpriceLow = self.interpolate(float(pRow.split(\",\")[4]))\n\t\tpriceClose = self.interpolate(float(pRow.split(\",\")[5]))\n\t\tvolume = self.interpolate_volume(float(pRow.split(\",\")[6]))\n\n\t\t'''\n\t\texperiment: use open/close rather than high low\n\t\tinitial result seems to be high/low is more accurate\n\t\tif priceOpen > priceClose:\n\t\t\tpriceHigh = priceOpen\n\t\t\tpriceLow = priceClose\n\t\telse:\n\t\t\tpriceHigh = priceClose\n\t\t\tpriceLow = priceOpen\n\n\t\tif priceOpen < priceClose:\n\t\t\tpriceLow = priceOpen\n\t\t\tpriceHigh = priceClose\n\t\telse:\n\t\t\tpriceHigh = priceOpen\n\t\t\tpriceLow = priceClose\n\t\t'''\n\n\t\tif self.DATASET_HIGHEST == priceHigh:\n\t\t\tself.DATASET_HIGHEST_INDEX = self.candleIndex\n\n\t\tif self.DATASET_LOWEST == priceLow:\n\t\t\tself.DATASET_LOWEST_INDEX = self.candleIndex\n\t\t\n\t\t# PRICE STANDARD DEVIATION\n\t\tsdSet = self.get_last_n_prices(self.candleIndex)\n\t\tstandardDev = sdef.getStandardDeviation(sdSet).real\n\t\tstandardDev *= (math.pow( math.pi*self.get_phi(), 4) )\n\t\t\n\t\tself.standard_dev_list.append([[self.new_x-self.candlePlusGutterWidth, self.previous_sdev_y], [self.new_x, self.standard_dev_start_y-standardDev]])\n\t\tself.previous_sdev_y = self.standard_dev_start_y-standardDev\t\t\t\n\n\t\t# VOLUME SD\n\t\tsdSetVol = self.get_last_n_volumes(self.candleIndex)\n\t\tstandardDevVol = sdef.getStandardDeviation(sdSetVol).real\n\t\tstandardDevVol *= (math.pow( math.pi*self.get_phi(), 2.5) )\n\n\t\tself.standard_dev_list_vol.append([[self.new_x-self.candlePlusGutterWidth, self.previous_sdev_vol_y], [self.new_x, self.standard_dev_vol_start_y-standardDevVol]])\n\n\t\t# MONEY FLOW INDEX \t\t\n\t\tpositive_money_flow = 0 \n\t\tnegative_money_flow = 0 \n\n\t\thighPriceSet = 
self.get_last_n_high_prices(self.candleIndex)\n\t\tlowPriceSet = self.get_last_n_low_prices(self.candleIndex)\n\n\t\t# sdSet is a present to past ordered list\n\t\t# so we need to loop it in reverse\n\n\t\t# this example uses high and low\n\t\t# magicNumber = 1/137\n\t\t# for i, k in reversed( list(enumerate(sdSet)) ):\n\t\t# \tif i > 0:\n\t\t# \t\tif highPriceSet[i] > highPriceSet[i-1]:\n\t\t# \t\t\tpositive_money_flow += highPriceSet[i] * sdSetVol[i] # * (1+magicNumber)\n\n\t\t# \t\tif lowPriceSet[i] < lowPriceSet[i-1]:\n\t\t# \t\t\tnegative_money_flow += lowPriceSet[i] * sdSetVol[i] # * (1+magicNumber)\n\n\t\tfor i, k in reversed( list(enumerate(sdSet)) ):\n\t\t\tif i > 0:\n\t\t\t\tif highPriceSet[i] > highPriceSet[i-1]:\n\t\t\t\t\tpositive_money_flow += highPriceSet[i] * sdSetVol[i]\n\n\t\t\t\tif lowPriceSet[i] < lowPriceSet[i-1]:\n\t\t\t\t\tnegative_money_flow += lowPriceSet[i] * sdSetVol[i]\n\n\t\tmoney_flow_index = 100 * ( positive_money_flow / ( positive_money_flow + negative_money_flow) )\n\t\tnewMfCalc = self.interpolate_mfi( money_flow_index )\n\n\t\t# RAW MFI\n\t\tself.mfi.append( [[self.new_x-self.candlePlusGutterWidth, self.previous_money_flow_y], [self.new_x, self.standard_dev_vol_start_y - newMfCalc], [priceHigh, priceLow]] )\n\t\tself.previous_money_flow_y = self.standard_dev_vol_start_y - newMfCalc\n\t\t\n\t\t# SD MFI\n\t\tmfiSDAdjust = self.WINDOW_HEIGHT + 150\n\t\tself.mfi_calc.append( newMfCalc )\n\t\tif (self.candleIndex >= self.sigma_period):\n\t\t\tsdSetMFI = self.mfi_calc[ -self.sigma_period:-1 ]\n\t\t\tself.standardDevMFI = sdef.getStandardDeviation(sdSetMFI).real\n\t\t\tself.standardDevMFI *= (math.pow( math.pi*self.get_phi(), (2.97)) )\n\n\t\tself.mfi_standard_dev.append( [[self.new_x-self.candlePlusGutterWidth, self.previous_sd_mfi_y], [self.new_x, mfiSDAdjust - self.standardDevMFI]] )\n\t\tself.previous_sd_mfi_y = mfiSDAdjust - self.standardDevMFI\n\n\t\t# VOLUME SD\n\t\tself.previous_sdev_vol_y = self.standard_dev_vol_start_y - standardDevVol\n\n\t\t# experimental, use to filter out zero volume periods\n\t\t# if volume == 0:\n\t\t# \treturn 0\n\n\t\tcandleHeight = 0\n\t\t\n\t\t# DETERMINE CANDLE PRICE HEIGHT\n\t\tcandleHeight = priceHigh - priceLow\n\t\tnewY = ((candleHeight/2)) + priceLow\n\t\tcandleHeight = abs(candleHeight)\n\n\t\ttmpBodyId = self.get_static_body_id()\n\t\tself.edge_boxes.append([self.CANDLESTICK_WIDTH, candleHeight, self.new_x, newY, math.radians(0)])\n\t\tshape = ep_shape_create_box(self.world, tmpBodyId, self.CANDLESTICK_WIDTH, candleHeight, self.new_x, newY, math.radians(0), 1)\n\t\t\n\t\t# self.price_high = priceHigh + candleHeight/2\n\t\t# self.price_low = newY\n\n\t\tep_shape_set_collision(self.world, tmpBodyId, shape, 1, 1, 0)\n\t\ttmpCoef = 2\n\t\ttmpFric = 1\n\t\tep_shape_set_material(self.world, tmpBodyId, shape, tmpCoef, tmpFric, 0, 0)\n\n\t\t# advance the x\n\t\tself.new_x += self.candlePlusGutterWidth\n\n\t\treturn 1\n\n\tdef get_x_location_of_candle(self, pIndex):\n\t\ttmpAdd = self.new_x_default_value\n\t\tfor i in range(0, pIndex):\n\t\t\ttmpAdd += (self.CANDLESTICK_WIDTH + self.CANDLE_GUTTER)\n\t\treturn tmpAdd\n\n\tdef get_last_n_prices(self, pIndex):\n\t\ttmpList = []\n\t\treturnList = []\n\t\tdsSubset = []\n\t\tlookback = self.sigma_period\n\n\t\tdsSubset.append( self.dataset[pIndex] )\n\t\ttry:\n\t\t\tfor i in range(1, lookback):\n\t\t\t\tdsSubset.append( self.dataset[pIndex-i] )\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tpass\n\n\t\tfor i in range(0, len(dsSubset)):\n\t\t\t# priceOpen = 
float(dsSubset[i].split(\",\")[2])\n\t\t\t# priceHigh = float(dsSubset[i].split(\",\")[3])\n\t\t\t# priceLow = float(dsSubset[i].split(\",\")[4])\n\t\t\tpriceClose = float(dsSubset[i].split(\",\")[5])\n\t\t\t# tmpList.append(priceOpen)\n\t\t\t# tmpList.append(priceHigh)\n\t\t\t# tmpList.append(priceLow)\n\t\t\ttmpList.append(priceClose) # note: just using the close makes for a bit spikier, low notches are more defined\n\n\t\treturn tmpList\n\n\tdef get_last_n_high_prices(self, pIndex):\n\t\ttmpList = []\n\t\treturnList = []\n\t\tdsSubset = []\n\t\tlookback = self.sigma_period\n\n\t\tdsSubset.append( self.dataset[pIndex] )\n\t\ttry:\n\t\t\tfor i in range(1, lookback):\n\t\t\t\tdsSubset.append( self.dataset[pIndex-i] )\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tpass\n\n\t\tfor i in range(0, len(dsSubset)):\n\t\t\t# priceOpen = float(dsSubset[i].split(\",\")[2])\n\t\t\tpriceHigh = float(dsSubset[i].split(\",\")[3])\n\t\t\t# priceLow = float(dsSubset[i].split(\",\")[4])\n\t\t\t# priceClose = float(dsSubset[i].split(\",\")[5])\n\t\t\t# tmpList.append(priceOpen)\n\t\t\ttmpList.append(priceHigh)\n\t\t\t# tmpList.append(priceLow)\n\t\t\t# tmpList.append(priceClose)\n\n\t\treturn tmpList\n\n\tdef get_last_n_low_prices(self, pIndex):\n\t\ttmpList = []\n\t\treturnList = []\n\t\tdsSubset = []\n\t\tlookback = self.sigma_period\n\n\t\tdsSubset.append( self.dataset[pIndex] )\n\t\ttry:\n\t\t\tfor i in range(1, lookback):\n\t\t\t\tdsSubset.append( self.dataset[pIndex-i] )\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tpass\n\n\t\tfor i in range(0, len(dsSubset)):\n\t\t\t# priceOpen = float(dsSubset[i].split(\",\")[2])\n\t\t\t# priceHigh = float(dsSubset[i].split(\",\")[3])\n\t\t\tpriceLow = float(dsSubset[i].split(\",\")[4])\n\t\t\t# priceClose = float(dsSubset[i].split(\",\")[5])\n\t\t\t# tmpList.append(priceOpen)\n\t\t\t# tmpList.append(priceHigh)\n\t\t\ttmpList.append(priceLow)\n\t\t\t# tmpList.append(priceClose)\n\n\t\treturn tmpList\n\n\tdef get_last_n_volumes(self, pIndex):\n\t\ttmpList = []\n\t\treturnList = []\n\t\tdsSubset = []\n\t\tlookback = self.sigma_period\n\n\t\tdsSubset.append( self.dataset[pIndex] )\n\t\ttry:\n\t\t\tfor i in range(1, lookback):\n\t\t\t\tdsSubset.append( self.dataset[pIndex-i] )\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tpass\n\n\t\tfor i in range(0, len(dsSubset)):\n\t\t\t# priceOpen = float(dsSubset[i].split(\",\")[2])\n\t\t\t# priceHigh = float(dsSubset[i].split(\",\")[3])\n\t\t\t# priceLow = float(dsSubset[i].split(\",\")[4])\n\t\t\t# priceClose = float(dsSubset[i].split(\",\")[5])\n\t\t\tvolume = int(dsSubset[i].split(\",\")[6])\n\t\t\t# tmpList.append(priceOpen)\n\t\t\t# tmpList.append(priceHigh)\n\t\t\t# tmpList.append(priceLow)\n\t\t\t# tmpList.append(priceClose)\n\t\t\ttmpList.append(volume)\n\n\t\treturn tmpList\n\n\tdef get_static_body_id(self):\n\t\treturn ep_body_create_static(self.world)\n\n\tdef get_dynamic_body_id(self):\n\t\treturn ep_body_create_dynamic(self.world, False)\n\n\tdef interpolate(self, pVal):\n\t\tnewVal = interp(pVal, [self.DATASET_LOWEST, self.DATASET_HIGHEST ], [self.WINDOW_HEIGHT*self.HEIGHT_SCALING_FACTOR, 0])\n\t\treturn newVal\n\n\tdef interpolate_volume(self, pVal):\n\t\tnewVal = interp(pVal, [self.DATASET_VOLUME_LOWEST, self.DATASET_VOLUME_HIGHEST ], [self.WINDOW_HEIGHT*self.HEIGHT_SCALING_FACTOR, 0])\n\t\treturn newVal\n\n\tdef interpolate_mfi(self, pVal):\n\t\tnewVal = interp(pVal, [self.DATASET_MFI_LOWEST, self.DATASET_MFI_HIGHEST ], [self.WINDOW_HEIGHT, 0])\n\t\treturn newVal\n\n\tdef 
game_end(self):\t\n\t\tep_world_destroy(self.world)\n\n\tdef game_run(self):\n\t\tself.game_start()\n\n\t\twhile self.run == True:\t\n\t\t\tfor event in pygame.event.get():\n\t\t\t\tif event.type == \"QUIT\":\n\t\t\t\t\tpygame.quit()\n\t\t\t\t\tsys.exit()\n\t\t\t\telif event.type == \"MOUSEMOTION\":\n\t\t\t\t\tself.mouse_x, self.mouse_y = event.pos\n\t\t\t\telif event.type == \"MOUSEBUTTONDOWN\":\n\t\t\t\t\tself.mouse_x, self.mouse_y = event.pos\n\t\t\t\t\tif ep_world_collision_test_circle(self.world, 0, self.mouse_x, self.mouse_y, 0, 1, 1, 0) > 0:\n\t\t\t\t\t\tb = ep_world_get_collision_body(self.world, 0)\n\t\t\t\t\t\ts = ep_world_get_collision_shape(self.world, 0)\n\t\t\t\t\t\tif not ep_body_is_static(self.world, b):\n\t\t\t\t\t\t\txx = ep_body_coord_world_to_local_x(self.world, b, self.mouse_x, self.mouse_y)\n\t\t\t\t\t\t\tyy = ep_body_coord_world_to_local_y(self.world, b, self.mouse_x, self.mouse_y)\n\t\t\t\t\t\t\tmousehingejoint = ep_hingejoint_create(self.world, b, self.mouseParticleId, xx, yy, 0, 0, 0)\n\t\t\t\t\t\t\tep_hingejoint_set_max_force(self.world, mousehingejoint, 10000)\n\t\t\t\telif event.type == \"MOUSEBUTTONUP\":\n\t\t\t\t\tself.mouse_x, self.mouse_y = event.pos\n\t\t\t\t\tif self.MOUSE_HINGE_JOINT != -1.0:\n\t\t\t\t\t\tep_hingejoint_destroy(self.world, self.MOUSE_HINGE_JOINT)\n\t\t\t\t\t\tself.MOUSE_HINGE_JOINT = -1.0\n\t\t\t\telif event.type == \"KEYDOWN\":\n\t\t\t\t\tif event.key == \"K_ESCAPE\":\n\t\t\t\t\t\tpygame.event.post(pygame.event.Event(QUIT))\n\t\t\t\t\telif event.key == \"K_r\":\n\t\t\t\t\t\tself.game_end()\n\t\t\t\t\t\tself.game_start()\n\t\t\t\n\t\t\tvx = self.mouse_x - ep_body_get_x_center(self.world, self.mouseParticleId)\n\t\t\tvy = self.mouse_y - ep_body_get_y_center(self.world, self.mouseParticleId)\n\t\t\tif self.MOUSE_HINGE_JOINT != -1.0:\n\t\t\t\td = math.sqrt(vx * vx + vy * vy)\n\t\t\t\tif d > 10:\n\t\t\t\t\tvx *= 10 / d\n\t\t\t\t\tvy *= 10 / d\n\t\t\tep_body_set_velocity_center(self.world, self.mouseParticleId, vx, vy, 0)\n\t\t\t\n\t\t\tfor i in range(4):\n\t\t\t\tep_world_update_contacts(self.world)\n\t\t\t\tep_world_simulate_step(self.world)\n\t\t\t\n\t\t\tself.surf_window.lock()\n\t\t\tself.surf_window.fill(pygame.Color(0, 0, 0))\n\t\t\t\n\t\t\tfor b in self.edge_boxes:\n\t\t\t\tself.draw_box(b[2], b[3], b[0], b[1], b[4], self.color_static)\n\t\t\t\n\t\t\tfor b in self.heavy_particles:\n\t\t\t\tself.draw_box(ep_body_get_x(self.world, b), \\\n\t\t\t\t\tep_body_get_y(self.world, b), self.PARTICLE_DIAMETER, self.PARTICLE_DIAMETER, ep_body_get_rot(self.world, b), \\\n\t\t\t\t\tself.COLOR_HEAVY_PARTICLES)\n\t\t\t\n\t\t\tfor b in self.light_particles:\n\t\t\t\tself.draw_box(ep_body_get_x(self.world, b), \\\n\t\t\t\t\tep_body_get_y(self.world, b), self.PARTICLE_DIAMETER, self.PARTICLE_DIAMETER, ep_body_get_rot(self.world, b), \\\n\t\t\t\t\tself.COLOR_LIGHT_PARTICLES)\n\t\t\t\n\t\t\tfor b in self.candlestick_boxes:\n\t\t\t\tself.draw_box(b[2], b[3], b[0], b[1], b[4], self.color_static)\n\t\t\t\t\t\t\n\t\t\tfor b in self.standard_dev_list:\n\t\t\t\tself.draw_standard_dev_line(b)\n\n\t\t\tfor b in self.standard_dev_list_vol:\n\t\t\t\tself.draw_standard_dev_line_vol(b)\n\n\t\t\tfor b in self.mfi:\n\t\t\t\tif self.show_MFI == True:\n\t\t\t\t\ttmpIndex = self.mfi.index(b)\n\t\t\t\t\tself.draw_mfi(b, tmpIndex)\n\n\t\t\tfor b in self.mfi_standard_dev:\t\n\t\t\t\tif self.show_MFI == True:\n\t\t\t\t\tself.draw_sd_mfi(b)\n\n\t\t\tpygame.display.set_caption(self.truncated_dataset_file_name + \" ||| \" + str( self.offset_index ) + \" steps back \" 
)\n\n\t\t\tself.surf_window.unlock()\n\t\t\t\n\t\t\tself.display_text_large(self.truncated_dataset_file_name, 10, 695, pygame.Color(255, 255, 255))\n\t\t\t\t\t\t\n\t\t\t# chart labels\n\t\t\t# text = \"----\" + str(self.DATASET_HIGHEST)\n\t\t\t# self.displayText(text, self.interpolate(self.DATASET_HIGHEST + 2), self.get_x_location_of_candle(self.DATASET_HIGHEST_INDEX),\\\n\t\t\t# \tpygame.Color(255, 255, 0))\n\n\t\t\tpygame.display.update()\n\t\t\tself.fpsclock.tick(self.FRAME_RATE)\n\n\t\t\tself.index_counter += 1\n\n\t\t\t# make another frame for the animation\n\t\t\tif self.save_sequences == True:\n\n\t\t\t\tif not os.path.exists(self.render_frames_directory + self.truncated_dataset_file_name):\n\t\t\t\t\tos.makedirs(self.render_frames_directory + self.truncated_dataset_file_name)\n\n\t\t\t\ttmpDir = self.render_frames_directory + self.truncated_dataset_file_name + \"/\" + self.code_name + \"_\" + self.number_formatter( self.index_counter )\n\n\t\t\t\tpygame.image.save(self.surf_window, tmpDir + \".png\")\n\n\t\t\t# make the histogram\n\t\t\tif self.index_counter == self.FRAME_LIMIT:\n\t\t\t\t\n\t\t\t\ttmpFileName = self.render_histogram_directory + self.truncated_dataset_file_name + \".png\"\n\t\t\t\t\t\n\t\t\t\t# make the histogram folder if it's absent\n\t\t\t\tif not os.path.exists(self.render_histogram_directory):\n\t\t\t\t\tos.makedirs(self.render_histogram_directory)\n\n\t\t\t\tself.print_verbose( \"Preparing final frame output to \" + tmpFileName ) \n\t\t\t\tpygame.image.save(self.surf_window, tmpFileName)\n\n\t\t\t\tself.make_histogram( tmpFileName )\n\n\t\t\t\t# Delete the temp file\n\t\t\t\tos.system( \"rm \" + tmpFileName )\n\n\t\t\t\tself.make_video_from_sequence()\n\n\t\t\t\tself.run = False\n\n\t\tself.game_end()\n\n\tdef make_video_from_sequence(self):\n\t\ttmpDir = self.render_frames_directory + self.truncated_dataset_file_name + \"/\"\n\n\t\tfiles = sorted( glob.glob( tmpDir + '*.png') )\n\n\t\tif len( files ) == 0:\n\t\t\tprint(\"nothing to convert in \" + tmpDir)\n\t\t\treturn\n\n\t\t# arg = \"ffmpeg -framerate 30 -pattern_type glob -i '\" + tmpDir + \"*.png' -c:v libx264 -pix_fmt yuv420p -crf 23 -y \" + self.render_frames_directory + \"/\" + self.truncated_dataset_file_name + \".mp4\"\n\t\t# os.system( arg )\n\n\t\t# make an AVI so we can convert into GIF\n\t\targ = \"ffmpeg -framerate 30 -pattern_type glob -i '\" + tmpDir + \"*.png' -c:v ffv1 -y \" + self.render_frames_directory + \"/temp.avi\"\n\t\tos.system( arg )\n\n\t\targ = \"ffmpeg -i \" + self.render_frames_directory + \"/temp.avi -pix_fmt rgb8 -y \" + self.render_frames_directory + \"/\" + \\\n\t\tself.truncated_dataset_file_name + \"_\" + self.number_formatter(self.offset_index) + \"_sig\" + str( self.sigma_period ) + \".gif\"\n\t\tos.system( arg )\n\t\tos.system( \"rm \" + self.render_frames_directory + \"temp.avi\" )\n\n\t\t# delete all PNGs from this location when done.\n\t\tshutil.rmtree(tmpDir)\n\n\tdef number_formatter(self, pNum):\n\t\treturn \"%03d\" % (pNum,)\n\n\tdef displayText(self, pTxt, pPosLeft, pPosRight, pColor):\n\t\tsurf_text = self.font.render(pTxt, False, pColor)\n\t\trect = surf_text.get_rect()\n\t\trect.topleft = (pPosLeft, pPosRight)\n\t\tself.surf_window.blit(surf_text, rect)\t\n\n\tdef display_text_large(self, pTxt, pPosLeft, pPosRight, pColor):\n\t\tsurf_text = self.font_large.render(pTxt, False, pColor)\n\t\trect = surf_text.get_rect()\n\t\trect.topleft = (pPosLeft, pPosRight)\n\t\tself.surf_window.blit(surf_text, rect)\t\n\n\tdef special_number(self):\n\t\treturn 
math.pi\n\t\treturn ((1+5 ** 0.5) / 2) * pow(math.pi, 4) \n\n\tdef get_phi(self):\n\t\treturn ((1+5 ** 0.5) / 2)\n\n\tdef make_histogram(self, pImg):\n\t\timg = Image.open(pImg)\n\t\timg_bbox = img.getbbox()\n\t\tself.draw = ImageDraw.Draw(img)\n\t\tpixels = img.load()\n\t\timbalanceRatioArray = []\n\t\toffsetY = 80\n\n\t\tfor xx in range( img.size[0] ):\n\n\t\t\theavyParticleCounter = 0.0\n\t\t\tlightParticleCounter = 0.0\n\t\t\ttmpOffset = 12\n\n\t\t\tfor yy in range(tmpOffset, img.size[1] - tmpOffset): # filter out particle detritus from the histogram data\n\n\t\t\t\tif pixels[ xx, yy ] == (0, 146, 255):\n\t\t\t\t\theavyParticleCounter += 1.0\n\t\t\t\telif pixels[ xx, yy ] == (255, 0, 255):\n\t\t\t\t\tlightParticleCounter += 1.0\n\n\t\t\timbalanceRatio1 = (heavyParticleCounter+1.0)/(lightParticleCounter+1.0)\n\t\t\timbalanceRatio2 = (lightParticleCounter+1.0)/(heavyParticleCounter+1.0)\n\t\t\timbalanceRatioArray.append( [-imbalanceRatio1, imbalanceRatio2] )\n\n\t\ttmpParticleFlowIndex = [] # experimental\n\n\t\t# Draw histogram at the top of the chart\n\t\tif self.show_histogram_ratio == True:\n\t\t\tfor r in range(0, len(imbalanceRatioArray)):\n\t\t\t\tself.draw.line(( r-1, 100+imbalanceRatioArray[r-1][0]*self.special_number(), r, 100+imbalanceRatioArray[r][0]*self.special_number()), \\\n\t\t\t\t\tfill=(self.COLOR_HISTOGRAM_UP), width=1 )\n\t\t\t\tself.draw.line(( r-1, 100+imbalanceRatioArray[r-1][1]*self.special_number(), r, 100+imbalanceRatioArray[r][1]*self.special_number()), \\\n\t\t\t\t\tfill=(self.COLOR_HISTOGRAM_DOWN), width=1 )\n\n\t\t\t\t# experimental\n\t\t\t\t# particle_flow_index = 100 / ( (imbalanceRatioArray[r][0]+1) + (imbalanceRatioArray[r][1]+1) )\n\t\t\t\t# tmpParticleFlowIndex.append( particle_flow_index )\n\t\t\t\t# print(particle_flow_index)\n\t\t\t\t# end experimental\n\n\t\t# reducerFactor = 1\n\t\t# for r in range(0, len( tmpParticleFlowIndex ) ):\n\t\t# \tself.draw.line(( r-1, tmpParticleFlowIndex[r-1]*reducerFactor, r, tmpParticleFlowIndex[r]*reducerFactor), fill=(self.COLOR_HISTOGRAM_UP), width=2 )\n\n\t\t# ---------------------------------------------------------------------------\t\t\n\t\t# Draw a simple average of the ratio - this section draws for the blue side\n\n\t\t# note: we are doing the averaging even if we don't show it, \n\t\t# this is because we need the average to perform other work later on\n\n\t\ttmpAvg1 = []\t\t\n\t\tfor r in range(0, len(imbalanceRatioArray)): \n\t\t\ttmpAvg = 0\n\t\t\ttmpthing = 0 \n\t\t\tfor f in range(0, self.histogram_simple_average_period):\n\t\t\t\ttmpthing += imbalanceRatioArray[r-f][0]\n\t\t\ttmpAvg = tmpthing/self.histogram_simple_average_period\n\t\t\ttmpAvg1.append(tmpAvg)\n\n\t\tif self.show_histogram_simple_average == True:\n\t\t\tfor r in range(0, len( tmpAvg1 ) ):\n\t\t\t\tself.draw.line(( r-1, offsetY+tmpAvg1[r-1]*self.special_number(), r, offsetY+tmpAvg1[r]*self.special_number()), fill=(self.COLOR_HISTOGRAM_UP), width=1 )\n\t\t\n\t\t# Draw a simple average of the ratio - this section draws for the pink side\n\t\ttmpAvg1 = []\n\t\tfor r in range(0, len(imbalanceRatioArray)): \n\t\t\ttmpAvg = 0\n\t\t\ttmpthing = 0 \n\t\t\tfor f in range(0, self.histogram_simple_average_period):\n\t\t\t\ttmpthing += imbalanceRatioArray[r-f][1]\n\t\t\ttmpAvg = tmpthing/self.histogram_simple_average_period\n\t\t\ttmpAvg1.append(tmpAvg)\n\n\t\tif self.show_histogram_simple_average == True:\n\t\t\tfor r in range(0, len( tmpAvg1 ) ):\n\t\t\t\tself.draw.line(( r-1, offsetY+tmpAvg1[r-1]*self.special_number(), r, 
offsetY+tmpAvg1[r]*self.special_number()), fill=(self.COLOR_HISTOGRAM_DOWN), width=1 )\n\n\t\tif self.highlight_sigma == True:\n\n\t\t\t# DRAW VERTICAL LINE AT POINT OF LOWEST STANDARD DEV\n\t\t\t# find the low points in the standard dev\n\t\t\t# put all the Y values of the standard deviation in a separate list\n\t\t\t# an entry in the list looks like [[0, 900], [10, 639.1957450511611]]\n\t\t\t# we want to look a the second nested list, and only the Y component\n\t\t\t# the higher this number is, the lower it occurs on the chart, i.e. the lowest standard dev value\n\t\t\ttmpList = []\n\t\t\tfor index in range(0, len(self.standard_dev_list)):\n\t\t\t\ttmpList.append( self.standard_dev_list[index][1][1] )\n\n\t\t\t# this works fine for the lowest, but only one result \n\t\t\t# tmpX = self.standard_dev_list[tmpList.index( max(tmpList) )][1][0]\n\t\t\t# tmpY = max(tmpList) # returns what represents the lowest standard dev value\n\t\t\t# # print(tmpX, tmpY)\n\t\t\t# self.draw.line(( tmpX, 0, tmpX, tmpY ), fill=(self.COLOR_ENTRY_SIGNAL), width=1 )\n\n\t\t\t# ----- TEST AREA -----------------------------------------------------------------------\t\n\t\t\t# TODO: determine if we can be smarter about how many lines to show per sigma low\n\n\t\t\tlargest = heapq.nlargest(self.sigma_sort_low, enumerate(tmpList), key=lambda x: x[1])\n\n\t\t\tfor item in largest:\n\t\t\t\t# self.print_debug( item )\n\n\t\t\t\ttmpX = self.standard_dev_list[item[0]][1][0]\n\t\t\t\ttmpY = item[1]\n\n\t\t\t\tbuyers = abs( imbalanceRatioArray[ self.get_x_location_of_candle( item[0] ) ][0] )\n\t\t\t\tsellers = abs( imbalanceRatioArray[ self.get_x_location_of_candle( item[0] ) ][1] )\n\t\t\t\ttmpYIndicatorStart = self.standard_dev_list_vol[ item[0] ][0][1]\n\t\t\t\tif ( buyers > sellers):\n\t\t\t\t\tself.draw.line(( tmpX, tmpYIndicatorStart, tmpX, tmpY ), fill=(self.COLOR_ENTRY_SIGNAL), width=1 )\n\t\t\t\telif ( sellers > buyers):\n\t\t\t\t\tself.draw.line(( tmpX, tmpYIndicatorStart, tmpX, tmpY ), fill=( (255,0,0) ), width=1 )\t\n\n\t\t\t\t# orig\n\t\t\t\t# self.draw.line(( tmpX, 150, tmpX, tmpY ), fill=(self.COLOR_ENTRY_SIGNAL), width=1 )\n\n\n\t\t\t\t# test area\n\t\t\t\t# new idea: \n\t\t\t\t# while we are looping the low sigmas, \n\t\t\t\t# get the histogram average based on which index we're on\n\t\t\t\t# self.print_debug( imbalanceRatioArray[ self.get_x_location_of_candle( item[0] ) ][0] )\n\n\t\t\t\t# compare this with what the volume is doing\n\n\t\t\t\t# self.print_debug( self.standard_dev_list_vol[ item[0] ] ) \n\n\t\t\t# ----------------------------------------------------------------------------------------\n\t\t# print(\"-------------------\")\n\t\t# print(len(imbalanceRatioArray))\t\t\n\t\t# print(len(self.standard_dev_list))\n\n\t\tif self.show_histogram_standard_dev == True:\n\t\t\t# Draw a standard deviation line based on the particle counts\n\t\t\t# histogram up - blue\n\t\t\tsdevParticles = []\n\t\t\tsigmaLookbackParticleCount = self.histogram_standard_dev_period\n\t\t\tsdevParticlesAdjust = 2\n\t\t\toffsetY = 125\n\n\t\t\tfor r in range(0, len(imbalanceRatioArray)): \n\t\t\t\ttopParticlesSet = []\n\t\t\t\tfor f in range(0, sigmaLookbackParticleCount):\n\t\t\t\t\ttopParticlesSet.append( imbalanceRatioArray[r-f][0] )\t\t\t\n\t\t\t\tstandardDev = sdef.getStandardDeviation(topParticlesSet).real\n\t\t\t\tstandardDev *= (math.pow( math.pi*self.get_phi(), sdevParticlesAdjust) )\n\t\t\t\tstandardDev *= -1 # negative adjustment to flip the projection\n\t\t\t\tsdevParticles.append( standardDev )\t\n\n\t\t\tfor r in 
range(0, len( sdevParticles ) ):\n\t\t\t\tself.draw.line(( r-1, offsetY+sdevParticles[r-1]*self.special_number(), r, offsetY+sdevParticles[r]*self.special_number()), fill=(self.COLOR_HISTOGRAM_UP), width=1 )\n\n\t\t\t# histogram down - pink\n\t\t\tsdevParticles = []\n\t\t\tfor r in range(0, len(imbalanceRatioArray)): \n\t\t\t\tbottomParticlesSet = []\n\t\t\t\tfor f in range(0, sigmaLookbackParticleCount):\n\t\t\t\t\tbottomParticlesSet.append( imbalanceRatioArray[r-f][1] )\t\t\t\n\t\t\t\tstandardDev = sdef.getStandardDeviation(bottomParticlesSet).real\n\t\t\t\tstandardDev *= (math.pow( math.pi*self.get_phi(), sdevParticlesAdjust) )\n\t\t\t\tstandardDev *= -1 # negative adjustment to flip the projection\n\t\t\t\tsdevParticles.append( standardDev )\n\n\t\t\tfor r in range(0, len( sdevParticles ) ):\n\t\t\t\tself.draw.line(( r-1, offsetY+sdevParticles[r-1]*self.special_number(), r, offsetY+sdevParticles[r]*self.special_number()), fill=(self.COLOR_HISTOGRAM_DOWN), width=1 )\n\n\t\t# Build the histogram directory if it's not there\n\t\tgif_animation_directory = self.render_histogram_directory + self.histogram_animation_directory + \\\n\t\t\t\t\t\t\t\t self.truncated_dataset_file_name + \"_\" + self.number_formatter(self.offset_index) + \"_sig\" + str( self.sigma_period )\n\t\tif not os.path.exists( gif_animation_directory ):\n\t\t\tos.makedirs( gif_animation_directory )\n\n\t\t# TODO: consider putting local timestamp on histogram\n\t\tlocal_current_time = \"\" # TBD\n\n\t\tprint(TextColors.HEADERLEFT + \"░ \" + TextColors.ENDC + TextColors.HEADERLEFT2 + \"░ \" + TextColors.ENDC + TextColors.HEADERLEFT3 + \"░\" + TextColors.ENDC)\n\t\tprint(TextColors.HEADERLEFT3 + \" ░\" + TextColors.ENDC + TextColors.HEADERLEFT + \" ░\" + TextColors.ENDC + TextColors.HEADERLEFT2 + \" ░\" + TextColors.ENDC)\n\n\t\t# Save the histogram\n\t\timg.save(gif_animation_directory + \"/\" + \\\n\t\t\t\t self.truncated_dataset_file_name + \"_\" + \\\n\t\t\t\t # local_current_time + \"_\" + \\\n\t\t\t\t self.number_formatter(self.offset_index) + \"_\" + \\\n\t\t\t\t self.number_formatter(self.permutation_index) + \\\n\t\t\t\t \"_sig\" + str( self.sigma_period ) + \\\n\t\t\t\t \".png\", format='PNG')\n\n\t\t# make a gif from available images\n\t\targ = \"ffmpeg -pattern_type glob -i '\" + gif_animation_directory + \"/*.png' -y \" + gif_animation_directory + \"/temp.avi\"\n\t\tos.system( arg )\n\t\targ = \"ffmpeg -i \" + gif_animation_directory + \"/temp.avi -pix_fmt rgb8 -y \" + gif_animation_directory + \"/\" + \\\n\t\t\t\tself.truncated_dataset_file_name + \"_\" + self.number_formatter(self.offset_index) + \"_sig\" + str( self.sigma_period ) + \".gif\"\n\t\tos.system( arg )\n\t\tos.system( \"rm \" + gif_animation_directory + \"/temp.avi\" )\n\n\t\tself.print_verbose(self.dataset_file + \" simulation done.\")\n\n\t\t# Automatically display the image\n\t\t# img.show()\n\n\tdef set_permutation_name(self, pIterationNumber):\n\t\tself.permutation_name = \\\n\t\tstr(pIterationNumber) + \"_\" + \\\n\t\tstr(self.dataset_file) + \"_\" + \\\n\t\tstr(self.particles_birth_count) + \"_\" + \\\n\t\tstr(self.CANDLESTICK_WIDTH) + \"_\" + \\\n\t\tstr(self.PARTICLE_DIAMETER) + \"_\" + \\\n\t\tstr(self.CANDLE_GUTTER)\n\n\tdef set_particles_birth_count(self, pParticleBirthCount):\n\t\tself.particles_birth_count = pParticleBirthCount\n\n\tdef\tset_candlestick_width(self, pCandlestickWidth):\n\t\tself.CANDLESTICK_WIDTH = pCandlestickWidth\n\n\tdef set_particles_diameter(self, pParticleDiameter):\n\t\tself.PARTICLE_DIAMETER = 
pParticleDiameter\n\n\tdef set_candle_gutter(self, pCandleGutter):\n\t\tself.CANDLE_GUTTER = pCandleGutter\n\n\tdef print_verbose(self, pMessage):\n\t\tif (self.verbose == True):\n\t\t\tprint(pMessage)\n\n\tdef print_debug(self, pMessage):\n\t\tif (self.debug == True):\n\t\t\tprint(pMessage)\t\t\t\n\t\t\n#--- RUNTIME NOTES --------------------------------------------------------------------\n# This particular flavor uses CSV files containing OHLC data. These files can be static or\n# dynamically updated, provided they adhere to the structure as included in sample CSV.\n# Place or write all CSV files in the directory specified in app.yaml.\napp_yaml = open(\"../config/app.yaml\", \"r\").readlines()\npath_to_csv_files = app_yaml[0].split(\":\")[1] # TODO: make this a little smarter\narbitraryRunLimit = 99 # The number of times to run the simulation\n\nfor r in range(0, arbitraryRunLimit): \n\n\tdataset_list = []\n\tif r == 0: # only strip this the first time\n\t\tpath_to_csv_files = path_to_csv_files.strip() + \"/*.csv\"\n\tfiles = glob.glob(path_to_csv_files) # Get all the CSV files\n\tfiles.sort(key=os.path.getmtime) # Sort the files based on latest\n\t\n\tfor csvfile in reversed(files):\n\t\tdataset_list.append(csvfile) # Add the files to a list\n\n\tfor dataset in dataset_list[:1]: # Loop up to [:N] datasets e.g. [:3]\t\t\n\t\tlookback = 0 # Default is 1. To loop iterations within a dataset use following loop with lookback. e.g., setting this to 60 will use one dataset to create 60 simulations, each one starting a candle earlier. Useful for looking for patterns on old data. Set lookback to 1 when running in a production/trading mode, assuming your CSV file is being updated in real time.\t\n\t\ti = 0\n\t\twhile i <= lookback:\n\n\t\t\tcvt = ControlVolumeTank() # The ControlVolumeTank is the class running the simulation.\n\t\t\tlookback = int(cvt.sample_period_size) # override if this was passed in\n\t\t\tcvt.permutation_index = r\n\n\t\t\tif lookback > 0:\n\t\t\t\tcvt.offset_index = i # Sets an index based on where we are at in the lookback sequence. If lookback is 1 then we aren't running multiple simulations off the same dataset, but fresh ones every time.\n\t\t\tif cvt.offset_index_override != 0:\n\t\t\t\tcvt.offset_index = cvt.offset_index_override - i\n\t\t\t\tprint(\"Beginning at candle \" + str( cvt.offset_index ))\n\t\t\tcvt.dataset_file = dataset\n\t\t\tprint( \"Current OHLC dataset: \" + TextColors.HEADERLEFT2 + TextColors.INVERTED + dataset + TextColors.ENDC)\n\t\t\trandom.seed()\n\t\t\tcvt.set_particles_diameter( 2 )\n\t\t\tcvt.set_candlestick_width( 3 )\n\t\t\tcvt.set_particles_birth_count( particle_birth_count )\n\t\t\tcvt.set_candle_gutter( 1 )\n\t\t\tcvt.game_run()\n\t\t\ti += 1\n" ]
[ [ "numpy.interp" ] ]
vishalbelsare/bbopt
[ "408e210e57b7a2aaf3cfd3a3c225fc2af6b3c56d" ]
[ "bbopt/optimizer.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __coconut_hash__ = 0x3af381c\n\n# Compiled with Coconut version 2.0.0-a_dev9 [How Not to Be Seen]\n\n\"\"\"\nThe main BBopt interface.\n\"\"\"\n\n# Coconut Header: -------------------------------------------------------------\n\nfrom __future__ import print_function, absolute_import, unicode_literals, division\nimport sys as _coconut_sys, os as _coconut_os\n_coconut_file_dir = _coconut_os.path.dirname(_coconut_os.path.abspath(__file__))\n_coconut_cached_module = _coconut_sys.modules.get(str(\"__coconut__\"))\nif _coconut_cached_module is not None and _coconut_os.path.dirname(_coconut_cached_module.__file__) != _coconut_file_dir:\n del _coconut_sys.modules[str(\"__coconut__\")]\n_coconut_sys.path.insert(0, _coconut_file_dir)\n_coconut_module_name = _coconut_os.path.splitext(_coconut_os.path.basename(_coconut_file_dir))[0]\nif _coconut_module_name and _coconut_module_name[0].isalpha() and all(c.isalpha() or c.isdigit() for c in _coconut_module_name) and \"__init__.py\" in _coconut_os.listdir(_coconut_file_dir):\n _coconut_full_module_name = str(_coconut_module_name + \".__coconut__\")\n import __coconut__ as _coconut__coconut__\n _coconut__coconut__.__name__ = _coconut_full_module_name\n for _coconut_v in vars(_coconut__coconut__).values():\n if getattr(_coconut_v, \"__module__\", None) == str(\"__coconut__\"):\n try:\n _coconut_v.__module__ = _coconut_full_module_name\n except AttributeError:\n _coconut_v_type = type(_coconut_v)\n if getattr(_coconut_v_type, \"__module__\", None) == str(\"__coconut__\"):\n _coconut_v_type.__module__ = _coconut_full_module_name\n _coconut_sys.modules[_coconut_full_module_name] = _coconut__coconut__\nfrom __coconut__ import *\nfrom __coconut__ import _coconut_call_set_names, _coconut_handle_cls_kwargs, _coconut_handle_cls_stargs, _coconut, _coconut_MatchError, _coconut_iter_getitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_forward_dubstar_compose, _coconut_back_dubstar_compose, _coconut_pipe, _coconut_star_pipe, _coconut_dubstar_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_back_dubstar_pipe, _coconut_none_pipe, _coconut_none_star_pipe, _coconut_none_dubstar_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial, _coconut_get_function_match_error, _coconut_base_pattern_func, _coconut_addpattern, _coconut_sentinel, _coconut_assert, _coconut_mark_as_match, _coconut_reiterable, _coconut_self_match_types, _coconut_dict_merge, _coconut_exec, _coconut_comma_op\n_coconut_sys.path.pop(0)\n\n# Compiled Coconut: -----------------------------------------------------------\n\n\n\nimport os\nsys = _coconut_sys\nimport json\nif _coconut_sys.version_info < (3,):\n import cPickle as pickle\nelse:\n import pickle\nimport math\nimport itertools\nimport time\nfrom collections import defaultdict\nfrom pprint import pprint\n\nimport numpy as np\n\nfrom bbopt import constants\nfrom bbopt.registry import alg_registry\nfrom bbopt.registry import meta_registry\nfrom bbopt.util import Str\nfrom bbopt.util import norm_path\nfrom bbopt.util import json_serialize\nfrom bbopt.util import best_example\nfrom bbopt.util import sync_file\nfrom bbopt.util import ensure_file\nfrom bbopt.util import clear_file\nfrom bbopt.util import denumpy_all\nfrom bbopt.util import sorted_examples\nfrom bbopt.util import running_best\nfrom bbopt.util import plot\nfrom bbopt.util import 
open_with_lock\nfrom bbopt.util import printerr\nfrom bbopt.util import convert_match_errors\nfrom bbopt.params import param_processor\nfrom bbopt.backends.util import get_backend\nfrom bbopt.backends.serving import ServingBackend\n\n\n# Utilities:\n\ndef array_param(func, name, shape, kwargs):\n \"\"\"Create a new array parameter for the given name and shape with entries from func.\"\"\"\n if not isinstance(name, Str):\n raise TypeError(\"name must be string, not {_coconut_format_0}\".format(_coconut_format_0=(name)))\n arr = np.zeros(shape)\n for indices in itertools.product(*map(range, shape)):\n index_str = \",\".join(map(str, indices))\n cell_name = \"{_coconut_format_0}[{_coconut_format_1}]\".format(_coconut_format_0=(name), _coconut_format_1=(index_str))\n proc_kwargs = (param_processor.modify_kwargs)(lambda _=None: _[indices], kwargs)\n arr[indices] = func(cell_name, **proc_kwargs)\n return arr\n\n\n# Optimizer:\n\nclass BlackBoxOptimizer(_coconut.object):\n \"\"\"Main bbopt optimizer object. See https://github.com/evhub/bbopt for documentation.\"\"\"\n backend = None\n _new_params = None\n _current_example = None\n\n @_coconut_mark_as_match\n def __init__(*_coconut_match_args, **_coconut_match_kwargs):\n _coconut_match_check_0 = False\n _coconut_match_set_name_self = _coconut_sentinel\n _coconut_match_set_name_file = _coconut_sentinel\n _coconut_match_set_name_tag = _coconut_sentinel\n _coconut_match_set_name_protocol = _coconut_sentinel\n _coconut_FunctionMatchError = _coconut_get_function_match_error()\n if (_coconut.len(_coconut_match_args) <= 2) and (_coconut.sum((_coconut.len(_coconut_match_args) > 0, \"self\" in _coconut_match_kwargs)) == 1) and (_coconut.sum((_coconut.len(_coconut_match_args) > 1, \"file\" in _coconut_match_kwargs)) == 1):\n _coconut_match_temp_0 = _coconut_match_args[0] if _coconut.len(_coconut_match_args) > 0 else _coconut_match_kwargs.pop(\"self\")\n _coconut_match_temp_1 = _coconut_match_args[1] if _coconut.len(_coconut_match_args) > 1 else _coconut_match_kwargs.pop(\"file\")\n _coconut_match_temp_2 = _coconut_match_kwargs.pop(\"tag\") if \"tag\" in _coconut_match_kwargs else None\n _coconut_match_temp_3 = _coconut_match_kwargs.pop(\"protocol\") if \"protocol\" in _coconut_match_kwargs else None\n if ((isinstance)(_coconut_match_temp_1, Str)) and (not _coconut_match_kwargs):\n _coconut_match_set_name_self = _coconut_match_temp_0\n _coconut_match_set_name_file = _coconut_match_temp_1\n _coconut_match_set_name_tag = _coconut_match_temp_2\n _coconut_match_set_name_protocol = _coconut_match_temp_3\n _coconut_match_check_0 = True\n if _coconut_match_check_0:\n if _coconut_match_set_name_self is not _coconut_sentinel:\n self = _coconut_match_temp_0\n if _coconut_match_set_name_file is not _coconut_sentinel:\n file = _coconut_match_temp_1\n if _coconut_match_set_name_tag is not _coconut_sentinel:\n tag = _coconut_match_temp_2\n if _coconut_match_set_name_protocol is not _coconut_sentinel:\n protocol = _coconut_match_temp_3\n if not _coconut_match_check_0:\n raise _coconut_FunctionMatchError('match def __init__(self, file `isinstance` Str, *, tag=None, protocol=None):', _coconut_match_args)\n\n self._backend_creation_counts = defaultdict(int)\n\n self._file = norm_path(file)\n self._tag = tag\n\n if protocol is None:\n# auto-detect protocol\n self.protocol = \"json\"\n if not os.path.exists(self.data_file):\n self.protocol = constants.default_protocol\n else:\n self.protocol = protocol\n\n self.reload()\n\n @convert_match_errors\n 
@_coconut_addpattern(__init__)\n @_coconut_mark_as_match\n def __init__(*_coconut_match_args, **_coconut_match_kwargs):\n \"\"\"\n Construct a new BlackBoxOptimizer. You must either pass file=__file__ or\n both data_dir=\"/path/to/some/dir\" and data_name=\"my_project_name\".\n \"\"\"\n _coconut_match_check_1 = False\n _coconut_match_set_name_self = _coconut_sentinel\n _coconut_match_set_name_data_dir = _coconut_sentinel\n _coconut_match_set_name_data_name = _coconut_sentinel\n _coconut_match_set_name_kwargs = _coconut_sentinel\n _coconut_FunctionMatchError = _coconut_get_function_match_error()\n if (_coconut.len(_coconut_match_args) <= 3) and (_coconut.sum((_coconut.len(_coconut_match_args) > 0, \"self\" in _coconut_match_kwargs)) == 1) and (_coconut.sum((_coconut.len(_coconut_match_args) > 1, \"data_dir\" in _coconut_match_kwargs)) == 1) and (_coconut.sum((_coconut.len(_coconut_match_args) > 2, \"data_name\" in _coconut_match_kwargs)) == 1):\n _coconut_match_temp_0 = _coconut_match_args[0] if _coconut.len(_coconut_match_args) > 0 else _coconut_match_kwargs.pop(\"self\")\n _coconut_match_temp_1 = _coconut_match_args[1] if _coconut.len(_coconut_match_args) > 1 else _coconut_match_kwargs.pop(\"data_dir\")\n _coconut_match_temp_2 = _coconut_match_args[2] if _coconut.len(_coconut_match_args) > 2 else _coconut_match_kwargs.pop(\"data_name\")\n if ((isinstance)(_coconut_match_temp_1, Str)) and ((isinstance)(_coconut_match_temp_2, Str)):\n _coconut_match_set_name_self = _coconut_match_temp_0\n _coconut_match_set_name_data_dir = _coconut_match_temp_1\n _coconut_match_set_name_data_name = _coconut_match_temp_2\n _coconut_match_set_name_kwargs = _coconut_match_kwargs\n _coconut_match_check_1 = True\n if _coconut_match_check_1:\n if _coconut_match_set_name_self is not _coconut_sentinel:\n self = _coconut_match_temp_0\n if _coconut_match_set_name_data_dir is not _coconut_sentinel:\n data_dir = _coconut_match_temp_1\n if _coconut_match_set_name_data_name is not _coconut_sentinel:\n data_name = _coconut_match_temp_2\n if _coconut_match_set_name_kwargs is not _coconut_sentinel:\n kwargs = _coconut_match_kwargs\n if not _coconut_match_check_1:\n raise _coconut_FunctionMatchError('addpattern def __init__(self, data_dir `isinstance` Str, data_name `isinstance` Str, **kwargs):', _coconut_match_args)\n\n self.__init__(os.path.join(data_dir, data_name), **kwargs)\n\n# Private utilities:\n\n def _loads(self, raw_contents):\n \"\"\"Load data from the given raw data string.\"\"\"\n if self.using_json:\n return json.loads(str(raw_contents, encoding=\"utf-8\"))\n else:\n return pickle.loads(raw_contents)\n\n def _dumps(self, unserialized_data):\n \"\"\"Dump data to a raw data string.\"\"\"\n if self.using_json:\n return json.dumps((json_serialize)(unserialized_data)).encode(encoding=\"utf-8\")\n else:\n return pickle.dumps(unserialized_data, protocol=self.protocol)\n\n @property\n def _got_reward(self):\n \"\"\"Whether we have seen a maximize/minimize call yet.\"\"\"\n return \"loss\" in self._current_example or \"gain\" in self._current_example\n\n def _set_reward(self, reward_type, value):\n \"\"\"Set the gain or loss to the given value.\"\"\"\n if self._got_reward:\n raise ValueError(\"only one call to maximize or minimize is allowed\")\n if isinstance(value, np.ndarray):\n if len(value.shape) != 1:\n raise ValueError(\"gain/loss must be a scalar or 1-dimensional array, not {_coconut_format_0}\".format(_coconut_format_0=(value)))\n value = tuple(value)\n self._current_example[reward_type] = denumpy_all(value)\n 
if not self.is_serving:\n self._save_current_data()\n# _save_current_data ensures that _old_params has already been\n# updated with _new_params, so _new_params can safely be cleared\n self._new_params = {}\n\n def _add_examples(self, examples):\n \"\"\"Load the given examples into memory.\"\"\"\n for ex in examples:\n if ex not in self._examples:\n for name, val in (list)(ex[\"values\"].items()):\n func, args, kwargs = (lambda _coconut_x: self._old_params[name] if _coconut_x is None else _coconut_x)((lambda _coconut_x: None if _coconut_x is None else _coconut_x.get(name))(self._new_params))\n ex[\"values\"][name] = param_processor.verify_support(name, val, func, *args, **kwargs)\n self._examples.append(ex)\n\n def _load_from(self, df):\n \"\"\"Load data from the given file.\"\"\"\n contents = df.read()\n if contents:\n _coconut_match_to_0 = self._loads(contents)\n _coconut_match_check_2 = False\n _coconut_match_set_name_params = _coconut_sentinel\n _coconut_match_set_name_examples = _coconut_sentinel\n if _coconut.isinstance(_coconut_match_to_0, _coconut.abc.Mapping):\n _coconut_match_temp_0 = _coconut_match_to_0.get(\"params\", _coconut_sentinel)\n _coconut_match_temp_1 = _coconut_match_to_0.get(\"examples\", _coconut_sentinel)\n if (_coconut_match_temp_0 is not _coconut_sentinel) and (_coconut_match_temp_1 is not _coconut_sentinel):\n _coconut_match_set_name_params = _coconut_match_temp_0\n _coconut_match_set_name_examples = _coconut_match_temp_1\n _coconut_match_check_2 = True\n if _coconut_match_check_2:\n if _coconut_match_set_name_params is not _coconut_sentinel:\n params = _coconut_match_temp_0\n if _coconut_match_set_name_examples is not _coconut_sentinel:\n examples = _coconut_match_temp_1\n if not _coconut_match_check_2:\n raise _coconut_MatchError('{\"params\": params, \"examples\": examples} = self._loads(contents)', _coconut_match_to_0)\n\n self._old_params = params\n self._add_examples(examples)\n\n def _load_data(self):\n \"\"\"Load examples from data file.\"\"\"\n ensure_file(self.data_file)\n with open_with_lock(self.data_file) as df:\n self._load_from(df)\n\n def _save_current_data(self):\n \"\"\"Save examples to data file.\"\"\"\n assert \"timestamp\" not in self._current_example, \"multiple _save_current_data calls on _current_example = {_coconut_format_0}\".format(_coconut_format_0=(self._current_example))\n with open_with_lock(self.data_file) as df:\n# we create the timestamp while we have the lock to ensure its uniqueness\n self._current_example[\"timestamp\"] = time.time()\n self._add_examples([self._current_example, ])\n self._save_to(df)\n\n def _save_to(self, df):\n \"\"\"Save to the given open data file.\"\"\"\n self._load_from(df)\n clear_file(df)\n ((df.write)((self._dumps)(self.get_data())))\n sync_file(df)\n\n def _get_backend(self, backend, *args, **options):\n \"\"\"Get the given backend, attempting to load from stored backends.\"\"\"\n def _coconut_lambda_0(backend):\n self._backend_creation_counts[type(backend)] += 1\n return get_backend(self._backend_store, backend, self._examples, self._old_params, *args, _current_backend=self.backend, _on_new_backend=(_coconut_lambda_0), **options)\n\n def _get_skopt_backend(self):\n \"\"\"Get a scikit-optimize backend regardless of whether currently using one.\"\"\"\n from bbopt.backends.skopt import SkoptBackend\n\n if isinstance(self.backend, SkoptBackend):\n return self.backend\n else:\n return self._get_backend(SkoptBackend)\n\n @property\n def _file_name(self):\n \"\"\"The base name of the given file.\"\"\"\n 
return os.path.splitext(os.path.basename(self._file))[0] + (\"_\" + self._tag if self._tag is not None else \"\")\n\n# External but undocumented:\n\n def reload(self):\n \"\"\"Completely reload the optimizer.\"\"\"\n self._backend_store = defaultdict(list)\n self._old_params = {}\n self._examples = []\n self._load_data()\n self.run_backend(ServingBackend)\n\n def save_data(self):\n \"\"\"Forcibly saves data.\"\"\"\n with open_with_lock(self.data_file) as df:\n self._save_to(df)\n\n @property\n def metric(self):\n \"\"\"Whether using a gain or a loss.\"\"\"\n assert self._examples, \"cannot determine metric from empty examples\"\n return \"gain\" if \"gain\" in self._examples[0] else \"loss\"\n\n @property\n def using_json(self):\n \"\"\"Whether we are currently saving in json or pickle.\"\"\"\n return self.protocol == \"json\"\n\n @property\n def num_examples(self):\n \"\"\"The number of examples seen so far (current example not counted until maximize/minimize call).\"\"\"\n return len(self._examples)\n\n# Public API:\n\n def param(self, name, func, *args, **kwargs):\n \"\"\"Create a black box parameter and return its value.\"\"\"\n if self._got_reward:\n raise ValueError(\"all parameter definitions must come before maximize/minimize\")\n if not isinstance(name, Str):\n raise TypeError(\"name must be a string, not {_coconut_format_0}\".format(_coconut_format_0=(name)))\n if name in self._new_params:\n raise ValueError(\"parameter of name {_coconut_format_0} already exists\".format(_coconut_format_0=(name)))\n\n args = param_processor.standardize_args(func, args)\n kwargs = param_processor.standardize_kwargs(kwargs)\n\n _coconut_match_to_1 = self._old_params\n _coconut_match_check_3 = False\n _coconut_match_set_name_old_func = _coconut_sentinel\n _coconut_match_set_name_old_args = _coconut_sentinel\n _coconut_match_set_name_old_kwargs = _coconut_sentinel\n if _coconut.isinstance(_coconut_match_to_1, _coconut.abc.Mapping):\n _coconut_match_temp_0 = _coconut_match_to_1.get(name, _coconut_sentinel)\n if (_coconut_match_temp_0 is not _coconut_sentinel) and (_coconut.isinstance(_coconut_match_temp_0, _coconut.abc.Sequence)) and (_coconut.len(_coconut_match_temp_0) == 3):\n _coconut_match_set_name_old_func = _coconut_match_temp_0[0]\n _coconut_match_set_name_old_args = _coconut_match_temp_0[1]\n _coconut_match_set_name_old_kwargs = _coconut_match_temp_0[2]\n _coconut_match_check_3 = True\n if _coconut_match_check_3:\n if _coconut_match_set_name_old_func is not _coconut_sentinel:\n old_func = _coconut_match_temp_0[0]\n if _coconut_match_set_name_old_args is not _coconut_sentinel:\n old_args = _coconut_match_temp_0[1]\n if _coconut_match_set_name_old_kwargs is not _coconut_sentinel:\n old_kwargs = _coconut_match_temp_0[2]\n if _coconut_match_check_3:\n if (func, args) != (old_func, old_args):\n printerr(\"BBopt Warning: detected change in parameter {_coconut_format_0} ({_coconut_format_1} != {_coconut_format_2}) (you may need to delete your old BBopt data)\".format(_coconut_format_0=(name), _coconut_format_1=((func, args)), _coconut_format_2=((old_func, old_args))))\n\n value = self.backend.param(name, func, *args, **kwargs)\n self._new_params[name] = (func, args, kwargs)\n self._current_example[\"values\"][name] = value\n return value\n\n def run_backend(self, backend, *args, **options):\n \"\"\"Optimize parameters using the given backend.\"\"\"\n if self._new_params:\n raise ValueError(\"run must come before parameter definitions or after maximize/minimize\")\n self.backend = 
self._get_backend(backend, *args, **options)\n self._new_params = {}\n self._current_example = {\"values\": {}}\n\n @property\n def algs(self):\n \"\"\"All algorithms supported by run.\"\"\"\n return alg_registry.asdict()\n\n def run(self, alg=constants.default_alg_sentinel):\n \"\"\"Optimize parameters using the given algorithm\n (use .algs to get the list of valid algorithms).\"\"\"\n if alg is constants.default_alg_sentinel:\n alg = constants.default_alg\n if alg in meta_registry:\n algs, meta_alg = meta_registry[alg]\n self.run_meta(algs, meta_alg)\n else:\n backend, options = alg_registry[alg]\n self.run_backend(backend, **options)\n\n def run_meta(self, algs, meta_alg=constants.default_alg_sentinel):\n \"\"\"Dynamically choose the best algorithm from the given set of algorithms.\"\"\"\n if meta_alg is constants.default_alg_sentinel:\n meta_alg = constants.default_meta_alg\n self.run(meta_alg)\n alg = self.choice(constants.meta_opt_alg_var, algs)\n backend, options = alg_registry[alg]\n self.backend = self._get_backend(backend, **options)\n\n def remember(self, info):\n \"\"\"Store a dictionary of information about the current run.\"\"\"\n if self._got_reward:\n raise ValueError(\"remember calls must come before maximize/minimize\")\n self._current_example.setdefault(\"memo\", {}).update(info)\n\n def minimize(self, value):\n \"\"\"Set the loss of the current run.\"\"\"\n self._set_reward(\"loss\", value)\n\n def maximize(self, value):\n \"\"\"Set the gain of the current run.\"\"\"\n self._set_reward(\"gain\", value)\n\n @property\n def is_serving(self):\n \"\"\"Whether we are currently using the serving backend or not.\"\"\"\n return isinstance(self.backend, ServingBackend) and not self.backend.allow_missing_data\n\n @property\n def data_file(self):\n \"\"\"The path to the file we are saving data to.\"\"\"\n return os.path.join(os.path.dirname(self._file), self._file_name) + constants.data_file_ext + (\".json\" if self.using_json else \".pickle\")\n\n def get_data(self, print_data=False):\n \"\"\"Get all currently-loaded data as a dictionary containing params and examples.\"\"\"\n self._old_params.update(self._new_params)\n data_dict = {\"params\": self._old_params, \"examples\": self._examples}\n if print_data:\n pprint(data_dict)\n return data_dict\n\n def tell_examples(self, examples):\n \"\"\"Adds the given examples to memory and writes the current memory to disk.\"\"\"\n self._add_examples(examples)\n self.save_data()\n\n def get_current_run(self):\n \"\"\"Return a dictionary containing the current parameters and reward.\"\"\"\n if self._current_example is None:\n raise ValueError(\"get_current_run calls must come after run\")\n return self._current_example\n\n def get_best_run(self):\n \"\"\"Return a dictionary containing the best parameters and reward computed so far.\"\"\"\n return best_example(self._examples)\n\n get_optimal_run = get_best_run\n\n @property\n def run_id(self):\n \"\"\"The run ID number if using bbopt CLI.\"\"\"\n return (lambda _coconut_x: None if _coconut_x is None else (int)(_coconut_x))(os.environ.get(constants.run_id_env_var))\n\n# Plotting functions:\n\n def plot_convergence(self, ax=None, yscale=None):\n \"\"\"Plot the best gain/loss over the history of optimization.\n Based on skopt.plots.plot_convergence.\"\"\"\n if not self._examples:\n raise ValueError(\"no existing data available to be plotted\")\n\n iterations = range(1, len(self._examples) + 1)\n best_metrics = ((list)((map)(_coconut.operator.itemgetter((self.metric)), 
(running_best)((sorted_examples)(self._examples)))))\n\n return plot(iterations, best_metrics, ax=ax, yscale=yscale, title=\"Convergence plot for {_coconut_format_0}\".format(_coconut_format_0=(self._file_name)), xlabel=\"Number of trials $n$\", ylabel=\"Best {_coconut_format_0} after $n$ trials\".format(_coconut_format_0=(self.metric)))\n\n def plot_history(self, ax=None, yscale=None):\n \"\"\"Plot the gain/loss of every point in the order in which they were sampled.\"\"\"\n if not self._examples:\n raise ValueError(\"no existing data available to be plotted\")\n\n iterations = range(1, len(self._examples) + 1)\n metrics = ((list)((map)(_coconut.operator.itemgetter((self.metric)), (sorted_examples)(self._examples))))\n\n return plot(iterations, metrics, ax=ax, yscale=yscale, title=\"History plot for {_coconut_format_0}\".format(_coconut_format_0=(self._file_name)), xlabel=\"Number of trials $n$\", ylabel=\"The {_coconut_format_0} on the $n$th trial\".format(_coconut_format_0=(self.metric)))\n\n def partial_dependence(self, i_name, j_name=None, *args, **kwargs):\n \"\"\"Calls skopt.plots.partial_dependence where i_name and j_name are parameter names.\"\"\"\n def _coconut_mock_9(self, i_name, j_name=None, *args, **kwargs): return self, i_name, j_name, args, kwargs\n while True:\n from skopt.plots import partial_dependence\n if not self._examples:\n raise ValueError(\"no existing data available to be plotted\")\n\n skopt_backend = self._get_skopt_backend()\n\n sorted_names = list(sorted(self._old_params))\n i = sorted_names.index(i_name)\n j = None if j_name is None else sorted_names.index(j_name)\n\n try:\n _coconut_tre_check_0 = partial_dependence is _coconut_recursive_func_27\n except _coconut.NameError:\n _coconut_tre_check_0 = False\n if _coconut_tre_check_0:\n self, i_name, j_name, args, kwargs = _coconut_mock_9(skopt_backend.space, skopt_backend.model, i, j, *args, **kwargs)\n continue\n else:\n return partial_dependence(skopt_backend.space, skopt_backend.model, i, j, *args, **kwargs)\n\n\n return None\n _coconut_recursive_func_27 = partial_dependence\n def plot_partial_dependence_1D(self, i_name, ax=None, yscale=None, **kwargs):\n \"\"\"Constructs a 1D partial dependence plot using self.partial_dependence.\"\"\"\n xi, yi = self.partial_dependence(i_name, **kwargs)\n return plot(xi, yi, ax=ax, yscale=yscale, title=\"Partial dependence of {_coconut_format_0}\".format(_coconut_format_0=(i_name)), xlabel=\"Values of {_coconut_format_0}\".format(_coconut_format_0=(i_name)), ylabel=\"The loss at each point\".format())\n\n def get_skopt_result(self):\n \"\"\"Get a result object usable by skopt.plots functions.\"\"\"\n if not self._examples:\n raise ValueError(\"no existing data available to be plotted\")\n return self._get_skopt_backend().result\n\n def plot_evaluations(self, *args, **kwargs):\n \"\"\"Calls skopt.plots.plot_evaluations.\"\"\"\n def _coconut_mock_11(self, *args, **kwargs): return self, args, kwargs\n while True:\n from skopt.plots import plot_evaluations\n try:\n _coconut_tre_check_1 = plot_evaluations is _coconut_recursive_func_30\n except _coconut.NameError:\n _coconut_tre_check_1 = False\n if _coconut_tre_check_1:\n self, args, kwargs = _coconut_mock_11(self.get_skopt_result(), *args, **kwargs)\n continue\n else:\n return plot_evaluations(self.get_skopt_result(), *args, **kwargs)\n\n\n return None\n _coconut_recursive_func_30 = plot_evaluations\n def plot_objective(self, *args, **kwargs):\n \"\"\"Calls skopt.plots.plot_objective.\"\"\"\n def _coconut_mock_12(self, *args, 
**kwargs): return self, args, kwargs\n while True:\n from skopt.plots import plot_objective\n try:\n _coconut_tre_check_2 = plot_objective is _coconut_recursive_func_31\n except _coconut.NameError:\n _coconut_tre_check_2 = False\n if _coconut_tre_check_2:\n self, args, kwargs = _coconut_mock_12(self.get_skopt_result(), *args, **kwargs)\n continue\n else:\n return plot_objective(self.get_skopt_result(), *args, **kwargs)\n\n\n return None\n _coconut_recursive_func_31 = plot_objective\n def plot_regret(self, *args, **kwargs):\n \"\"\"Calls skopt.plots.plot_regret.\"\"\"\n def _coconut_mock_13(self, *args, **kwargs): return self, args, kwargs\n while True:\n from skopt.plots import plot_regret\n try:\n _coconut_tre_check_3 = plot_regret is _coconut_recursive_func_32\n except _coconut.NameError:\n _coconut_tre_check_3 = False\n if _coconut_tre_check_3:\n self, args, kwargs = _coconut_mock_13(self.get_skopt_result(), *args, **kwargs)\n continue\n else:\n return plot_regret(self.get_skopt_result(), *args, **kwargs)\n\n\n# Base random functions:\n\n return None\n _coconut_recursive_func_32 = plot_regret\n def randrange(self, name, *args, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.randrange(*args).\"\"\"\n return self.param(name, \"randrange\", *args, **kwargs)\n\n def uniform(self, name, a, b, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.uniform(a, b).\"\"\"\n return self.param(name, \"uniform\", a, b, **kwargs)\n\n def triangular(self, name, low, high, mode, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.triangular(low, high, mode).\"\"\"\n return self.param(name, \"triangular\", low, high, mode, **kwargs)\n\n def betavariate(self, name, alpha, beta, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.betavariate(alpha, beta).\"\"\"\n return self.param(name, \"betavariate\", alpha, beta, **kwargs)\n\n def expovariate(self, name, lambd, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.expovariate(lambd).\"\"\"\n return self.param(name, \"expovariate\", lambd, **kwargs)\n\n def gammavariate(self, name, alpha, beta, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.gammavariate(alpha, beta).\"\"\"\n return self.param(name, \"gammavariate\", alpha, beta, **kwargs)\n\n def normalvariate(self, name, mu, sigma, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.gauss(mu, sigma).\"\"\"\n return self.param(name, \"normalvariate\", mu, sigma, **kwargs)\n\n def vonmisesvariate(self, name, kappa, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.vonmisesvariate(kappa).\"\"\"\n return self.param(name, \"vonmisesvariate\", kappa, **kwargs)\n\n def paretovariate(self, name, alpha, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.paretovariate(alpha).\"\"\"\n return self.param(name, \"paretovariate\", alpha, **kwargs)\n\n def weibullvariate(self, name, alpha, beta, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.weibullvariate(alpha, beta).\"\"\"\n return self.param(name, \"weibullvariate\", alpha, beta, **kwargs)\n\n# Choice functions:\n\n def _categorical(self, name, num_categories, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.choice(range(num_categories)).\"\"\"\n return self.param(name, \"choice\", range(num_categories), **kwargs)\n\n def choice(self, name, seq, 
**kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.choice(seq).\"\"\"\n if constants.use_generic_categories_for_categorical_data:\n (param_processor.modify_kwargs)(seq.index, kwargs)\n return seq[self._categorical(name, len(seq), **kwargs)]\n else:\n return self.param(name, \"choice\", seq, **kwargs)\n\n# Derived random functions:\n\n def randint(self, name, a, b, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.randint(a, b).\"\"\"\n start, stop = a, b - 1\n return self.randrange(name, start, stop, **kwargs)\n\n def random(self, name, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.random().\n Equivalent to random.uniform(0, 1) except that 1 is disallowed.\"\"\"\n result = self.uniform(name, 0, 1, **kwargs)\n if result >= 1:\n result -= sys.float_info.epsilon\n return result\n\n def getrandbits(self, name, k, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.getrandbits(k).\"\"\"\n stop = 2**k\n return self.randrange(name, stop, **kwargs)\n\n gauss = normalvariate\n\n def loguniform(self, name, min_val, max_val, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by\n math.exp(random.uniform(math.log(min_val), math.log(max_val))).\"\"\"\n kwargs = (param_processor.modify_kwargs)(math.log, kwargs)\n log_a, log_b = math.log(min_val), math.log(max_val)\n return math.exp(self.uniform(name, log_a, log_b, **kwargs))\n\n def lognormvariate(self, name, mu, sigma, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.lognormvariate(mu, sigma).\"\"\"\n kwargs = (param_processor.modify_kwargs)(math.log, kwargs)\n return math.exp(self.normalvariate(name, mu, sigma, **kwargs))\n\n def randbool(self, name, **kwargs):\n \"\"\"Create a new boolean parameter with the given name.\"\"\"\n return bool(self.choice(name, [False, True], **kwargs))\n\n def sample(self, name, population, k, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.sample(population, k).\"\"\"\n if not isinstance(name, Str):\n raise TypeError(\"name must be string, not {_coconut_format_0}\".format(_coconut_format_0=(name)))\n sampling_population = [x for x in population]\n sample = []\n for i in range(k):\n if len(sampling_population) <= 1:\n sample.append(sampling_population[0])\n else:\n def _coconut_lambda_1(val):\n elem = _coconut_iter_getitem(val, i)\n return sampling_population.index(elem) if elem in sampling_population else 0\n proc_kwargs = (param_processor.modify_kwargs)(_coconut_lambda_1, kwargs)\n ind = self.randrange(\"{_coconut_format_0}[{_coconut_format_1}]\".format(_coconut_format_0=(name), _coconut_format_1=(i)), len(sampling_population), **proc_kwargs)\n sample.append(sampling_population.pop(ind))\n return sample\n\n def shuffled(self, name, population, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by\n random.shuffle(population) except returned instead of modified in place.\"\"\"\n return self.sample(name, population, len(population), **kwargs)\n\n def shuffle(self, name, population, **kwargs):\n \"\"\"Create a new parameter with the given name modeled by random.shuffle(population).\"\"\"\n population[:] = self.shuffled(name, population, **kwargs)\n\n def stdnormal(self, name, **kwargs):\n \"\"\"Equivalent to bb.normalvariate(name, 0, 1).\"\"\"\n return self.normalvariate(name, 0, 1, **kwargs)\n\n# Array-based random functions:\n\n def rand(self, name, *shape, **kwargs):\n \"\"\"Create a new array 
parameter for the given name and shape modeled by np.random.rand.\"\"\"\n return array_param(self.random, name, shape, kwargs)\n\n def randn(self, name, *shape, **kwargs):\n \"\"\"Create a new array parameter for the given name and shape modeled by np.random.randn.\"\"\"\n return array_param(self.stdnormal, name, shape, kwargs)\n\n_coconut_call_set_names(BlackBoxOptimizer)\n" ]
[ [ "numpy.zeros" ] ]
shlomihod/smartnoise-sdk-synth
[ "cc143390d96f3dd8b3af365094f969dfea0d4f0b" ]
[ "snsynth/pytorch/nn/patectgan.py" ]
[ "import math\nimport numpy as np\nimport torch\nfrom torch import optim\nfrom torch import nn\nimport torch.utils.data\nfrom torch.nn import BatchNorm1d, Dropout, LeakyReLU, Linear, Module, ReLU, Sequential, Sigmoid\nfrom torch.autograd import Variable\nimport warnings\n\nfrom .data_sampler import DataSampler\nfrom ctgan.data_transformer import DataTransformer\nfrom ctgan.synthesizers import CTGANSynthesizer\n\nfrom .privacy_utils import weights_init, pate, moments_acc\n\n\nclass Discriminator(Module):\n\n def __init__(self, input_dim, discriminator_dim, loss, pac=10):\n super(Discriminator, self).__init__()\n torch.cuda.manual_seed(0)\n torch.manual_seed(0)\n\n dim = input_dim * pac\n # print ('now dim is {}'.format(dim))\n self.pac = pac\n self.pacdim = dim\n\n seq = []\n for item in list(discriminator_dim):\n seq += [Linear(dim, item), LeakyReLU(0.2), Dropout(0.5)]\n dim = item\n\n seq += [Linear(dim, 1)]\n if loss == \"cross_entropy\":\n seq += [Sigmoid()]\n self.seq = Sequential(*seq)\n\n def dragan_penalty(self, real_data, device='cpu', pac=10, lambda_=10):\n # real_data = torch.from_numpy(real_data).to(device)\n alpha = torch.rand(real_data.shape[0], 1, device=device).squeeze().expand(real_data.shape[0])\n delta = torch.normal(\n mean=0.0, std=float(pac), size=real_data.shape, device=device\n ) # 0.5 * real_data.std() * torch.rand(real_data.shape)\n x_hat = Variable((alpha * real_data.T + (1 - alpha) * (real_data + delta).T).T, requires_grad=True)\n\n pred_hat = self(x_hat.float())\n\n gradients = torch.autograd.grad(\n outputs=pred_hat,\n inputs=x_hat,\n grad_outputs=torch.ones(pred_hat.size(), device=device),\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n dragan_penalty = lambda_ * ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n\n return dragan_penalty\n\n def forward(self, input):\n assert input.size()[0] % self.pac == 0\n return self.seq(input.view(-1, self.pacdim))\n\n\nclass Residual(Module):\n\n def __init__(self, i, o):\n super(Residual, self).__init__()\n self.fc = Linear(i, o)\n self.bn = BatchNorm1d(o)\n self.relu = ReLU()\n\n def forward(self, input):\n out = self.fc(input)\n out = self.bn(out)\n out = self.relu(out)\n return torch.cat([out, input], dim=1)\n\n\nclass Generator(Module):\n\n def __init__(self, embedding_dim, generator_dim, data_dim):\n super(Generator, self).__init__()\n dim = embedding_dim\n seq = []\n for item in list(generator_dim):\n seq += [Residual(dim, item)]\n dim += item\n seq.append(Linear(dim, data_dim))\n self.seq = Sequential(*seq)\n\n def forward(self, input):\n data = self.seq(input)\n return data\n\n\nclass PATECTGAN(CTGANSynthesizer):\n\n def __init__(self,\n embedding_dim=128,\n generator_dim=(256, 256),\n discriminator_dim=(256, 256),\n generator_lr=2e-4,\n generator_decay=1e-6,\n discriminator_lr=2e-4,\n discriminator_decay=1e-6,\n batch_size=500,\n discriminator_steps=1,\n log_frequency=False,\n verbose=False,\n epochs=300,\n pac=1,\n cuda=True,\n epsilon=1,\n binary=False,\n regularization=None,\n loss=\"cross_entropy\",\n teacher_iters=5,\n student_iters=5,\n sample_per_teacher=1000,\n delta=None,\n noise_multiplier=1e-3,\n moments_order=100,\n category_epsilon_pct=0.1\n ):\n\n assert batch_size % 2 == 0\n\n self._embedding_dim = embedding_dim\n self._generator_dim = generator_dim\n self._discriminator_dim = discriminator_dim\n\n self._generator_lr = generator_lr\n self._generator_decay = generator_decay\n self._discriminator_lr = discriminator_lr\n self._discriminator_decay = discriminator_decay\n\n 
self._batch_size = batch_size\n        self._discriminator_steps = discriminator_steps\n        self._log_frequency = log_frequency\n        self._verbose = verbose\n        self._epochs = epochs\n        self.pac = pac\n        self.epsilon = epsilon\n\n        self._category_epsilon_pct = category_epsilon_pct\n\n        self.verbose = verbose\n        self.loss = loss\n\n        # PATE params\n        self.regularization = regularization if self.loss != \"wasserstein\" else \"dragan\"\n        self.teacher_iters = teacher_iters\n        self.student_iters = student_iters\n        self.pd_cols = None\n        self.pd_index = None\n        self.binary = binary\n        self.sample_per_teacher = sample_per_teacher\n        self.noise_multiplier = noise_multiplier\n        self.moments_order = moments_order\n        self.delta = delta\n\n        if not cuda or not torch.cuda.is_available():\n            device = 'cpu'\n        elif isinstance(cuda, str):\n            device = cuda\n        else:\n            device = 'cuda'\n\n        self._device = torch.device(device)\n\n        if self._log_frequency:\n            warnings.warn(\n                \"log_frequency is selected. This may result in oversampling frequent \"\n                \"categories, which could cause privacy leaks.\"\n            )\n\n    def train(self, data, categorical_columns=None, ordinal_columns=None, update_epsilon=None):\n        if update_epsilon:\n            self.epsilon = update_epsilon\n\n        for col in categorical_columns:\n            if str(data[col].dtype).startswith('float'):\n                raise ValueError(\n                    \"It looks like you are passing in a vector of continuous values \"\n                    f\"to a categorical column at [{col}]. \"\n                    \"Please discretize and pass in categorical columns with \"\n                    \"unsigned integer or string category names.\"\n                )\n\n        sample_per_teacher = (\n            self.sample_per_teacher if self.sample_per_teacher < len(data) else 1000\n        )\n        self.num_teachers = int(len(data) / sample_per_teacher) + 1\n\n        self._transformer = DataTransformer()\n        self._transformer.fit(data, discrete_columns=categorical_columns)\n        for tinfo in self._transformer._column_transform_info_list:\n            if tinfo.column_type == \"continuous\":\n                raise ValueError(\"We don't support continuous values on this synthesizer. 
Please discretize values.\")\n\n train_data = self._transformer.transform(data)\n\n data_partitions = np.array_split(train_data, self.num_teachers)\n\n data_dim = self._transformer.output_dimensions\n\n sampler_eps = 0.0\n if categorical_columns and self._category_epsilon_pct:\n sampler_eps = self.epsilon * self._category_epsilon_pct\n per_col_sampler_eps = sampler_eps / len(categorical_columns)\n self.epsilon = self.epsilon - sampler_eps\n else:\n per_col_sampler_eps = None\n\n self.cond_generator = DataSampler(\n train_data,\n self._transformer.output_info_list,\n self._log_frequency,\n per_column_epsilon=per_col_sampler_eps\n )\n\n spent = self.cond_generator.total_spent\n if (\n spent > sampler_eps\n and not np.isclose(spent, sampler_eps)\n ):\n raise AssertionError(\n f\"The data sampler used {spent} epsilon and was budgeted for {sampler_eps}\"\n )\n # create conditional generator for each teacher model\n\n # Note: Previously, there existed a ConditionalGenerator object in CTGAN\n # - that functionality has been subsumed by DataSampler, but switch is\n # essentially 1 for 1\n # don't need to count eps for each teacher, because these are disjoint partitions\n cached_probs = self.cond_generator.discrete_column_category_prob\n\n cond_generator = [\n DataSampler(\n d,\n self._transformer.output_info_list,\n self._log_frequency,\n per_column_epsilon=None,\n discrete_column_category_prob=cached_probs\n )\n for d in data_partitions\n ]\n\n self._generator = Generator(\n self._embedding_dim + self.cond_generator.dim_cond_vec(),\n self._generator_dim,\n data_dim\n ).to(self._device)\n\n discriminator = Discriminator(\n data_dim + self.cond_generator.dim_cond_vec(),\n self._discriminator_dim,\n self.loss,\n self.pac\n ).to(self._device)\n\n student_disc = discriminator\n student_disc.apply(weights_init)\n\n teacher_disc = [discriminator for i in range(self.num_teachers)]\n for i in range(self.num_teachers):\n teacher_disc[i].apply(weights_init)\n\n optimizerG = optim.Adam(\n self._generator.parameters(),\n lr=self._generator_lr,\n betas=(0.5, 0.9),\n weight_decay=self._generator_decay\n )\n\n optimizer_s = optim.Adam(student_disc.parameters(), lr=2e-4, betas=(0.5, 0.9))\n optimizer_t = [\n optim.Adam(\n teacher_disc[i].parameters(), lr=self._discriminator_lr,\n betas=(0.5, 0.9), weight_decay=self._discriminator_decay\n )\n for i in range(self.num_teachers)\n ]\n\n noise_multiplier = self.noise_multiplier\n alphas = torch.tensor([0.0 for i in range(self.moments_order)], device=self._device)\n l_list = 1 + torch.tensor(range(self.moments_order), device=self._device)\n eps = torch.zeros(1)\n\n mean = torch.zeros(self._batch_size, self._embedding_dim, device=self._device)\n std = mean + 1\n\n real_label = 1\n fake_label = 0\n\n criterion = nn.BCELoss() if (self.loss == \"cross_entropy\") else self.w_loss\n\n if self.verbose:\n print(\"using loss {} and regularization {}\".format(self.loss, self.regularization))\n\n iteration = 0\n\n if self.delta is None:\n self.delta = 1 / (train_data.shape[0] * np.sqrt(train_data.shape[0]))\n\n while eps.item() < self.epsilon:\n iteration += 1\n\n eps = min((alphas - math.log(self.delta)) / l_list)\n\n if eps.item() > self.epsilon:\n if iteration == 1:\n raise ValueError(\n \"Inputted epsilon parameter is too small to\"\n + \" create a private dataset. 
Try increasing epsilon and rerunning.\"\n )\n break\n\n # train teacher discriminators\n for t_2 in range(self.teacher_iters):\n for i in range(self.num_teachers):\n partition_data = data_partitions[i]\n data_sampler = DataSampler(\n partition_data,\n self._transformer.output_info_list,\n self._log_frequency,\n per_column_epsilon=None,\n discrete_column_category_prob=cached_probs\n )\n fakez = torch.normal(mean, std=std).to(self._device)\n\n condvec = cond_generator[i].sample_condvec(self._batch_size)\n\n if condvec is None:\n c1, m1, col, opt = None, None, None, None\n real = data_sampler.sample_data(self._batch_size, col, opt)\n else:\n c1, m1, col, opt = condvec\n c1 = torch.from_numpy(c1).to(self._device)\n m1 = torch.from_numpy(m1).to(self._device)\n fakez = torch.cat([fakez, c1], dim=1)\n perm = np.arange(self._batch_size)\n np.random.shuffle(perm)\n real = data_sampler.sample_data(self._batch_size, col[perm], opt[perm])\n c2 = c1[perm]\n\n fake = self._generator(fakez)\n fakeact = self._apply_activate(fake)\n\n real = torch.from_numpy(real.astype(\"float32\")).to(self._device)\n\n if c1 is not None:\n fake_cat = torch.cat([fakeact, c1], dim=1)\n real_cat = torch.cat([real, c2], dim=1)\n else:\n real_cat = real\n fake_cat = fake\n\n optimizer_t[i].zero_grad()\n\n y_all = torch.cat([teacher_disc[i](fake_cat), teacher_disc[i](real_cat)])\n label_fake = torch.full(\n (int(self._batch_size / self.pac), 1),\n fake_label,\n dtype=torch.float,\n device=self._device,\n )\n label_true = torch.full(\n (int(self._batch_size / self.pac), 1),\n real_label,\n dtype=torch.float,\n device=self._device,\n )\n labels = torch.cat([label_fake, label_true])\n\n error_d = criterion(y_all.squeeze(), labels.squeeze())\n error_d.backward()\n\n if self.regularization == \"dragan\":\n pen = teacher_disc[i].dragan_penalty(real_cat, device=self._device)\n pen.backward(retain_graph=True)\n\n optimizer_t[i].step()\n ###\n # train student discriminator\n for t_3 in range(self.student_iters):\n data_sampler = DataSampler(\n train_data,\n self._transformer.output_info_list,\n self._log_frequency,\n per_column_epsilon=None,\n discrete_column_category_prob=cached_probs\n )\n fakez = torch.normal(mean=mean, std=std)\n\n condvec = self.cond_generator.sample_condvec(self._batch_size)\n\n if condvec is None:\n c1, m1, col, opt = None, None, None, None\n real = data_sampler.sample_data(self._batch_size, col, opt)\n else:\n c1, m1, col, opt = condvec\n c1 = torch.from_numpy(c1).to(self._device)\n m1 = torch.from_numpy(m1).to(self._device)\n fakez = torch.cat([fakez, c1], dim=1)\n\n perm = np.arange(self._batch_size)\n np.random.shuffle(perm)\n real = data_sampler.sample_data(\n self._batch_size, col[perm], opt[perm])\n c2 = c1[perm]\n\n fake = self._generator(fakez)\n fakeact = self._apply_activate(fake)\n\n if c1 is not None:\n fake_cat = torch.cat([fakeact, c1], dim=1)\n else:\n fake_cat = fakeact\n\n fake_data = fake_cat\n\n ###\n predictions, votes = pate(\n fake_data, teacher_disc, noise_multiplier, device=self._device\n )\n\n output = student_disc(fake_data.detach())\n\n # update moments accountant\n alphas = alphas + moments_acc(\n self.num_teachers, votes, noise_multiplier, l_list, device=self._device\n )\n\n loss_s = criterion(output.squeeze(), predictions.float().to(self._device).squeeze())\n\n optimizer_s.zero_grad()\n loss_s.backward()\n\n if self.regularization == \"dragan\":\n vals = torch.cat([predictions, fake_data], axis=1)\n ordered = vals[vals[:, 0].sort()[1]]\n data_list = torch.split(\n ordered, 
predictions.shape[0] - int(predictions.sum().item())\n )\n synth_cat = torch.cat(data_list[1:], axis=0)[:, 1:]\n pen = student_disc.dragan_penalty(synth_cat, device=self._device)\n pen.backward(retain_graph=True)\n\n optimizer_s.step()\n\n # print ('iterator {i}, student discriminator loss is {j}'.format(i=t_3, j=loss_s))\n\n # train generator\n fakez = torch.normal(mean=mean, std=std)\n condvec = self.cond_generator.sample_condvec(self._batch_size)\n\n if condvec is None:\n c1, m1, col, opt = None, None, None, None\n else:\n c1, m1, col, opt = condvec\n c1 = torch.from_numpy(c1).to(self._device)\n m1 = torch.from_numpy(m1).to(self._device)\n fakez = torch.cat([fakez, c1], dim=1)\n\n fake = self._generator(fakez)\n fakeact = self._apply_activate(fake)\n\n if c1 is not None:\n y_fake = student_disc(torch.cat([fakeact, c1], dim=1))\n else:\n y_fake = student_disc(fakeact)\n\n if condvec is None:\n cross_entropy = 0\n else:\n cross_entropy = self._cond_loss(fake, c1, m1)\n\n if self.loss == \"cross_entropy\":\n label_g = torch.full(\n (int(self._batch_size / self.pac), 1),\n real_label,\n dtype=torch.float,\n device=self._device,\n )\n loss_g = criterion(y_fake.squeeze(), label_g.float().squeeze())\n loss_g = loss_g + cross_entropy\n else:\n loss_g = -torch.mean(y_fake) + cross_entropy\n\n optimizerG.zero_grad()\n loss_g.backward()\n optimizerG.step()\n\n if self.verbose:\n print(\n \"eps: {:f} \\t G: {:f} \\t D: {:f}\".format(\n eps, loss_g.detach().cpu(), loss_s.detach().cpu()\n )\n )\n\n def w_loss(self, output, labels):\n vals = torch.cat([labels[None, :], output[None, :]], axis=1)\n ordered = vals[vals[:, 0].sort()[1]]\n data_list = torch.split(ordered, labels.shape[0] - int(labels.sum().item()))\n fake_score = data_list[0][:, 1]\n true_score = torch.cat(data_list[1:], axis=0)[:, 1]\n w_loss = -(torch.mean(true_score) - torch.mean(fake_score))\n return w_loss\n\n def generate(self, n, condition_column=None, condition_value=None):\n \"\"\"\n TODO: Add condition_column support from CTGAN\n \"\"\"\n self._generator.eval()\n\n # output_info = self._transformer.output_info\n steps = n // self._batch_size + 1\n data = []\n for i in range(steps):\n mean = torch.zeros(self._batch_size, self._embedding_dim)\n std = mean + 1\n fakez = torch.normal(mean=mean, std=std).to(self._device)\n\n condvec = self.cond_generator.sample_original_condvec(self._batch_size)\n\n if condvec is None:\n pass\n else:\n c1 = condvec\n c1 = torch.from_numpy(c1).to(self._device)\n fakez = torch.cat([fakez, c1], dim=1)\n\n fake = self._generator(fakez)\n fakeact = self._apply_activate(fake)\n data.append(fakeact.detach().cpu().numpy())\n\n data = np.concatenate(data, axis=0)\n data = data[:n]\n\n return self._transformer.inverse_transform(data)\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.cuda.manual_seed", "numpy.isclose", "torch.nn.LeakyReLU", "torch.cuda.is_available", "numpy.concatenate", "torch.autograd.Variable", "torch.normal", "torch.manual_seed", "numpy.arange", "torch.nn.BCELoss", "numpy.sqrt", "torch.zeros", "torch.device", "torch.nn.Sequential", "numpy.random.shuffle", "torch.nn.ReLU", "numpy.array_split", "torch.rand", "torch.nn.Dropout", "torch.nn.Sigmoid", "torch.from_numpy", "torch.nn.BatchNorm1d", "torch.mean" ] ]
shhong/nrn
[ "0d64e94330c6072529e31033d579b270f742454e" ]
[ "test/pynrn/test_vector_api.py" ]
[ "import sys\nimport numpy as np\nfrom neuron import h, hoc, numpy_element_ref as npyref\n\n\ndef copy(src, result, *args, dest=None):\n if dest is None:\n dest = h.Vector()\n dest.copy(src, *args)\n assert dest.to_python() == result\n\ndef vwrite_type(src, vtype):\n f = h.File()\n fname = \"vwrite.{}.tmp\".format(str(vtype))\n f.wopen(fname)\n src.c().vwrite(f, vtype)\n f.close()\n f.ropen(fname)\n vr = h.Vector(vtype)\n vr.vread(f)\n f.close()\n f.unlink()\n assert src.to_python() == vr.to_python()\n\n\ndef test_vector_api():\n \"\"\"\n Construction\n \"\"\"\n vi = h.Vector((1, 2, 3))\n assert vi.to_python() == [1.0, 2.0, 3.0]\n assert vi.get(1) == 2.0\n vi.set(1, 2.1)\n assert vi.to_python() == [1.0, 2.1, 3.0]\n del vi\n\n v = h.Vector(np.array([5, 1, 6], 'd'))\n assert v.to_python() == [5.0, 1.0, 6.0]\n v.clear()\n assert v.size() == 0\n del v\n\n v = h.Vector(3)\n assert v.to_python() == [0.0, 0.0, 0.0]\n del v\n\n v = h.Vector(3, 1)\n assert v.to_python() == [1.0, 1.0, 1.0]\n del v\n\n assert h.Vector().from_python((1, 2, 3)).to_python() == [1.0, 2.0, 3.0]\n\n v = h.Vector()\n v.append(3, 3)\n v.append(2)\n v.append(1)\n v.append(5)\n\n \"\"\"\n Vector size & capacity\n \"\"\"\n assert v.to_python() == [3.0, 3.0, 2.0, 1.0, 5.0]\n assert v.size() == 5\n v.buffer_size(v.size())\n assert v.buffer_size() >= v.size()\n v.buffer_size(6)\n assert v.buffer_size() >= 6\n assert v.to_python() == [3.0, 3.0, 2.0, 1.0, 5.0]\n assert v.eq(v.c())\n\n \"\"\"\n Calculations\n \"\"\"\n assert v.median() == 3.0\n assert v.mean() == 2.8\n assert v.mean(1, 3) == 2.0\n assert np.allclose(v.stdev(), 1.4832396974191326)\n assert np.allclose(v.stdev(0, 3), 0.9574271077563381)\n assert np.allclose(v.stderr(), 0.6633249580710799)\n assert np.allclose(v.stderr(1, 3), 0.5773502691896258)\n assert v.sum() == 14.0\n assert v.sum(1, 3) == 6.0\n assert np.allclose(v.sumgauss(-1, 1, .5, 1).to_python(), [0.05869048145253869, 0.14879136924715222, 0.30482687457572216, 0.5166555071584352])\n assert np.allclose(v.sumgauss(-1, 1, .5, 1, h.Vector((1, 3, 2, 5, 4))).to_python(), [0.2793538745964073, 0.6861357408871805, 1.3355688961479038, 2.0895389620919826])\n assert np.allclose(v.cl().smhist(v.cl(), 1, 3, 2, 1).to_python(), [0.9060003240550064, 0.9598574603424295, 0.5071918738793386])\n assert np.allclose(v.cl().smhist(v.cl(), 1, 3, 2, 1, h.Vector((1, 3, 2, 5, 4))).to_python(), [3.009095149765841, 2.1896697532507994, 1.8126697992388372])\n assert v.sumsq() == 48.0\n assert v.sumsq(2, 4) == 30.0\n assert v.var() == 2.2\n assert v.var(2, 3) == 0.5\n assert v.min() == 1.0\n assert v.min(0, 2) == 2.0\n assert h.Vector().min() == 0.0\n assert v.min_ind() == 3\n assert v.min_ind(0, 2) == 2\n assert h.Vector().min_ind() == -1.0\n assert v.max() == 5.0\n assert v.max(0, 2) == 3.0\n assert h.Vector().max() == 0.0\n assert v.max_ind() == 4\n assert v.max_ind(0, 2) == 0\n assert h.Vector().max_ind() == -1.0\n assert v.dot(h.Vector((1, 2, 3, 4, 5))) == 44.0\n assert np.allclose(v.mag(), 6.928203230275509)\n assert v.c().reverse().meansqerr(v) == 3.2\n assert v.c().reverse().meansqerr(v, h.Vector((1, 2, 5, 4, 3))) == 8.0\n assert v.c().trigavg(h.Vector((1, 2, 3, 4, 5)), h.Vector((1, 2, 3, 4, 5)), 1, 2) == 2\n\n \"\"\"\n Copying\n \"\"\"\n # vdest.copy(vsrc)\n copy(v, [3.0, 3.0, 2.0, 1.0, 5.0])\n # vdest.copy(vsrc, dest_start)\n copy(v, [0.0, 0.0, 3.0, 3.0, 2.0, 1.0, 5.0], 2)\n # vdest.copy(vsrc, src_start, src_end)\n copy(v, [2.0, 1.0], 2, 3)\n copy(v, [2.0, 1.0, 5.0], 2, -1) # -1 for actual end\n # vdest.copy(vsrc, dest_start, 
src_start, src_end)\n    copy(v, [0.0, 2.0, 1.0], 1, 2, 3)\n    # vdest.copy(vsrc, dest_start, src_start, src_end, dest_inc, src_inc)\n    copy(v, [0.0, 3.0, 1.0], 1, 1, -1, 1, 2)\n    # vdest.copy(vsrc, vsrcdestindex)\n    copy(v, [0.0, 3.0, 2.0], h.Vector((1, 2)), dest=h.Vector(3, 0.))\n    # vdest.copy(vsrc, vsrcindex, vdestindex)\n    copy(v, [3.0, 2.0, 0.0], h.Vector((1, 2)), h.Vector((0, 1)), dest=h.Vector(3, 0.))\n    copy(v, [3.0], h.Vector((1, 2)), h.Vector((0, 1)), dest=h.Vector(1, 0.))\n    assert v.c().to_python() == v.to_python()\n    assert v.c(1).to_python() == [3.0, 2.0, 1.0, 5.0]\n    assert v.c(1, 3).to_python() == [3.0, 2.0, 1.0]\n    assert v.at().to_python() == v.to_python()\n    assert v.at(1).to_python() == [3.0, 2.0, 1.0, 5.0]\n    assert v.at(1, 3).to_python() == [3.0, 2.0, 1.0]\n\n    \"\"\"\n    Data morphing and operations\n    \"\"\"\n    assert v.resize(4).size() == 4\n    assert v.fill(1.).to_python() == [1.0, 1.0, 1.0, 1.0]\n    assert v.fill(1.1, 1, 2).to_python() == [1.0, 1.1, 1.1, 1.0]\n    # obj = vsrcdest.indgen()\n    assert v.indgen().to_python() == [0.0, 1.0, 2.0, 3.0]\n    # obj = vsrcdest.indgen(stepsize)\n    assert v.indgen(2).to_python() == [0.0, 2.0, 4.0, 6.0]\n    # obj = vsrcdest.indgen(start,stepsize)\n    assert v.indgen(2, 5).to_python() == [2.0, 7.0, 12.0, 17.0]\n    # obj = vsrcdest.indgen(start,stop,stepsize)\n    assert v.indgen(1, 20, 5).to_python() == [1.0, 6.0, 11.0, 16.0]\n    assert v.append(h.Vector(1, 17.), 18., 19.).to_python() == [1.0, 6.0, 11.0, 16.0, 17.0, 18.0, 19.0]\n    assert v.insrt(1, 3.0).to_python() == [1.0, 3.0, 6.0, 11.0, 16.0, 17.0, 18.0, 19.0]\n    assert v.insrt(3, h.Vector(1, 9.0)).to_python() == [1.0, 3.0, 6.0, 9.0, 11.0, 16.0, 17.0, 18.0, 19.0]\n    assert v.remove(4).to_python() == [1.0, 3.0, 6.0, 9.0, 16.0, 17.0, 18.0, 19.0]\n    assert v.remove(1, 5).to_python() == [1.0, 18.0, 19.0]\n    h('double px[3]')\n    h.px[0] = 5\n    h.px[2] = 2\n    assert v.from_double(3, h._ref_px[0]).to_python() == [5.0, 0.0, 2.0]\n    a = np.array([5, 1, 6], 'd')\n    assert v.from_double(3, npyref(a, 0)).to_python() == [5.0, 1.0, 6.0]\n    v.indgen(1, 30, 5)\n    assert v.to_python() == [1.0, 6.0, 11.0, 16.0, 21.0, 26.0]\n    assert v.contains(6.0)\n    assert not v.contains(7.0)\n    assert h.Vector().where(v, \">=\", 10).to_python() == [11.0, 16.0, 21.0, 26.0]\n    assert h.Vector().where(v, \"<=\", 11).to_python() == [1.0, 6.0, 11.0]\n    assert h.Vector().where(v, \"!=\", 11).to_python() == [1.0, 6.0, 16.0, 21.0, 26.0]\n    assert h.Vector().where(v, \"==\", 11).to_python() == [11.0]\n    assert h.Vector().where(v, \"<\", 11).to_python() == [1.0, 6.0]\n    assert h.Vector().where(v, \">\", 11).to_python() == [16.0, 21.0, 26.0]\n    assert h.Vector().where(v, \"[)\", 9, 21).to_python() == [11.0, 16.0]\n    assert h.Vector().where(v, \"[]\", 9, 21).to_python() == [11.0, 16.0, 21.0]\n    assert h.Vector().where(v, \"(]\", 11, 21).to_python() == [16.0, 21.0]\n    assert h.Vector().where(v, \"()\", 11, 21).to_python() == [16.0]\n    assert v.where(\">\", 1.0).to_python() == [6.0, 11.0, 16.0, 21.0, 26.0]\n    assert v.where(\"[)\", 6.0, 26.0).to_python() == [6.0, 11.0, 16.0, 21.0]\n    assert v.indwhere(\">\", 11.0) == 2\n    assert v.indwhere(\"<\", 11.0) == 0\n    assert v.indwhere(\"!=\", 11.0) == 0\n    assert v.indwhere(\">=\", 11.0) == 1\n    assert v.indwhere(\"<=\", 11.0) == 0\n    assert v.indwhere(\"[)\", 11.1, 16.0) == -1\n    assert v.indwhere(\"[)\", 11.0, 16.0) == 1\n    assert v.indwhere(\"(]\", 11.0, 16.0) == 2\n    assert v.indwhere(\"[]\", 11.0, 16.0) == 1\n    assert v.indwhere(\"()\", 16.0, 11.0) == -1\n    assert h.Vector().indvwhere(v, \"()\", 11, 21).to_python() 
== [2.0]\n assert h.Vector().indvwhere(v, \"==\", 11).to_python() == [1.0]\n assert h.Vector().indvwhere(v, \"[]\", 1, 17).to_python() == [0.0, 1.0, 2.0]\n assert h.Vector().indvwhere(v, \"(]\", 1, 16).to_python() == [0.0, 1.0, 2.0]\n assert h.Vector().indvwhere(v, \"[)\", 1, 16).to_python() == [0.0, 1.0]\n assert h.Vector().indvwhere(v, \"!=\", 11).to_python() == [0.0, 2.0, 3.0]\n assert h.Vector().indvwhere(v, \"<\", 11).to_python() == [0.0]\n assert h.Vector().indvwhere(v, \"<=\", 11).to_python() == [0.0, 1.0]\n assert h.Vector().indvwhere(v, \">\", 16).to_python() == [3.0]\n assert h.Vector().indvwhere(v, \">=\", 16).to_python() == [2.0, 3.0]\n\n assert v.histogram(1.0, 20.0, 10).to_python() == [0.0, 1.0, 2.0]\n assert h.Vector().hist(v, 1.0, 2.0, 10).to_python() == [1.0, 2.0]\n assert v.ind(h.Vector((1, 3))).to_python() == [11.0, 21.0]\n assert h.Vector().spikebin(v.c(), 12.0).to_python() == [0.0, 0.0, 1.0, 0.0]\n\n \"\"\"\n Vector metadata\n \"\"\"\n assert v.label() == ''\n v.label('v')\n assert v.label() == 'v'\n assert v.cl().label() == 'v'\n v.label('v2')\n assert v.label() == 'v2'\n\n \"\"\"\n Transformations\n \"\"\"\n v = h.Vector((3, 2, 15, 16))\n assert np.allclose(v.c().apply(\"sin\").to_python(), [0.1411200080598672, 0.9092974268256817, 0.6502878401571169, -0.2879033166650653])\n assert np.allclose(v.c().apply(\"sin\", 1, 2).to_python(), [3.0, 0.9092974268256817, 0.6502878401571169, 16.0])\n h(\"func sq(){return $1*$1}\")\n assert np.allclose(v.c().apply(\"sq\").to_python(), [9.0, 4.0, 225.0, 256.0])\n assert v.reduce(\"sq\", 100) == 594.0\n assert h.Vector().deriv(v, 0.1).to_python() == [-10.0, 60.0, 70.0, 10.0]\n assert h.Vector().deriv(v, 1, 1).to_python() == [-1.0, 13.0, 1.0]\n assert h.Vector().deriv(v, 1, 2).to_python() == [-1.0, 6.0, 7.0, 1.0]\n assert np.allclose(v.c().interpolate(v.c(), v.c().apply(\"sqrt\")).to_python(), [10.384365150689874, 5.097168242109362, 16.0, 16.0])\n assert np.allclose(v.c().interpolate(v.c(), v.c().apply(\"sqrt\"), h.Vector((1, 2, 3, 4))).to_python(), [2.644951165437683, 2.2382437109314894, 4.0, 4.0])\n assert h.Vector().integral(v).to_python() == [3.0, 5.0, 20.0, 36.0]\n assert np.allclose(h.Vector().integral(v, 0.1).to_python(), [3.0, 3.2, 4.7, 6.300000000000001])\n assert v.c().medfltr().to_python() == [3.0, 3.0, 3.0, 3.0]\n assert v.c().medfltr(h.Vector((1, 2, 3, 4))).to_python() == [2.0, 2.0, 2.0, 2.0]\n assert v.c().sort().to_python() == [2.0, 3.0, 15.0, 16.0]\n assert v.sortindex().to_python() == [1.0, 0.0, 2.0, 3.0]\n assert v.c().reverse().to_python() == [16.0, 15.0, 2.0, 3.0]\n assert v.c().rotate(3).to_python() == [2.0, 15.0, 16.0, 3.0]\n assert v.c().rotate(3, 0).to_python() == [0.0, 0.0, 0.0, 3.0]\n assert h.Vector().rebin(v, 2).to_python() == [5.0, 31.0]\n assert v.c().rebin(2).to_python() == [5.0, 31.0]\n assert h.Vector().pow(v, 2).to_python() == [9.0, 4.0, 225.0, 256.0]\n assert np.allclose(v.c().pow(v, 0).to_python(), [1.0, 1.0, 1.0, 1.0])\n assert np.allclose(v.c().pow(v, 0.5).to_python(), [1.7320508075688772, 1.4142135623730951, 3.872983346207417, 4.0])\n assert np.allclose(v.c().pow(v, -1).to_python(), [0.3333333333333333, 0.5, 0.06666666666666667, 0.0625])\n assert np.allclose(v.c().pow(v, 1).to_python(), [3.0, 2.0, 15.0, 16.0])\n assert np.allclose(v.c().pow(v, 3).to_python(), [27.0, 8.0, 3375.0, 4096.0])\n assert v.c().pow(2).to_python() == [9.0, 4.0, 225.0, 256.0]\n assert np.allclose(h.Vector().sqrt(v).to_python(), [1.7320508075688772, 1.4142135623730951, 3.872983346207417, 4.0])\n assert 
np.allclose(v.c().sqrt().to_python(), [1.7320508075688772, 1.4142135623730951, 3.872983346207417, 4.0])\n assert np.allclose(h.Vector().log(v).to_python(), [1.0986122886681098, 0.6931471805599453, 2.70805020110221, 2.772588722239781])\n assert np.allclose(v.c().log().to_python(), [1.0986122886681098, 0.6931471805599453, 2.70805020110221, 2.772588722239781])\n assert np.allclose(h.Vector().log10(v).to_python(), [0.47712125471966244, 0.3010299956639812, 1.1760912590556813, 1.2041199826559248])\n assert np.allclose(v.c().log10().to_python(), [0.47712125471966244, 0.3010299956639812, 1.1760912590556813, 1.2041199826559248])\n assert np.allclose(h.Vector().tanh(v).to_python() , [0.9950547536867305, 0.9640275800758169, 0.9999999999998128, 0.9999999999999747])\n assert np.allclose(v.c().tanh().to_python(), [0.9950547536867305, 0.9640275800758169, 0.9999999999998128, 0.9999999999999747])\n assert h.Vector([1.2312414, 3.1231, 5.49554, 6.5000000001]).floor().to_python() == [1.0, 3.0, 5.0, 6.0]\n assert h.Vector([-1.0, -3.0, -5.0, -6.0]).abs().to_python() == [1.0, 3.0, 5.0, 6.0]\n assert v.c().add(h.Vector((1.1, 2.2, 3.3, 4.4))).to_python() == [4.1, 4.2, 18.3, 20.4]\n assert v.c().add(1.3).to_python() == [4.3, 3.3, 16.3, 17.3]\n assert v.c().sub(h.Vector((1.1, 2, 3.3, 4))).to_python() == [1.9, 0.0, 11.7, 12.0]\n assert v.c().sub(1.3).to_python() == [1.7, 0.7, 13.7, 14.7]\n assert v.c().mul(h.Vector((1.5, 2, 3, 4))).to_python() == [4.5, 4.0, 45.0, 64.0]\n assert v.c().mul(2.5).to_python() == [7.5, 5.0, 37.5, 40.0]\n assert v.c().div(h.Vector((1.5, 2, 3, 4))).to_python() == [2.0, 1.0, 5.0, 4.0]\n assert v.c().div(2.5).to_python() == [1.2, 0.8, 6.0, 6.4]\n vs = v.c()\n assert np.allclose(vs.scale(2, 5), 0.21428571428571427)\n assert np.allclose(vs.to_python(), [2.2142857142857144, 2.0, 4.785714285714286, 5.0])\n assert np.allclose(v.c().sin(1, 1).to_python(), [0.8414709848078965, 0.844849172063764, 0.8481940061209319, 0.8515053549310787])\n assert np.allclose(v.c().sin(1, 1, 2).to_python(), [0.8414709848078965, 0.8481940061209319, 0.8547830877678237, 0.8612371892561972])\n\n \"\"\"\n Fourier\n \"\"\"\n assert v.to_python() == [3.0, 2.0, 15.0, 16.0]\n assert h.Vector(v.size()).correl(v).to_python() == [494.0, 324.0, 154.0, 324.0]\n assert h.Vector(v.size()).convlv(v, v.c().reverse()).to_python() == [294.0, 122.0, 318.0, 490.0]\n assert np.allclose(h.Vector(v.size()).convlv(v, v.c().reverse(), -1).to_python(), [305.9999866504336, 306.0, 306.0000133495664, 306.0])\n assert v.c().spctrm(v).to_python() == [60.625, 2.0, 15.0, 16.0]\n assert h.Vector(v.size()).filter(v, v.c().reverse()).to_python() == [308.0, -66.0, 376.0, 750.0]\n assert h.Vector(v.size()).fft(v, -1).to_python() == [17.5, 16.5, -12.5, -15.5]\n assert v.c().fft(-1).to_python() == h.Vector(v.size()).fft(v, -1).to_python()\n \n \"\"\"\n I/O\n \"\"\"\n assert v.to_python() == [3.0, 2.0, 15.0, 16.0]\n f = h.File()\n f.wopen(\"temp.tmp\")\n v.vwrite(f)\n f.close()\n assert v.to_python() == [3.0, 2.0, 15.0, 16.0]\n\n vr = h.Vector()\n f.ropen(\"temp.tmp\")\n vr.vread(f)\n assert vr.to_python() == v.to_python()\n f.close()\n f.unlink()\n\n f.wopen(\"temp.tmp\")\n f.printf(\"%d %d %d %d\\n\", 3, 2, 15, 16)\n f.close()\n f.ropen(\"temp.tmp\")\n\n vr.resize(0)\n vr.scanf(f)\n assert vr.to_python() == v.to_python()\n f.seek(0)\n vr2 = h.Vector(4)\n vr2.scanf(f)\n assert vr.to_python() == vr2.to_python()\n f.seek(0)\n vr.resize(0)\n vr.scanf(f, 1)\n assert vr.to_python() == [3.0]\n vr.scanf(f, 1)\n assert vr.to_python() == [2.0]\n vr.resize(0)\n 
f.seek(0)\n vr.scantil(f, 15.0)\n assert vr.to_python() == [3.0, 2.0]\n f.close()\n f.unlink()\n\n # Columns\n f.wopen(\"col.tmp\")\n f.printf(\"%d %d %d %d\\n\", 3, 2, 15, 16)\n f.printf(\"%d %d %d %d\\n\", 6, 9, 7, 21)\n f.printf(\"%d %d %d %d\\n\", 1, 4, 5, 22)\n f.printf(\"%d %d %d %d\\n\", 3, 8, 14, 23)\n f.close()\n f.ropen(\"col.tmp\")\n vc = h.Vector()\n vc.scanf(f, 3, 2, 4)\n assert vc.to_python() == [2.0, 9.0, 4.0]\n vc.scanf(f, 3, 2, 4)\n assert vc.to_python() == [8.0]\n f.close()\n f.ropen(\"col.tmp\")\n vc = h.Vector()\n vc.scanf(f, 3, 4)\n assert vc.to_python() == [15.0, 7.0, 5.0, 14.0]\n f.seek(0)\n vc.scantil(f, 5.0, 3, 4)\n assert vc.to_python() == [15.0, 7.0]\n vc.printf() # code cov\n vc.printf(\"%8.4f \")\n vc.printf(\"%8.4f \", 0, 1)\n f.close()\n f.unlink()\n\n # Vwrite types\n vwrite_type(h.Vector([1, 2, 3, 4]), 1)\n vwrite_type(h.Vector([4, 3, 2, 1]), 2)\n vwrite_type(h.Vector([4, 5, 6, 7]), 3)\n vwrite_type(h.Vector([7, 8, 9, 10]), 4)\n vwrite_type(h.Vector([0, 1, 2, 33]), 5)\n\n\n \"\"\"\n Random \n \"\"\"\n vrand = h.Vector((1, 2, 3))\n r = h.Random()\n r.poisson(12)\n assert vrand.cl().setrand(r).to_python() == [10.0, 16.0, 11.0]\n assert vrand.cl().setrand(r, 1, 2).to_python() == [1.0, 9.0, 18.0]\n assert vrand.cl().addrand(r).to_python() == [9.0, 9.0, 16.0]\n assert vrand.cl().addrand(r, 0, 1).to_python() == [13.0, 16.0, 3.0]\n\n \"\"\"\n Misc \n \"\"\"\n assert h.Vector().inf(h.Vector((3, 2, 4)), 2, 3, 4, 5, 6, 7).to_python() == [4.0, 5.2, 4.56]\n assert h.Vector().resample(h.Vector((3, 2, 4, 6, 7)), 2).to_python() == [3.0, 3.0, 2.0, 2.0, 4.0, 4.0, 6.0, 6.0, 7.0, 7.0]\n assert h.Vector().psth(h.Vector((3, 2, 4, 6, 7, 6, 7, 8)), 1, 2, 3).to_python() == [1500.0, 1500.0, 2000.0, 3000.0, 3500.0, 3000.0, 3500.0, 4000.0]\n\n h(\"func fun () { return ($1 - 0.5) * 2 + ($2 - 0.5) * 2 }\")\n dvec = h.Vector(2)\n fvec = h.Vector(2)\n fvec.fill(1)\n ivec = h.Vector(2)\n ivec.indgen()\n a = h.ref(2)\n b = h.ref(1)\n error = dvec.fit(fvec, \"fun\", ivec, a, b)\n assert np.allclose([error], [1.0005759999999997])\n assert np.allclose(fvec.to_python(), [-0.976, 1.024])\n assert ivec.to_python() == [0.0, 1.0]\n assert dvec.to_python() == [0.0, 0.0]\n aftau = np.array([5, 1, 6, 8], 'd')\n error = dvec.fit(fvec, \"exp2\", ivec, npyref(aftau, 0), npyref(aftau, 1), npyref(aftau, 2), npyref(aftau, 3))\n assert np.allclose([error], [8.442756842706686])\n error = dvec.fit(fvec, \"exp1\", ivec, npyref(aftau, 0), npyref(aftau, 1), npyref(aftau, 2), npyref(aftau, 3))\n assert np.allclose([error], [2.5653639385348724e-06])\n error = dvec.fit(fvec, \"charging\", ivec, npyref(aftau, 0), npyref(aftau, 1), npyref(aftau, 2), npyref(aftau, 3))\n assert np.allclose([error], [7.288763182752445e-08])\n error = dvec.fit(fvec, \"quad\", ivec, npyref(aftau, 0), npyref(aftau, 1), npyref(aftau, 2))\n assert np.allclose([error], [0.0010239221022848835])\n error = dvec.fit(fvec, \"line\", ivec, npyref(aftau, 0), npyref(aftau, 1))\n assert np.allclose([error], [6.593786238758728e-06])\n" ]
[ [ "numpy.allclose", "numpy.array" ] ]
sommoMicc/FairMOT
[ "fb6ef23008a903c77502d25deb7fc63adae95f82" ]
[ "src/track.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport os.path as osp\nimport cv2\nimport logging\nimport argparse\nimport motmetrics as mm\nimport numpy as np\nimport torch\n\nfrom tracker.multitracker import JDETracker\nfrom tracking_utils import visualization as vis\nfrom tracking_utils.log import logger\nfrom tracking_utils.timer import Timer\nfrom tracking_utils.evaluation import Evaluator\nimport datasets.dataset.jde as datasets\n\nfrom tracking_utils.utils import mkdir_if_missing\nfrom opts import opts\n\n\ndef write_results(filename, results, data_type):\n \n \n #data type rappresenta il tipo di dato che viene passato in input che può essere di tipo MOT o di tipo Kitti\n #a seconda del formato in cui viene salvato produce una variabile save_format che contiene un determinato formato\n #kitti si riferisce ad una piattaforma di test che valuta gli algoritmi di machine vision soprattutto per la guida automatizzata\n #l'else rappresenta la situazione di errore nel caso il tipo di dato non è MOT o Kitti\n \n if data_type == 'mot':\n save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\\n'\n elif data_type == 'kitti':\n save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\\n'\n else:\n raise ValueError(data_type)\n \n #apre un file in scrittura e lo chiama f\n \n\n with open(filename, 'w') as f:\n for frame_id, tlwhs, track_ids in results:\n if data_type == 'kitti':\n frame_id -= 1\n for tlwh, track_id in zip(tlwhs, track_ids):\n if track_id < 0:\n continue\n x1, y1, w, h = tlwh\n x2, y2 = x1 + w, y1 + h\n line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)\n f.write(line)\n logger.info('save results to {}'.format(filename))\n\n\ndef eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):\n #verifica che sia presente la cartella di salvataggio e in caso non lo sia la crea\n if save_dir:\n mkdir_if_missing(save_dir)\n \n #crea il JDETracker che esegue le seguenti operazioni (che vengono trovate in src/lib/tracker/multitracker.py nella funzione init)\n #1) Verifica se il programma va eseguito con CPU o GPU\n #2) Crea il modello e lo valuta\n #3) Applica un Kalman Filter: e’ uno strumento per stimare lo stato di un sistema dinamico lineare perturbato da rumore, sulla base di misure (o osservazioni) linearmente dipendenti\n tracker = JDETracker(opt, frame_rate=frame_rate)\n #viene inizializzato il timer per monitorare il tempo di elaborazione\n timer = Timer()\n #inizializzazione array dei risultati\n results = []\n #identificatore del frame\n frame_id = 0\n #scorriamo gli elementi all'interno del dataloader che erano le immagini del database caricate nel main\n for path, img, img0 in dataloader:\n #visualizza il frame rate dopo 20 frame elaborati\n if frame_id % 20 == 0:\n logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. 
/ max(1e-5, timer.average_time)))\n\n # run tracking\n timer.tic()\n \n #questa funzione è specifica per le GPU NVIDIA, infatti usa CUDA, si può sostituire con CPU nel caso si voglia eseguire il programma con CPU\n #crea una matrice multidimensionale di almeno 3 elementi per rappresentare l'immagine img\n \n \n blob = torch.from_numpy(img).cuda().unsqueeze(0)\n \n #viene applicata la funzione update del JDETracker che svolge le seguenti funzioni (src/lib/tracker/multitracker.py)\n #1) Vengono passati come parametri gli elementi blob e img0\n #2) Da entrambi si estraggono altezza e larghezza e vengono memorizzate in una variabile\n #3) viene creata una variabile c che memorizza un array di float che contiene come due elementi altezza e larghezza dimezzata di img0\n #4) viene creata una variabile s che memorizza il massimo fra max(float(inp_width) (blob) / float(inp_height) (blob) * height (img0), width (img0)) * 1.0\n #5) Network forward, get detections & embeddings: rileva gli oggetti e gli aggiunge a una pila di tracklet\n #6) Prima associazione con Embedding: Da Paper, il ruolo dell'embedding è quello di distinguere diversi tipi di oggetti\n #7) Seconda associazione con IoU\n #8) Inizializza nuovi Stracks\n #9) Aggiorna lo stato\n #10) Ritorna nella variabile il valore degli stracks attivi \n online_targets = tracker.update(blob, img0)\n online_tlwhs = []\n online_ids = []\n #vengono iterati i vari stracks\n for t in online_targets:\n \n tlwh = t.tlwh\n tid = t.track_id\n vertical = tlwh[2] / tlwh[3] > 1.6\n if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:\n online_tlwhs.append(tlwh)\n online_ids.append(tid)\n timer.toc()\n # save results\n results.append((frame_id + 1, online_tlwhs, online_ids))\n if show_image or save_dir is not None:\n online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,\n fps=1. 
/ timer.average_time)\n if show_image:\n cv2.imshow('online_im', online_im)\n if save_dir is not None:\n cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)\n frame_id += 1\n # save results\n write_results(result_filename, results, data_type)\n return frame_id, timer.average_time, timer.calls\n\n\ndef main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',\n save_images=False, save_videos=False, show_image=True):\n logger.setLevel(logging.INFO)\n result_root = os.path.join(data_root, '..', 'results', exp_name)\n mkdir_if_missing(result_root)\n data_type = 'mot'\n\n # run tracking\n accs = []\n n_frame = 0\n timer_avgs, timer_calls = [], []\n \n #scorre tutti gli elementi della sequenza di frame previsti dal MOT passato come parametro\n for seq in seqs:\n #stabilisce la cartella in cui dovrà salvare gli output elaborati\n output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None\n #messaggio che viene restituito all'utente per aggiornarlo sull'andamento dell'esecuzione\n logger.info('start seq: {}'.format(seq))\n #carica l'immagine contenuta all'interno del dataset passato come parametro da seq\n dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)\n #Il file testuale con il risultato dell'elaborazione è salvato in result_filename\n result_filename = os.path.join(result_root, '{}.txt'.format(seq))\n #legge un file per seguire le indicazioni\n meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()\n #qui avviene il calcolo del frame rate, per comprendere quanti frame fa nell'unità di tempo\n frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\\nseqLength')])\n #esecuzione di eval_seq\n nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,\n save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)\n n_frame += nf\n timer_avgs.append(ta)\n timer_calls.append(tc)\n\n # eval\n logger.info('Evaluate seq: {}'.format(seq))\n evaluator = Evaluator(data_root, seq, data_type)\n accs.append(evaluator.eval_file(result_filename))\n if save_videos:\n output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))\n cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)\n os.system(cmd_str)\n timer_avgs = np.asarray(timer_avgs)\n timer_calls = np.asarray(timer_calls)\n all_time = np.dot(timer_avgs, timer_calls)\n avg_time = all_time / np.sum(timer_calls)\n logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))\n\n # get summary\n metrics = mm.metrics.motchallenge_metrics\n mh = mm.metrics.create()\n summary = Evaluator.get_summary(accs, seqs, metrics)\n strsummary = mm.io.render_summary(\n summary,\n formatters=mh.formatters,\n namemap=mm.io.motchallenge_metric_names\n )\n print(strsummary)\n Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))\n\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n opt = opts().init()\n\n if not opt.val_mot16:\n seqs_str = '''KITTI-13\n KITTI-17\n ADL-Rundle-6\n PETS09-S2L1\n TUD-Campus\n TUD-Stadtmitte'''\n data_root = os.path.join(opt.data_dir, 'MOT15/images/train')\n else:\n seqs_str = '''MOT16-02\n MOT16-04\n MOT16-05\n MOT16-09\n MOT16-10\n MOT16-11\n MOT16-13'''\n data_root = os.path.join(opt.data_dir, 'MOT16/train')\n\n if opt.test_mot16:\n seqs_str = '''MOT16-01\n MOT16-03\n MOT16-06\n MOT16-07\n MOT16-08\n MOT16-12\n 
MOT16-14'''\n data_root = os.path.join(opt.data_dir, 'MOT16/test')\n if opt.test_mot15:\n seqs_str = '''ADL-Rundle-1\n ADL-Rundle-3\n AVG-TownCentre\n ETH-Crossing\n ETH-Jelmoli\n ETH-Linthescher\n KITTI-16\n KITTI-19\n PETS09-S2L2\n TUD-Crossing\n Venice-1'''\n data_root = os.path.join(opt.data_dir, 'MOT15/images/test')\n if opt.test_mot17:\n seqs_str = '''MOT17-01-SDP\n MOT17-03-SDP\n MOT17-06-SDP\n MOT17-07-SDP\n MOT17-08-SDP\n MOT17-12-SDP\n MOT17-14-SDP'''\n data_root = os.path.join(opt.data_dir, 'MOT17/images/test')\n if opt.val_mot17:\n seqs_str = '''MOT17-02-SDP\n MOT17-04-SDP\n MOT17-05-SDP\n MOT17-09-SDP\n MOT17-10-SDP\n MOT17-11-SDP\n MOT17-13-SDP'''\n data_root = os.path.join(opt.data_dir, 'MOT17/images/train')\n if opt.val_mot15:\n seqs_str = '''KITTI-13\n KITTI-17\n ETH-Bahnhof\n ETH-Sunnyday\n PETS09-S2L1\n TUD-Campus\n TUD-Stadtmitte\n ADL-Rundle-6\n ADL-Rundle-8\n ETH-Pedcross2\n TUD-Stadtmitte'''\n data_root = os.path.join(opt.data_dir, 'MOT15/images/train')\n if opt.val_mot20:\n seqs_str = '''MOT20-01\n MOT20-02\n MOT20-03\n MOT20-05\n '''\n data_root = os.path.join(opt.data_dir, 'MOT20/images/train')\n if opt.test_mot20:\n seqs_str = '''MOT20-04\n MOT20-06\n MOT20-07\n MOT20-08\n '''\n data_root = os.path.join(opt.data_dir, 'MOT20/images/test')\n seqs = [seq.strip() for seq in seqs_str.split()]\n\n main(opt,\n data_root=data_root,\n seqs=seqs,\n exp_name='MOT15_val_all_dla34',\n show_image=False,\n save_images=False,\n save_videos=False)\n" ]
[ [ "numpy.sum", "numpy.dot", "numpy.asarray", "torch.from_numpy" ] ]
liamirpy/Covid19-face-mask-detection-with-openCV-deep-learning-and-python
[ "62357eb13bee05fd600a87b0f30465f7b9f51773" ]
[ "model.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nnp.random.seed(1000)\nimport cv2\nimport os\nos.environ['KERAS_BACKEND']='tensorflow'\nfrom PIL import Image\nimport keras\nimage_directory='/content/drive/My Drive/Covid_19/'\nSIZE=256\ndataset=[]\nlabel=[]\nn=0\no=0\n########READ DATA\nmask_images=os.listdir(image_directory+'with_mask/')\nfor i,image_name in enumerate(mask_images):\n if(image_name.split('.')[-1]=='png' or image_name.split('.')[-1]=='jpeg' or image_name.split('.')[-1]=='jpg'):\n image=cv2.imread(image_directory+'with_mask/'+image_name)\n image=Image.fromarray(image,'RGB')\n image=image.resize((SIZE,SIZE))\n n=n+1\n dataset.append(np.array(image))\n label.append(1)\n\nnomask_images=os.listdir(image_directory+'without_mask/')\nfor i,image_name in enumerate(nomask_images):\n if(image_name.split('.')[-1]=='png' or image_name.split('.')[-1]=='jpeg' or image_name.split('.')[-1]=='jpg'):\n image=cv2.imread(image_directory+'without_mask/'+image_name)\n image=Image.fromarray(image,'RGB')\n image=image.resize((SIZE,SIZE))\n o=o+1\n dataset.append(np.array(image))\n label.append(0)\nprint(n)\nprint(o)\n#####MODEL\nINPUT_SHAPE=(SIZE,SIZE,3)\ninp=keras.layers.Input(shape=INPUT_SHAPE)\nconv1=keras.layers.Conv2D(64,kernel_size=(3,3),\n activation='relu',padding='same')(inp)\npool1=keras.layers.MaxPooling2D(pool_size=(2,2))(conv1)\nnorm1=keras.layers.BatchNormalization(axis=-1)(pool1)\ndrop1=keras.layers.Dropout(rate=0.2)(norm1)\n\nconv2=keras.layers.Conv2D(32,kernel_size=(3,3),\n activation='relu',padding='same')(drop1)\npool2=keras.layers.MaxPooling2D(pool_size=(2,2))(conv2)\nnorm2=keras.layers.BatchNormalization(axis=-1)(pool2)\ndrop2=keras.layers.Dropout(rate=0.2)(norm2)\n\nflat=keras.layers.Flatten()(drop2)\n\nhidden1=keras.layers.Dense(128,activation='relu')(flat)\nnorm3=keras.layers.BatchNormalization(axis=-1)(hidden1)\ndrop3=keras.layers.Dropout(rate=0.2)(norm3)\n\nhidden2=keras.layers.Dense(50,activation='relu')(drop3)\nnorm4=keras.layers.BatchNormalization(axis=-1)(hidden2)\ndrop4=keras.layers.Dropout(rate=0.2)(norm4)\n\nout=keras.layers.Dense(2,activation='softmax')(drop4)\n\nmodel=keras.Model(inputs=inp,outputs=out)\nmodel.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])\nprint(model.summary())\n##################\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import to_categorical\nX_train,X_test,y_train,y_test=train_test_split(dataset,to_categorical(np.array(label)),test_size=0.20,random_state=0)\n\n\nhistory=model.fit(np.array(X_train),y_train,batch_size=32,verbose=1,epochs=15,validation_split=0.1)\n\nprint(\"Test_Accuracy:{:.2f}%\".format(model.evaluate(np.array(X_test),np.array(y_test))[1]*100))\n\nmodel.save('mask_nomask.h5')\n\n" ]
[ [ "numpy.random.seed", "numpy.array" ] ]
SamuelMarks/model-remediation
[ "8ab480785ee1446d8d67ea62f788b7feac610aec" ]
[ "tools/tutorials_utils/min_diff_keras_utils_test.py" ]
[ "# coding=utf-8\n# Copyright 2020 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for min_diff_keras_util functions.\"\"\"\n\nimport csv\nimport os\nimport tempfile\nimport unittest.mock as mock\n\nimport tensorflow as tf\nfrom tensorflow_model_remediation.tools.tutorials_utils import min_diff_keras_utils\n\n\nclass UtilTest(tf.test.TestCase):\n\n def _create_example_csv(self, use_fake_embedding=False):\n header = [\n 'comment_text',\n 'toxicity',\n 'heterosexual',\n 'homosexual_gay_or_lesbian',\n 'bisexual',\n 'other_sexual_orientation',\n 'male',\n 'female',\n 'transgender',\n 'other_gender',\n 'christian',\n 'jewish',\n 'muslim',\n 'hindu',\n 'buddhist',\n 'atheist',\n 'other_religion',\n 'black',\n 'white',\n 'asian',\n 'latino',\n 'other_race_or_ethnicity',\n 'physical_disability',\n 'intellectual_or_learning_disability',\n 'psychiatric_or_mental_illness',\n 'other_disability',\n ]\n example = [\n 'comment 1' if not use_fake_embedding else 0.35,\n 0.1,\n # sexual orientation\n 0.1,\n 0.1,\n 0.5,\n 0.1,\n # gender\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n # religion\n 0.0,\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n 0.5,\n 0.6,\n # race or ethnicity\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n 0.5,\n # disability\n 0.6,\n 0.7,\n 0.8,\n 1.0,\n ]\n empty_comment_example = [\n '' if not use_fake_embedding else 0.35,\n 0.1,\n 0.1,\n 0.1,\n 0.5,\n 0.1,\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n 0.0,\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n 0.5,\n 0.6,\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n 0.5,\n 0.6,\n 0.7,\n 0.8,\n 1.0,\n ]\n return [header, example, empty_comment_example]\n\n def _write_csv(self, examples):\n filename = os.path.join(tempfile.mkdtemp(), 'input.csv')\n with open(filename, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n for example in examples:\n csvwriter.writerow(example)\n\n return filename\n\n @mock.patch(\n 'tensorflow_model_remediation.tools.tutorials_utils.min_diff_keras_utils._create_embedding_layer',\n autospec=True)\n @mock.patch('tensorflow.keras.utils.get_file', autospec=True)\n def test_download_and_process_civil_comments_data_and_create_model(\n self, mock_get_file, mock__create_embedding_layer):\n\n # First test download_and_process_civil_comments_data. 
Mock out the\n # download.\n filename = self._write_csv(\n self._create_example_csv(use_fake_embedding=True))\n mock_get_file.return_value = filename\n data_train, _, _, labels_train, _ = min_diff_keras_utils.download_and_process_civil_comments_data(\n )\n\n self.assertEqual(mock_get_file.call_count, 3)\n\n # Undo the string interpretation of the text_feature, since we are mocking\n # out the embedding layer in the following model testing.\n data_train[min_diff_keras_utils.TEXT_FEATURE] = data_train[\n min_diff_keras_utils.TEXT_FEATURE].astype(float)\n\n # Now use that data to test create_keras_sequential_model.\n mock__create_embedding_layer.return_value = tf.keras.layers.Dense(units=128)\n\n model = min_diff_keras_utils.create_keras_sequential_model(hub_url='')\n\n # Sanity check that you have a valid model by training it and predicting.\n optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\n loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n metrics = ['accuracy']\n model.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n model.fit(\n x=data_train['comment_text'], y=labels_train, batch_size=1, epochs=1)\n result = model.predict([0.1])\n self.assertTrue(result[0][0] < 1 and result[0][0] > 0)\n\n def test_get_eval_results(self):\n # TODO: Add testing.\n pass\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.keras.optimizers.Adam", "tensorflow.test.main", "tensorflow.keras.layers.Dense" ] ]
mbtaPredict/Main
[ "e1c3320ff08b61355ac96f51be9e20c57372f13b" ]
[ "Graph_Code/hubway2011_graph.py" ]
[ "\"\"\"Part of Hubway Prediction project by Shane Kelly, William Lu, and\nKevin Crispie, Olin College of Engineering\n\"\"\"\n\nimport csv\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom pylab import *\nimport numpy as np\nfrom numpy import fft\n\n\ndef get_file(file_path):\n\t\"\"\" reads a csv file and returns a csv reader object\n\t\"\"\"\n\n\tf = open(file_path, 'r')\n\tbike_reader = csv.reader(f)\n\n\treturn bike_reader\n\ndef get_month_days(file_id):\n\t\"\"\"takes a file id and returns month_days, a list of all the individual \n\tHubway trips in a month in a list of lists. Each nested list has all the \n\tdates for each trip in the given month\n\t\"\"\"\n\n\tstandard_datetime = []\n\tfor line in file_id:\n\t\tstandard_datetime.append(line[4]) #append the contents of column 4 (trip start date)\n\n\tstandard_datetime = standard_datetime[1::]\t#removes header text from list\n\n\tmonth = []\n\tday = []\n\tyear = []\n\n\t#creates lists of trip months, days, and years, multiplicity is number of trips during \n\t#that time period\n\n\tfor i in range(len(standard_datetime)):\n\t\tonly_date = standard_datetime[i].split(' ')\n\t\tonly_date_string = only_date[0]\n\t\tsplit_date_string = only_date_string.split('/')\n\t\tmonth.append(split_date_string[0])\n\t\tday.append(split_date_string[1])\n\t\tyear.append(split_date_string[2])\n\n\t#separates a large list of days into corresponding month\n\tjuly = day[0:month.index('8')]\n\taugust = day[month.index('8'):month.index('9')]\n\tseptember = day[month.index('9'):month.index('10')]\n\toctober = day[month.index('10'):month.index('11')]\n\tnovember = day[month.index('11')::]\n\n\treturn [july, august, september, october, november]\n\ndef fourierExtrapolation(x, n_predict):\n n = x.size\n n_harm = 4 # number of harmonics in model\n t = np.arange(0, n)\n p = np.polyfit(t, x, 1) # find linear trend in x\n x_notrend = x - p[0] * t # detrended x\n x_freqdom = fft.fft(x_notrend) # detrended x in frequency domain\n f = fft.fftfreq(n) # frequencies\n indexes = list(range(n))\n # sort indexes by frequency, lower -> higher\n indexes.sort(key = lambda i: np.absolute(f[i]))\n \n t = np.arange(0, n + n_predict)\n restored_sig = np.zeros(t.size)\n for i in indexes[:1 + n_harm * 2]:\n ampli = np.absolute(x_freqdom[i]) / n # amplitude\n phase = np.angle(x_freqdom[i]) # phase\n restored_sig += ampli * np.cos(2 * np.pi * f[i] * t + phase)\n return restored_sig + p[0] * t\n\ndef day_of_week_classifier(data):\n\t#Hubway opened on July 28th, a Thursday, in 2011\n\tthurs_start = 0\n\tfri_start = 1\n\tsat_start = 2\n\tsun_start = 3\n\tmon_start = 4\n\ttues_start = 5\n\twed_start = 6\n\n\tmon = data[mon_start::7]\n\ttues = data[tues_start::7]\n\twed = data[wed_start::7]\n\tthurs = data[thurs_start::7]\n\tfri = data[fri_start::7]\n\tsat = data[sat_start::7]\n\tsun = data[sun_start::7]\n\n\treturn (mon, tues, wed, thurs, fri, sat, sun)\n\ndef sum_daily_totals(daily_totals):\n\tmon_sum = sum(daily_totals[0])\n\ttues_sum = sum(daily_totals[1])\n\twed_sum = sum(daily_totals[2])\n\tthurs_sum = sum(daily_totals[3])\n\tfri_sum = sum(daily_totals[4])\n\tsat_sum = sum(daily_totals[5])\n\tsun_sum = sum(daily_totals[6])\n\n\treturn (mon_sum, tues_sum, wed_sum, thurs_sum, fri_sum, sat_sum, sun_sum)\n\ndef average_daily_totals(daily_totals):\n\tmon_ave = sum(daily_totals[0])/len(daily_totals[0])\n\ttues_ave = sum(daily_totals[1])/len(daily_totals[1])\n\twed_ave = sum(daily_totals[2])/len(daily_totals[2])\n\tthurs_ave = sum(daily_totals[3])/len(daily_totals[3])\n\tfri_ave = sum(daily_totals[4])/len(daily_totals[4])\n\tsat_ave = sum(daily_totals[5])/len(daily_totals[5])\n\tsun_ave = sum(daily_totals[6])/len(daily_totals[6])\n\n\treturn (mon_ave, tues_ave, wed_ave, thurs_ave, fri_ave, sat_ave, sun_ave)\n\ndef get_diff_average(averages):\n\t\"\"\"Returns the difference between each datapoint and the average of the dataset\n\tIt is used to calculate the difference between the daily totals and the \n\taverage totals for each day. It is returned as a ratio.\n\t\"\"\"\n\n\tall_averaged = np.mean(averages)\n\tratio_diffs = []\n\tfor x in range(len(averages)):\n\t\tratio_diffs.append((averages[x] - all_averaged) / all_averaged)\n\t\n\treturn ratio_diffs\n\ndef main():\n\tfile_path = 'HubwayData/2011_hubway_trips.csv'\n\tbike_reader = get_file(file_path)\n\tmonth_days = get_month_days(bike_reader)\n\n\t#list corresponding to date of trips for each month\n\tjuly = month_days[0]\n\taugust = month_days[1]\n\tseptember = month_days[2]\n\toctober = month_days[3]\n\tnovember = month_days[4]\n\n\t#counts the number of trips in each month\n\n\tjuly_count = []\n\n\tfor x in range(1,32):\n\t\tjuly_count.append(july.count(str(x)))\n\tjuly_count = july_count[27:]\n\n\taugust_count = []\n\n\tfor x in range(1,32):\n\t\taugust_count.append(august.count(str(x)))\n\n\tseptember_count = []\n\n\tfor x in range(1,32):\n\t\tseptember_count.append(september.count(str(x)))\n\tseptember_count = september_count[:-1]\n\n\toctober_count = []\n\n\tfor x in range(1,32):\n\t\toctober_count.append(october.count(str(x)))\n\n\tnovember_count = []\n\n\tfor x in range(1,32):\n\t\tnovember_count.append(november.count(str(x)))\n\tnovember_count = november_count[:-1]\n\n\t#get a list of number of trips for all months\n\tall_months_count = july_count + august_count + september_count + october_count + november_count\n\n\t#This code plots in 4 different graphs a polynomial regression,\n\t#a bar chart of the total riders on each day of the week,\n\t#the average riders per day of the week, and a bar chart\n\t#of all the Mondays in the year.\n\n\t#polynomial regression\n\tfig1 = plt.figure(1)\n\tyreg = all_months_count\n\txreg = range(len(yreg)) #each day counts up by 1\n\tfit = polyfit(xreg,yreg,4) #regression\n\tfit_fn = poly1d(fit) #generate polynomial from regression function\n\tax1 = fig1.add_subplot(111)\n\tax1.plot(xreg,yreg,'yo', xreg, fit_fn(xreg), '--k') #plot regression\n\n\t#regular line plot\n\t#plt.plot(all_months_count) \n\n\t#Fourier Transform Regression\n\t\"\"\"\n\txfour = np.array(yreg[70:70+21])\n\tn_predict = len(xreg[70:70+21])\n\textrapolation = fourierExtrapolation(xfour, n_predict)\n\tplt.plot(np.arange(0, extrapolation.size), extrapolation, 'r', label = 'extrapolation')\n\tplt.plot(np.arange(0, xfour.size), xfour, 'b', label = 'x', linewidth = 3)\n\tplt.plot(xreg[21:21+21],all_months_count[70+21:70+21+21])\n\tplt.legend()\n\tplt.show()\n\t\"\"\"\n\n\tax1.set_xlabel('Day of Operation')\n\tax1.set_ylabel('Number of Riders')\n\tax1.set_title('Hubway Ridership in 2011')\n\n\tdaily_totals = day_of_week_classifier(all_months_count)\n\tsum_totals = sum_daily_totals(daily_totals)\n\n\tfig2 = plt.figure(2)\n\tax2 = fig2.add_subplot(111)\n\tax2.bar(range(7),sum_totals, 1/1.5, color = \"blue\")\n\tax2.set_xlabel('Day of Week')\n\tax2.set_ylabel('Amount of Riders')\n\tax2.set_title('Total Ridership by Day')\n\n\tave_totals = average_daily_totals(daily_totals)\n\n\tfig3 = plt.figure(3)\n\tax3 = fig3.add_subplot(111)\n\tax3.bar(range(7),ave_totals,
1/1.5, color = \"blue\")\n\tax3.set_xlabel('Day of Week')\n\tax3.set_ylabel('Amount of Riders')\n\tax3.set_title('Average Ridership by Day')\n\n\tfig4 = plt.figure(4)\n\tax4 = fig4.add_subplot(111)\n\tax4.bar(range(len(daily_totals[0])),daily_totals[0], 1/1.5, color = \"blue\")\n\tax4.set_xlabel('Time of Year')\n\tax4.set_ylabel('Amount of Riders')\n\tax4.set_title('Average Ridership for Mondays')\n\t\n\tshow()\n\n\t#determine the ratio of the difference between the daily ridership and\n\t#the average daily ridership and the average ridership\n\n\tratio_diffs = get_diff_average(ave_totals)\n\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.angle", "numpy.zeros", "numpy.mean", "matplotlib.pyplot.figure", "numpy.fft.fft", "numpy.arange", "numpy.polyfit", "numpy.cos", "numpy.absolute", "numpy.fft.fftfreq" ] ]
IgorSechko/pytorch-retinanet
[ "76c080a91eb3f286ca59af7cbfa4889a7f0c0de0" ]
[ "retinanet/dataloader.py" ]
[ "from __future__ import print_function, division\nimport sys\nimport os\nimport torch\nimport numpy as np\nimport random\nimport csv\n\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nfrom torch.utils.data.sampler import Sampler\n\nfrom pycocotools.coco import COCO\n\nimport skimage.io\nimport skimage.transform\nimport skimage.color\nimport skimage\n\nfrom PIL import Image\n\n\nclass CocoDataset(Dataset):\n \"\"\"Coco dataset.\"\"\"\n\n def __init__(self, root_dir, set_name='train2017', transform=None):\n \"\"\"\n Args:\n root_dir (string): COCO directory.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.root_dir = root_dir\n self.set_name = set_name\n self.transform = transform\n\n self.coco = COCO(os.path.join(self.root_dir, 'annotations', 'instances_' + self.set_name + '.json'))\n self.image_ids = self.coco.getImgIds()\n\n self.load_classes()\n\n def load_classes(self):\n # load class names (name -> label)\n categories = self.coco.loadCats(self.coco.getCatIds())\n categories.sort(key=lambda x: x['id'])\n\n self.classes = {}\n self.coco_labels = {}\n self.coco_labels_inverse = {}\n for c in categories:\n self.coco_labels[len(self.classes)] = c['id']\n self.coco_labels_inverse[c['id']] = len(self.classes)\n self.classes[c['name']] = len(self.classes)\n\n # also load the reverse (label -> name)\n self.labels = {}\n for key, value in self.classes.items():\n self.labels[value] = key\n\n def __len__(self):\n return len(self.image_ids)\n\n def __getitem__(self, idx):\n\n img = self.load_image(idx)\n annot = self.load_annotations(idx)\n sample = {'img': img, 'annot': annot}\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n def load_image(self, image_index):\n image_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n path = os.path.join(self.root_dir, 'images', self.set_name, image_info['file_name'])\n img = skimage.io.imread(path)\n\n if len(img.shape) == 2:\n img = skimage.color.gray2rgb(img)\n\n return img.astype(np.float32) / 255.0\n\n def load_annotations(self, image_index):\n # get ground truth annotations\n annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)\n annotations = np.zeros((0, 5))\n\n # some images appear to miss annotations (like image with id 257034)\n if len(annotations_ids) == 0:\n return annotations\n\n # parse annotations\n coco_annotations = self.coco.loadAnns(annotations_ids)\n for idx, a in enumerate(coco_annotations):\n\n # some annotations have basically no width / height, skip them\n if a['bbox'][2] < 1 or a['bbox'][3] < 1:\n continue\n\n annotation = np.zeros((1, 5))\n annotation[0, :4] = a['bbox']\n annotation[0, 4] = self.coco_label_to_label(a['category_id'])\n annotations = np.append(annotations, annotation, axis=0)\n\n # transform from [x, y, w, h] to [x1, y1, x2, y2]\n annotations[:, 2] = annotations[:, 0] + annotations[:, 2]\n annotations[:, 3] = annotations[:, 1] + annotations[:, 3]\n\n return annotations\n\n def coco_label_to_label(self, coco_label):\n return self.coco_labels_inverse[coco_label]\n\n def label_to_coco_label(self, label):\n return self.coco_labels[label]\n\n def image_aspect_ratio(self, image_index):\n image = self.coco.loadImgs(self.image_ids[image_index])[0]\n return float(image['width']) / float(image['height'])\n\n def num_classes(self):\n return 80\n\n\nclass CSVDataset(Dataset):\n \"\"\"CSV dataset.\"\"\"\n\n def __init__(self, train_file, class_list, transform=None):\n 
\"\"\"\n Args:\n train_file (string): CSV file with training annotations\n class_list (string): CSV file with class list\n test_file (string, optional): CSV file with testing annotations\n \"\"\"\n self.train_file = train_file\n self.class_list = class_list\n self.transform = transform\n\n # parse the provided class file\n try:\n with self._open_for_csv(self.class_list) as file:\n self.classes = self.load_classes(csv.reader(file, delimiter=','))\n except ValueError as e:\n raise ValueError('invalid CSV class file: {}: {}'.format(self.class_list, e))\n\n self.labels = {}\n for key, value in self.classes.items():\n self.labels[value] = key\n\n # csv with img_path, x1, y1, x2, y2, class_name\n try:\n with self._open_for_csv(self.train_file) as file:\n self.image_data = self._read_annotations(csv.reader(file, delimiter=','), self.classes)\n except ValueError as e:\n raise ValueError('invalid CSV annotations file: {}: {}'.format(self.train_file, e))\n self.image_names = list(self.image_data.keys())\n\n def _parse(self, value, function, fmt):\n \"\"\"\n Parse a string into a value, and format a nice ValueError if it fails.\n Returns `function(value)`.\n Any `ValueError` raised is caught and a new `ValueError` is raised\n with message `fmt.format(e)`, where `e` is the caught `ValueError`.\n \"\"\"\n try:\n return function(value)\n except ValueError as e:\n raise ValueError(fmt.format(e))\n\n def _open_for_csv(self, path):\n \"\"\"\n Open a file with flags suitable for csv.reader.\n For python2 this means mode 'rb';\n for python3 this means 'r' with \"universal newlines\".\n \"\"\"\n if sys.version_info[0] < 3:\n return open(path, 'rb')\n else:\n return open(path, 'r', newline='')\n\n def load_classes(self, csv_reader):\n result = {}\n\n for line, row in enumerate(csv_reader):\n line += 1\n\n try:\n class_name, class_id = row\n except ValueError:\n raise ValueError('line {}: format should be \'class_name,class_id\''.format(line))\n class_id = self._parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))\n\n if class_name in result:\n raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))\n result[class_name] = class_id\n return result\n\n def __len__(self):\n return len(self.image_names)\n\n def __getitem__(self, idx):\n\n img = self.load_image(idx)\n annot = self.load_annotations(idx)\n sample = {'img': img, 'annot': annot}\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n def load_image(self, image_index):\n img = skimage.io.imread(self.image_names[image_index])\n\n if len(img.shape) == 2:\n img = skimage.color.gray2rgb(img)\n\n return img.astype(np.float32) / 255.0\n\n def load_annotations(self, image_index):\n # get ground truth annotations\n annotation_list = self.image_data[self.image_names[image_index]]\n annotations = np.zeros((0, 5))\n\n # some images appear to miss annotations (like image with id 257034)\n if len(annotation_list) == 0:\n return annotations\n\n # parse annotations\n for idx, a in enumerate(annotation_list):\n # some annotations have basically no width / height, skip them\n x1 = a['x1']\n x2 = a['x2']\n y1 = a['y1']\n y2 = a['y2']\n\n if (x2 - x1) < 1 or (y2 - y1) < 1:\n continue\n\n annotation = np.zeros((1, 5))\n\n annotation[0, 0] = x1\n annotation[0, 1] = y1\n annotation[0, 2] = x2\n annotation[0, 3] = y2\n\n annotation[0, 4] = self.name_to_label(a['class'])\n annotations = np.append(annotations, annotation, axis=0)\n\n return annotations\n\n def _read_annotations(self, csv_reader, classes):\n result = {}\n for line, row in enumerate(csv_reader):\n line += 1\n\n try:\n img_file, x1, y1, x2, y2, class_name = row[:6]\n except ValueError:\n raise ValueError(\n 'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line))\n\n if img_file not in result:\n result[img_file] = []\n\n # If a row contains only an image path, it's an image without annotations.\n if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):\n continue\n\n x1 = self._parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))\n y1 = self._parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))\n x2 = self._parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))\n y2 = self._parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))\n\n # Check that the bounding box is valid.\n if x2 <= x1:\n raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))\n if y2 <= y1:\n raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))\n\n # check if the current class name is correctly present\n if class_name not in classes:\n raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))\n\n result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})\n return result\n\n def name_to_label(self, name):\n return self.classes[name]\n\n def label_to_name(self, label):\n return self.labels[label]\n\n def num_classes(self):\n return max(self.classes.values()) + 1\n\n def image_aspect_ratio(self, image_index):\n image = Image.open(self.image_names[image_index])\n return float(image.width) / float(image.height)\n\n\ndef collater(data):\n imgs = [s['img'] for s in data]\n annots = [s['annot'] for s in data]\n scales = [s['scale'] for s in data]\n\n widths = [int(s.shape[0]) for s in imgs]\n heights = [int(s.shape[1]) for s in imgs]\n batch_size = len(imgs)\n\n max_width = np.array(widths).max()\n max_height = np.array(heights).max()\n\n padded_imgs = torch.zeros(batch_size, max_width, max_height, 3)\n\n for i in range(batch_size):\n img = imgs[i]\n padded_imgs[i, :int(img.shape[0]), :int(img.shape[1]), :] = img\n\n max_num_annots = max(annot.shape[0] for annot in annots)\n\n if max_num_annots > 0:\n\n annot_padded = torch.ones((len(annots), max_num_annots, 5)) * -1\n\n if max_num_annots > 0:\n for idx, annot in enumerate(annots):\n # print(annot.shape)\n if annot.shape[0] > 0:\n annot_padded[idx, :annot.shape[0], :] = annot\n else:\n annot_padded = torch.ones((len(annots), 1, 5)) * -1\n\n padded_imgs = padded_imgs.permute(0, 3, 1, 2)\n\n return {'img': padded_imgs, 'annot': annot_padded, 'scale': scales}\n\n\nclass Resizer(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample, min_side=608, max_side=1024):\n image, annots = sample['img'], sample['annot']\n\n rows, cols, cns = image.shape\n\n smallest_side = min(rows, cols)\n\n # rescale the image so the smallest side is min_side\n scale = min_side / smallest_side\n\n # check if the largest side is now greater than max_side, which can happen\n # when images have a large aspect ratio\n largest_side = max(rows, cols)\n\n if largest_side * scale > max_side:\n scale = max_side / largest_side\n\n # resize the image with the computed scale\n image = skimage.transform.resize(image, (int(round(rows * scale)), int(round((cols * scale)))))\n rows, cols, cns = image.shape\n\n pad_w = 32 - rows % 32\n pad_h = 32 - cols % 32\n\n
new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)\n new_image[:rows, :cols, :] = image.astype(np.float32)\n\n annots[:, :4] *= scale\n\n return {'img': torch.from_numpy(new_image), 'annot': torch.from_numpy(annots), 'scale': scale}\n\n\nclass Augmenter(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample, flip_x=0.5):\n if np.random.rand() < flip_x:\n image, annots = sample['img'], sample['annot']\n image = image[:, ::-1, :]\n\n rows, cols, channels = image.shape\n\n x1 = annots[:, 0].copy()\n x2 = annots[:, 2].copy()\n\n x_tmp = x1.copy()\n\n annots[:, 0] = cols - x2\n annots[:, 2] = cols - x_tmp\n\n sample = {'img': image, 'annot': annots}\n\n return sample\n\n\nclass Normalizer(object):\n\n def __init__(self):\n self.mean = np.array([[[0.485, 0.456, 0.406]]])\n self.std = np.array([[[0.229, 0.224, 0.225]]])\n\n def __call__(self, sample):\n image, annots = sample['img'], sample['annot']\n\n return {'img': ((image.astype(np.float32) - self.mean) / self.std), 'annot': annots}\n\n\nclass UnNormalizer(object):\n def __init__(self, mean=None, std=None):\n if mean == None:\n self.mean = [0.485, 0.456, 0.406]\n else:\n self.mean = mean\n if std == None:\n self.std = [0.229, 0.224, 0.225]\n else:\n self.std = std\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized image.\n \"\"\"\n for t, m, s in zip(tensor, self.mean, self.std):\n t.mul_(s).add_(m)\n return tensor\n\n\nclass AspectRatioBasedSampler(Sampler):\n\n def __init__(self, data_source, batch_size, drop_last):\n self.data_source = data_source\n self.batch_size = batch_size\n self.drop_last = drop_last\n self.groups = self.group_images()\n\n def __iter__(self):\n random.shuffle(self.groups)\n for group in self.groups:\n yield group\n\n def __len__(self):\n if self.drop_last:\n return len(self.data_source) // self.batch_size\n else:\n return (len(self.data_source) + self.batch_size - 1) // self.batch_size\n\n def group_images(self):\n # determine the order of the images\n order = list(range(len(self.data_source)))\n order.sort(key=lambda x: self.data_source.image_aspect_ratio(x))\n\n # divide into groups, one group = one batch\n return [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in\n range(0, len(order), self.batch_size)]\n" ]
[ [ "torch.zeros", "numpy.array", "numpy.random.rand", "numpy.zeros", "torch.from_numpy", "numpy.append" ] ]
haohongxiang/PaddleNLP
[ "c862e9c3a4d49caf00f4de81bdfae36aba9b636e" ]
[ "paddlenlp/ops/faster_transformer/transformer/faster_transformer.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport shutil\nimport numpy as np\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddlenlp.transformers import (TransformerModel, WordEmbedding,\n PositionalEmbedding, position_encoding_init,\n InferTransformerModel, GPTModel)\nfrom paddlenlp.ops import (InferTransformerDecoding, InferGptDecoding,\n InferUnifiedDecoding, InferBartDecoding)\nfrom paddlenlp.ops.ext_utils import load\nfrom paddlenlp.utils.log import logger\nfrom paddlenlp.transformers import (GPTChineseTokenizer, GPTTokenizer,\n UnifiedTransformerPretrainedModel,\n UNIMOPretrainedModel, BartPretrainedModel)\n\n\nclass FasterTransformer(TransformerModel):\n \"\"\"\n FasterTransformer is a faster version for generation with the Transformer\n model. It uses a custom op based on and enhancing NV FasterTransformer to\n do fast generation.\n\n Args:\n src_vocab_size (int):\n The size of source vocabulary.\n trg_vocab_size (int):\n The size of target vocabulary.\n max_length (int):\n The maximum length of input sequences.\n num_encoder_layers (int):\n The number of sub-layers to be stacked in the encoder.\n num_decoder_layers (int):\n The number of sub-layers to be stacked in the decoder.\n n_head (int):\n The number of head used in multi-head attention.\n d_model (int):\n The dimension for word embeddings, which is also the last dimension of\n the input and output of multi-head attention, position-wise feed-forward\n networks, encoder and decoder.\n d_inner_hid (int):\n Size of the hidden layer in position-wise feed-forward networks.\n dropout (float):\n Dropout rates. Used for pre-process, activation and inside attention.\n weight_sharing (bool):\n Whether to use weight sharing. \n attn_dropout (float):\n The dropout probability used in MHA to drop some attention target.\n If None, use the value of dropout. Defaults to None.\n act_dropout (float):\n The dropout probability used after FFN activition. If None, use\n the value of dropout. Defaults to None.\n bos_id (int, optional):\n The start token id and also is used as padding id. Defaults to 0.\n eos_id (int, optional):\n The end token id. Defaults to 1.\n decoding_strategy (str, optional):\n Indicating the strategy of decoding. It can be 'beam_search', 'beam_search_v2',\n 'topk_sampling' and 'topp_sampling'. For beam search strategies,\n 'v2' would select the top `beam_size * 2` beams and process the top\n `beam_size` alive and finish beams in them separately, while 'v1'\n would only select the top `beam_size` beams and mix up the alive and\n finish beams. 'v2' always searchs more and get better results, since\n the alive beams would always be `beam_size` while the number of alive\n beams in `v1` might decrease when meeting the end token. However,\n 'v2' always generates longer results thus might do more calculation\n and be slower.\n beam_size (int, optional):\n The beam width for beam search. Defaults to 4. 
\n topk (int, optional):\n The number of highest probability tokens to keep for top-k sampling.\n Defaults to 4. \n topp (float, optional):\n The most probable tokens whose cumulative probability is not less than\n `topp` are kept for top-p sampling. Defaults to 4. \n max_out_len (int, optional):\n The maximum output length. Defaults to 256.\n diversity_rate (float, optional):\n Refer to `A Simple, Fast Diverse Decoding Algorithm for Neural Generation <https://arxiv.org/abs/1611.08562>`_\n for details. Bigger `diversity_rate` would lead to more diversity.\n if `diversity_rate == 0` is equivalent to naive BeamSearch. Default\n to 0 if not set.\n use_fp16_decoding(bool, optional): Whether to use fp16 for decoding. \n rel_len(bool, optional):\n Indicating whether `max_out_len` in is the length relative to that\n of source text. Only works in `v2` temporarily. It is suggest to set\n a small `max_out_len` and use `rel_len=True`. Default to False if\n not set.\n alpha(float, optional):\n The power number in length penalty calculation. Only works in `v2`\n temporarily. Refer to `GNMT <https://arxiv.org/pdf/1609.08144.pdf>`_.\n Default to 0.6 if not set.\n \"\"\"\n\n def __init__(self,\n src_vocab_size,\n trg_vocab_size,\n max_length,\n num_encoder_layers,\n num_decoder_layers,\n n_head,\n d_model,\n d_inner_hid,\n dropout,\n weight_sharing,\n attn_dropout=None,\n act_dropout=None,\n bos_id=0,\n eos_id=1,\n decoding_strategy=\"beam_search\",\n beam_size=4,\n topk=1,\n topp=0.0,\n max_out_len=256,\n diversity_rate=0.0,\n decoding_lib=None,\n use_fp16_decoding=False,\n rel_len=False,\n alpha=0.6):\n # if decoding_lib is None:\n # raise ValueError(\n # \"The args decoding_lib must be set to use FasterTransformer. \")\n # elif not os.path.exists(decoding_lib):\n # raise ValueError(\"The path to decoding lib is not exist.\")\n\n args = dict(locals())\n args.pop(\"self\")\n args.pop(\"__class__\", None)\n self.decoding_strategy = args.pop(\"decoding_strategy\")\n self.beam_size = args.pop(\"beam_size\")\n self.topk = args.pop(\"topk\")\n self.topp = args.pop(\"topp\")\n self.max_out_len = args.pop(\"max_out_len\")\n self.diversity_rate = args.pop(\"diversity_rate\")\n self.decoding_lib = args.pop(\"decoding_lib\")\n self.use_fp16_decoding = args.pop(\"use_fp16_decoding\")\n self.rel_len = args.pop(\"rel_len\")\n self.alpha = args.pop(\"alpha\")\n self.dropout = dropout\n self.weight_sharing = weight_sharing\n self.trg_vocab_size = trg_vocab_size\n self.d_model = d_model\n self.bos_id = bos_id\n self.max_length = max_length\n super(FasterTransformer, self).__init__(**args)\n\n self.decoding_linear = nn.Linear(\n in_features=d_model, out_features=trg_vocab_size)\n\n if weight_sharing:\n self.trg_word_embedding = WordEmbedding(\n vocab_size=trg_vocab_size, emb_dim=d_model, bos_id=self.bos_id)\n self.trg_pos_embedding = PositionalEmbedding(\n emb_dim=d_model, max_length=max_length)\n\n self.decoding = InferTransformerDecoding(\n decoder=self.transformer.decoder,\n word_embedding=self.trg_word_embedding.word_embedding,\n positional_embedding=self.trg_pos_embedding.pos_encoder,\n linear=self.decoding_linear,\n num_decoder_layers=num_decoder_layers,\n n_head=n_head,\n d_model=d_model,\n bos_id=bos_id,\n eos_id=eos_id,\n decoding_strategy=decoding_strategy,\n beam_size=beam_size,\n topk=topk,\n topp=topp,\n max_out_len=max_out_len,\n diversity_rate=self.diversity_rate,\n decoding_lib=self.decoding_lib,\n use_fp16_decoding=self.use_fp16_decoding,\n rel_len=self.rel_len,\n alpha=self.alpha)\n\n def forward(self, 
src_word):\n src_max_len = paddle.shape(src_word)[-1]\n src_slf_attn_bias = paddle.cast(\n src_word == self.bos_id,\n dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e9\n src_pos = paddle.cast(\n src_word != self.bos_id, dtype=src_word.dtype) * paddle.arange(\n start=0, end=src_max_len)\n\n # Run encoder\n src_emb = self.src_word_embedding(src_word)\n src_pos_emb = self.src_pos_embedding(src_pos)\n src_emb = src_emb + src_pos_emb\n enc_input = F.dropout(\n src_emb, p=self.dropout,\n training=False) if self.dropout else src_emb\n enc_output = self.transformer.encoder(enc_input, src_slf_attn_bias)\n\n if self.use_fp16_decoding:\n enc_output = paddle.cast(enc_output, dtype=\"float16\")\n\n mem_seq_lens = paddle.sum(paddle.cast(\n src_word != self.bos_id, dtype=\"int32\"),\n dtype=\"int32\",\n axis=1)\n ids = self.decoding(enc_output, mem_seq_lens)\n\n return ids\n\n def load(self, init_from_params):\n # Load the trained model\n assert init_from_params, (\n \"Please set init_from_params to load the infer model.\")\n\n model_dict = paddle.load(init_from_params, return_numpy=True)\n\n # To set weight[padding_idx] to 0.\n model_dict[\"trg_word_embedding.word_embedding.weight\"][\n self.bos_id] = [0] * self.d_model\n\n # Dealing with weight sharing.\n if self.weight_sharing:\n model_dict[\"decoding_linear.weight\"] = np.transpose(model_dict[\n \"trg_word_embedding.word_embedding.weight\"])\n else:\n model_dict[\"decoding_linear.weight\"] = model_dict[\"linear.weight\"]\n # NOTE: the data type of the embedding bias for logits is different\n # between decoding with beam search and top-k/top-p sampling in\n # FasterTransformer when using float16.\n # NOTE: This changes since FasterTransformer V4.0 and update accordingly\n # after update to FT-4.0.\n bias_dtype = \"float32\"\n if self.use_fp16_decoding and not self.decoding_strategy.startswith(\n \"beam_search\"):\n bias_dtype = \"float16\"\n model_dict[\"decoding_linear.bias\"] = np.zeros(\n [self.trg_vocab_size], dtype=bias_dtype)\n\n # To avoid a longer length than training, reset the size of position\n # encoding to max_length\n model_dict[\"encoder.pos_encoder.weight\"] = position_encoding_init(\n self.max_length, self.d_model)\n model_dict[\"decoder.pos_encoder.weight\"] = position_encoding_init(\n self.max_length, self.d_model)\n\n if self.use_fp16_decoding:\n for item in self.state_dict():\n if \"decoder\" in item:\n model_dict[item] = np.float16(model_dict[item])\n model_dict[\"decoding_linear.weight\"] = np.float16(model_dict[\n \"decoding_linear.weight\"])\n model_dict[\"trg_word_embedding.word_embedding.weight\"] = np.float16(\n model_dict[\"trg_word_embedding.word_embedding.weight\"])\n model_dict[\"trg_pos_embedding.pos_encoder.weight\"] = np.float16(\n model_dict[\"trg_pos_embedding.pos_encoder.weight\"])\n\n self.load_dict(model_dict)\n\n def export_params(self, init_from_params, place):\n '''\n This method is used for load static graph from dygraph checkpoint\n or export inference model using static graph. \n\n Args:\n init_from_params (string):\n The path to dygraph checkpoint. \n place (paddle.Place):\n The place to execute static graph. \n \n Example:\n .. 
code-block::\n paddle.enable_static()\n place = \"gpu\"\n place = paddle.set_device(place)\n reader.adapt_vocab_size(args)\n\n test_program = paddle.static.Program()\n startup_program = paddle.static.Program()\n with paddle.static.program_guard(test_program, startup_program):\n src_word = paddle.static.data(\n name=\"src_word\", shape=[None, None], dtype=\"int64\")\n\n # Define model\n transformer = FasterTransformer(\n src_vocab_size=args.src_vocab_size,\n trg_vocab_size=args.trg_vocab_size,\n max_length=args.max_length + 1,\n num_encoder_layers=args.n_layer,\n num_decoder_layers=args.n_layer,\n n_head=args.n_head,\n d_model=args.d_model,\n d_inner_hid=args.d_inner_hid,\n dropout=args.dropout,\n weight_sharing=args.weight_sharing,\n bos_id=args.bos_idx,\n eos_id=args.eos_idx,\n decoding_strategy=args.decoding_strategy,\n beam_size=args.beam_size,\n max_out_len=args.max_out_len,\n decoding_lib=args.decoding_lib,\n use_fp16_decoding=args.use_fp16_decoding,\n rel_len=args.use_rel_len,\n alpha=args.alpha)\n\n finished_seq = transformer(src_word=src_word)\n\n test_program = test_program.clone(for_test=True)\n\n exe = paddle.static.Executor(place)\n exe.run(startup_program)\n\n # Load checkpoint.\n transformer.export_params(\n init_from_params=os.path.join(args.init_from_params,\n \"transformer.pdparams\"),\n place=place)\n\n paddle.static.save_inference_model(\n os.path.join(args.inference_model_dir, \"transformer\"),\n feed_vars=src_word,\n fetch_vars=finished_seq,\n executor=exe,\n program=test_program)\n '''\n # Load the trained model\n assert init_from_params, (\n \"Please set init_from_params to load the infer model.\")\n\n model_dict = paddle.load(init_from_params, return_numpy=True)\n\n # To set weight[padding_idx] to 0.\n model_dict[\"trg_word_embedding.word_embedding.weight\"][\n self.bos_id] = [0] * self.d_model\n\n # Dealing with weight sharing.\n if self.weight_sharing:\n model_dict[\"decoding_linear.weight\"] = np.transpose(model_dict[\n \"trg_word_embedding.word_embedding.weight\"])\n else:\n model_dict[\"decoding_linear.weight\"] = model_dict[\"linear.weight\"]\n # NOTE: the data type of the embedding bias for logits is different\n # between decoding with beam search and top-k/top-p sampling in\n # FasterTransformer when using float16.\n # NOTE: This changes since FasterTransformer V4.0 and update accordingly\n # after update to FT-4.0.\n bias_dtype = \"float32\"\n if self.use_fp16_decoding and not self.decoding_strategy.startswith(\n \"beam_search\"):\n bias_dtype = \"float16\"\n model_dict[\"decoding_linear.bias\"] = np.zeros(\n [self.trg_vocab_size], dtype=bias_dtype)\n\n # To avoid a longer length than training, reset the size of position\n # encoding to max_length\n model_dict[\"encoder.pos_encoder.weight\"] = position_encoding_init(\n self.max_length, self.d_model)\n model_dict[\"decoder.pos_encoder.weight\"] = position_encoding_init(\n self.max_length, self.d_model)\n\n if self.use_fp16_decoding:\n for item in self.state_dict():\n if \"decoder\" in item:\n model_dict[item] = np.float16(model_dict[item])\n model_dict[\"decoding_linear.weight\"] = np.float16(model_dict[\n \"decoding_linear.weight\"])\n model_dict[\"trg_word_embedding.word_embedding.weight\"] = np.float16(\n model_dict[\"trg_word_embedding.word_embedding.weight\"])\n model_dict[\"trg_pos_embedding.pos_encoder.weight\"] = np.float16(\n model_dict[\"trg_pos_embedding.pos_encoder.weight\"])\n\n for item in self.state_dict():\n param = self\n attr_list = item.split(\".\")\n for attr in attr_list:\n param = 
getattr(param, attr)\n param_name = param.name\n var = paddle.static.global_scope().find_var(param_name).get_tensor()\n var.set(model_dict[item], place)\n\n\nclass TransformerGenerator(paddle.nn.Layer):\n \"\"\"\n The Transformer model for auto-regressive generation with beam search. It wraps\n `FasterTransformer` and `InferTransformerModel`, and automatically chioces using\n `FasterTransformer` (with jit building) or the slower verison `InferTransformerModel`.\n\n Args:\n src_vocab_size (int):\n The size of source vocabulary.\n trg_vocab_size (int):\n The size of target vocabulary.\n max_length (int):\n The maximum length of input sequences.\n num_encoder_layers (int):\n The number of sub-layers to be stacked in the encoder.\n num_decoder_layers (int):\n The number of sub-layers to be stacked in the decoder.\n n_head (int):\n The number of head used in multi-head attention.\n d_model (int):\n The dimension for word embeddings, which is also the last dimension of\n the input and output of multi-head attention, position-wise feed-forward\n networks, encoder and decoder.\n d_inner_hid (int):\n Size of the hidden layer in position-wise feed-forward networks.\n dropout (float):\n Dropout rates. Used for pre-process, activation and inside attention.\n weight_sharing (bool):\n Whether to use weight sharing. \n bos_id (int, optional):\n The start token id and also is used as padding id. Defaults to 0.\n eos_id (int, optional):\n The end token id. Defaults to 1.\n beam_size (int, optional):\n The beam width for beam search. Defaults to 4. \n max_out_len (int, optional):\n The maximum output length. Defaults to 256.\n kwargs:\n The key word arguments can be `output_time_major`, `use_ft`, `use_fp16_decoding`,\n `rel_len`, `alpha`:\n\n - `output_time_major(bool, optional)`: Indicate the data layout of predicted\n Tensor. If `False`, the data layout would be batch major with shape\n `[batch_size, seq_len, beam_size]`. If `True`, the data layout would\n be time major with shape `[seq_len, batch_size, beam_size]`. Default\n to `False`. \n\n - `use_ft(bool, optional)`: Whether to use FasterTransformer\n for decoding. Default to True if not set.\n\n - `use_fp16_decoding(bool, optional)`: Whether to use fp16\n for decoding. Only works when using FasterTransformer.\n\n - `beam_search_version(str, optional)`: Indicating the strategy of\n beam search. It can be 'v1' or 'v2'. 'v2' would select the top\n `beam_size * 2` beams and process the top `beam_size` alive and\n finish beams in them separately, while 'v1' would only select the\n top `beam_size` beams and mix up the alive and finish beams. 'v2' always\n searchs more and get better results, since the alive beams would\n always be `beam_size` while the number of alive beams in `v1` might\n decrease when meeting the end token. However, 'v2' always generates\n longer results thus might do more calculation and be slower.\n\n - `rel_len(bool, optional)`: Indicating whether `max_out_len` in is\n the length relative to that of source text. Only works in `v2` temporarily.\n It is suggest to set a small `max_out_len` and use `rel_len=True`.\n Default to False if not set.\n\n - `alpha(float, optional)`: The power number in length penalty\n calculation. Refer to `GNMT <https://arxiv.org/pdf/1609.08144.pdf>`_.\n Only works in `v2` temporarily. Default to 0.6 if not set.\n \n - diversity_rate(float, optional): Refer to `A Simple, Fast Diverse\n Decoding Algorithm for Neural Generation <https://arxiv.org/abs/1611.08562>`_\n for details. 
Bigger `diversity_rate` would lead to more diversity.\n if `diversity_rate == 0` is equivalent to naive BeamSearch. Default\n to 0 if not set. **NOTE**: Only works when using FasterTransformer\n temporarily.\n \"\"\"\n\n def __init__(self,\n src_vocab_size,\n trg_vocab_size,\n max_length,\n num_encoder_layers,\n num_decoder_layers,\n n_head,\n d_model,\n d_inner_hid,\n dropout,\n weight_sharing,\n bos_id=0,\n eos_id=1,\n beam_size=4,\n max_out_len=256,\n **kwargs):\n logger.warning(\n \"TransformerGenerator is an experimental API and subject to change.\")\n # `kwargs` can include output_time_major, use_fp16_decoding, topk, topp.\n # The later three arguments can only work when using FasterTransformer,\n # and expose topk, topp later.\n super(TransformerGenerator, self).__init__()\n self.d_model = d_model\n self.max_length = max_length\n self.output_time_major = kwargs.pop(\"output_time_major\", True)\n # Only works for FasterTransformer.\n # TODO: original version supports diversity rate.\n diversity_rate = kwargs.pop(\"diversity_rate\", 0.0)\n use_fp16_decoding = kwargs.pop(\"use_fp16_decoding\", False)\n use_ft = kwargs.pop(\"use_ft\", True)\n beam_search_version = kwargs.pop(\"beam_search_version\", \"v1\")\n rel_len = kwargs.pop(\"rel_len\", False)\n alpha = kwargs.pop(\"alpha\", 0.6)\n\n if use_ft:\n try:\n decoding_strategy = (\"beam_search_v2\"\n if beam_search_version == \"v2\" else\n \"beam_search\")\n self.transformer = FasterTransformer(\n src_vocab_size=src_vocab_size,\n trg_vocab_size=trg_vocab_size,\n max_length=max_length,\n num_encoder_layers=num_encoder_layers,\n num_decoder_layers=num_decoder_layers,\n n_head=n_head,\n d_model=d_model,\n d_inner_hid=d_inner_hid,\n dropout=dropout,\n weight_sharing=weight_sharing,\n bos_id=bos_id,\n eos_id=eos_id,\n beam_size=beam_size,\n max_out_len=max_out_len,\n diversity_rate=diversity_rate,\n decoding_strategy=decoding_strategy,\n use_fp16_decoding=use_fp16_decoding,\n rel_len=rel_len,\n alpha=alpha)\n except Exception:\n logger.warning(\n \"Exception occurs when using FasterTransformer. \" \\\n \"The original forward will be involved. 
\")\n if diversity_rate != 0:\n logger.warning(\n \"diversity_rate would not work since it is only \" \\\n \"supported by FasterTransformer temporarily.\")\n self.transformer = InferTransformerModel(\n src_vocab_size=src_vocab_size,\n trg_vocab_size=trg_vocab_size,\n max_length=max_length,\n num_encoder_layers=num_encoder_layers,\n num_decoder_layers=num_decoder_layers,\n n_head=n_head,\n d_model=d_model,\n d_inner_hid=d_inner_hid,\n dropout=dropout,\n weight_sharing=weight_sharing,\n bos_id=bos_id,\n eos_id=eos_id,\n beam_size=beam_size,\n max_out_len=max_out_len,\n output_time_major=self.output_time_major,\n beam_search_version=beam_search_version,\n rel_len=rel_len,\n alpha=alpha)\n else:\n if diversity_rate != 0:\n logger.warning(\n \"diversity_rate would not work since it is only \" \\\n \"supported by FasterTransformer temporarily.\")\n self.transformer = InferTransformerModel(\n src_vocab_size=src_vocab_size,\n trg_vocab_size=trg_vocab_size,\n max_length=max_length,\n num_encoder_layers=num_encoder_layers,\n num_decoder_layers=num_decoder_layers,\n n_head=n_head,\n d_model=d_model,\n d_inner_hid=d_inner_hid,\n dropout=dropout,\n weight_sharing=weight_sharing,\n bos_id=bos_id,\n eos_id=eos_id,\n beam_size=beam_size,\n max_out_len=max_out_len,\n output_time_major=self.output_time_major,\n beam_search_version=beam_search_version,\n rel_len=rel_len,\n alpha=alpha)\n\n def forward(self, src_word):\n r\"\"\"\n Performs decoding for transformer model.\n\n Args:\n src_word (Tensor):\n The ids of source sequence words. It is a tensor with shape\n `[batch_size, source_sequence_length]` and its data type can be\n int or int64.\n \n Returns:\n Tensor:\n An int64 tensor shaped indicating the predicted ids. Its shape is\n `[batch_size, seq_len, beam_size]` or `[seq_len, batch_size, beam_size]`\n according to `output_time_major`. While, when using FasterTransformer\n and beam search v2, the beam dimension would be doubled to include\n both the top `beam_size` alive and finish beams, thus the tensor\n shape is `[batch_size, seq_len, beam_size * 2]` or `[seq_len, batch_size, beam_size * 2]`.\n \n Example:\n .. code-block::\n\n import paddle\n from paddlenlp.ops import TransformerGenerator\n\n transformer = TransformerGenerator(\n src_vocab_size=30000,\n trg_vocab_size=30000,\n max_length=256,\n num_encoder_layers=6,\n num_decoder_layers=6,\n n_head=8,\n d_model=512,\n d_inner_hid=2048,\n dropout=0.1,\n weight_sharing=True,\n bos_id=0,\n eos_id=1,\n beam_size=4,\n max_out_len=256)\n\n batch_size = 5\n seq_len = 10\n transformer(\n src_word=paddle.randint(low=3, high=30000, shape=[batch_size, seq_len]))\n \"\"\"\n out = self.transformer(src_word)\n # TODO(guosheng): FasterTransformer has an output with layout\n # `[seq_len, batch_size, beam_size]`. While the output layout of\n # original one is `[batch_size, seq_len, beam_size]`. 
Maybe we need\n # unify them later.\n if not self.output_time_major and isinstance(self.transformer,\n FasterTransformer):\n out = paddle.transpose(out, [1, 0, 2])\n return out\n\n def load(self, path):\n if isinstance(self.transformer, FasterTransformer):\n self.transformer.load(path)\n else:\n model_dict = paddle.load(path)\n self.transformer.load_dict(model_dict)\n\n\nclass FasterGPT(nn.Layer):\n def __init__(self,\n model,\n topk=4,\n topp=0.0,\n max_out_len=256,\n bos_id=50256,\n eos_id=50256,\n temperature=0,\n decoding_lib=None,\n use_fp16_decoding=False):\n super(FasterGPT, self).__init__()\n self.use_fp16_decoding = use_fp16_decoding\n self.decoding = InferGptDecoding(\n model=model,\n topk=topk,\n topp=topp,\n max_out_len=max_out_len,\n bos_id=bos_id,\n eos_id=eos_id,\n temperature=temperature,\n decoding_lib=decoding_lib,\n use_fp16_decoding=use_fp16_decoding)\n\n def forward(self, input_ids):\n return self.decoding(input_ids)\n\n def export_params(self, state_to_load, place):\n for item in state_to_load:\n param_data = np.array(state_to_load[item])\n if self.use_fp16_decoding:\n param_data = np.float16(param_data)\n\n param = self\n attr_list = item.split(\".\")\n attr_list = [\"decoding\", \"model\"] + attr_list\n for attr in attr_list:\n param = getattr(param, attr)\n param_name = param.name\n var = paddle.static.global_scope().find_var(param_name).get_tensor()\n var.set(param_data, place)\n\n def save_resources(self, tokenizer, path):\n vocab_file = os.path.join(path, \"vocab.txt\")\n if isinstance(tokenizer, GPTTokenizer):\n with open(vocab_file, 'w', encoding='utf-8') as f:\n for token in tokenizer.encoder:\n f.write(token + '\\n')\n merges_file = os.path.join(path, \"merges.txt\")\n shutil.copyfile(tokenizer._merges_file, merges_file)\n elif isinstance(tokenizer, GPTChineseTokenizer):\n tokenizer.save_resources(path)\n\n\nclass FasterUnifiedTransformer(UnifiedTransformerPretrainedModel):\n def __init__(self,\n model,\n decoding_strategy=\"sampling\",\n decoding_lib=None,\n use_fp16_decoding=False):\n super(FasterUnifiedTransformer, self).__init__()\n self._model = model\n self._decoding_strategy = decoding_strategy\n self.bos_token_id = model.bos_token_id\n self.pad_token_id = model.pad_token_id\n self.eos_token_id = model.eos_token_id\n self.unk_token_id = model.unk_token_id\n self.vocab_size = model.lm_head.decoder_bias.shape[0]\n self.logits_mask = self.generate_logits_mask(use_fp16_decoding)\n\n self._n_head = self._model.num_attention_heads\n self._hidden_dims = self._model.hidden_size\n self._normalize_before = self._model.normalize_before\n self._size_per_head = self._hidden_dims // self._n_head\n self._n_layer = self._model.num_hidden_layers\n self._mask_id = self._model.mask_token_id\n self._hidden_act = self._model.hidden_act\n\n self.decoding = InferUnifiedDecoding(\n model=self._model,\n decoding_strategy=self._decoding_strategy,\n decoding_lib=decoding_lib,\n use_fp16_decoding=use_fp16_decoding,\n logits_mask=self.logits_mask,\n n_head=self._n_head,\n hidden_dims=self._hidden_dims,\n size_per_head=self._size_per_head,\n n_layer=self._n_layer,\n unk_id=self.unk_token_id,\n mask_id=self._mask_id,\n normalize_before=self._normalize_before,\n hidden_act=self._hidden_act)\n\n def prepare_inputs_for_generation(self,\n input_ids,\n token_type_ids,\n position_ids,\n attention_mask,\n use_cache=False,\n cache=None,\n **kwargs):\n input_ids = input_ids[:, :-1]\n decoding_type_id = token_type_ids[:, -1]\n token_type_ids = token_type_ids[:, :-1]\n position_ids = 
position_ids[:, :-1]\n attention_mask = attention_mask[:, :, :-1, :-1]\n seq_len = kwargs.get(\"seq_len\", None) - 1\n\n return {\n \"input_ids\": input_ids,\n \"token_type_ids\": token_type_ids,\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"use_cache\": use_cache,\n \"cache\": cache,\n \"seq_len\": seq_len,\n \"decoding_type_id\": paddle.cast(\n decoding_type_id, dtype=\"int32\")\n }\n\n def generate_logits_mask(self, use_fp16_decoding):\n # pre-process distribution\n logits_mask = np.zeros(shape=[self.vocab_size], dtype=np.float32)\n logits_mask[self.unk_token_id] = -1e9\n logits_mask[self.bos_token_id] = -1e9\n logits_mask[self.pad_token_id] = -1e9\n\n logits_mask_t = paddle.assign(logits_mask)\n if use_fp16_decoding and self._decoding_strategy == \"sampling\":\n return paddle.cast(logits_mask_t, dtype=\"float16\")\n else:\n return logits_mask_t\n\n def sample(self,\n input_ids,\n logits_processors,\n max_length,\n pad_token_id,\n eos_token_id,\n top_k=4,\n top_p=0.0,\n temperature=1.0,\n min_tokens_to_keep=1,\n **model_kwargs):\n max_length -= input_ids.shape[-1]\n model_inputs = self.prepare_inputs_for_generation(input_ids,\n **model_kwargs)\n\n if self._decoding_strategy == \"sampling\":\n if top_p == 1.0 and top_k > 0:\n top_p = 0.0\n elif top_p <= 0.0 and top_k == 0:\n raise ValueError(\n \"Topk sampling or topp sampling must be applied. \" \\\n \"Topk sampling and topp sampling cannot be both applied. \")\n elif (top_p > 0.0 and top_p < 1.0) and top_k > 0:\n raise ValueError(\n \"Topk sampling and topp sampling cannot be both applied. \")\n\n return self.forward(\n model_inputs=model_inputs,\n max_length=max_length,\n top_k=top_k,\n top_p=top_p,\n temperature=temperature)\n\n def beam_search(self, input_ids, beam_scorer, logits_processors, max_length,\n diversity_rate, pad_token_id, eos_token_id, **model_kwargs):\n max_length -= input_ids.shape[-1]\n model_inputs = self.prepare_inputs_for_generation(input_ids,\n **model_kwargs)\n temperature = model_kwargs.pop('temperature', 1.0)\n\n return self.forward(\n model_inputs=model_inputs,\n max_length=max_length,\n num_beams=beam_scorer.num_beams,\n diversity_rate=diversity_rate,\n temperature=temperature)\n\n def forward(self,\n max_length,\n decoding_strategy=\"sampling\",\n top_k=4,\n top_p=0.0,\n num_beams=4,\n diversity_rate=0.0,\n temperature=1.0,\n model_inputs=None,\n **model_kwargs):\n seq_len = model_inputs.pop('seq_len', None)\n decoding_type_id = model_inputs.pop('decoding_type_id')\n\n outputs = self._model(**model_inputs)\n if isinstance(outputs, tuple):\n caches = outputs[1]\n else:\n raise RuntimeError('Not support.')\n cache_k = [c.k for c in caches]\n cache_v = [c.v for c in caches]\n\n return self.decoding(\n cache_k=cache_k,\n cache_v=cache_v,\n memory_seq_lens=seq_len,\n beam_size=num_beams,\n diversity_rate=diversity_rate,\n topk=top_k,\n topp=top_p,\n max_out_len=max_length,\n bos_id=self.bos_token_id,\n eos_id=self.eos_token_id,\n temperature=temperature,\n decoding_type_id=decoding_type_id,\n pos_bias=True)\n\n\nclass FasterUNIMOText(UNIMOPretrainedModel):\n def __init__(self,\n model,\n decoding_strategy=\"sampling\",\n decoding_lib=None,\n use_fp16_decoding=False):\n super(FasterUNIMOText, self).__init__()\n self._model = model\n self._decoding_strategy = decoding_strategy\n self.bos_token_id = model.bos_token_id\n self.pad_token_id = model.pad_token_id\n self.eos_token_id = model.eos_token_id\n self.unk_token_id = model.unk_token_id\n self.vocab_size = 
model.lm_head.decoder_bias.shape[0]\n self.logits_mask = self.generate_logits_mask(use_fp16_decoding)\n\n self._n_head = self._model.num_attention_heads\n self._hidden_dims = self._model.hidden_size\n self._normalize_before = self._model.normalize_before\n self._size_per_head = self._hidden_dims // self._n_head\n self._n_layer = self._model.num_hidden_layers\n self._mask_id = self._model.mask_token_id\n self._hidden_act = self._model.hidden_act\n\n self.decoding = InferUnifiedDecoding(\n model=self._model,\n decoding_strategy=self._decoding_strategy,\n decoding_lib=decoding_lib,\n use_fp16_decoding=use_fp16_decoding,\n logits_mask=self.logits_mask,\n n_head=self._n_head,\n hidden_dims=self._hidden_dims,\n size_per_head=self._size_per_head,\n n_layer=self._n_layer,\n unk_id=self.unk_token_id,\n mask_id=self._mask_id,\n normalize_before=self._normalize_before,\n hidden_act=self._hidden_act)\n\n def prepare_inputs_for_generation(self,\n input_ids,\n token_type_ids,\n position_ids,\n attention_mask,\n use_cache=False,\n cache=None,\n **kwargs):\n input_ids = input_ids[:, :-1]\n decoding_type_id = token_type_ids[:, -1]\n token_type_ids = token_type_ids[:, :-1]\n position_ids = position_ids[:, :-1]\n attention_mask = attention_mask[:, :, :-1, :-1]\n seq_len = kwargs.get(\"seq_len\", None) - 1\n\n return {\n \"input_ids\": input_ids,\n \"token_type_ids\": token_type_ids,\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"use_cache\": use_cache,\n \"cache\": cache,\n \"seq_len\": seq_len,\n \"decoding_type_id\": paddle.cast(\n decoding_type_id, dtype=\"int32\")\n }\n\n def generate_logits_mask(self, use_fp16_decoding):\n # pre-process distribution\n logits_mask = np.zeros(shape=[self.vocab_size], dtype=np.float32)\n logits_mask[self.unk_token_id] = -1e9\n logits_mask[self.bos_token_id] = -1e9\n logits_mask[self.pad_token_id] = -1e9\n\n logits_mask_t = paddle.assign(logits_mask)\n if use_fp16_decoding and self._decoding_strategy == \"sampling\":\n return paddle.cast(logits_mask_t, dtype=\"float16\")\n else:\n return logits_mask_t\n\n def sample(self,\n input_ids,\n logits_processors,\n max_length,\n pad_token_id,\n eos_token_id,\n top_k=4,\n top_p=0.0,\n temperature=1.0,\n min_tokens_to_keep=1,\n **model_kwargs):\n max_length -= input_ids.shape[-1]\n model_inputs = self.prepare_inputs_for_generation(input_ids,\n **model_kwargs)\n\n if self._decoding_strategy == \"sampling\":\n if top_p == 1.0 and top_k > 0:\n top_p = 0.0\n elif top_p <= 0.0 and top_k == 0:\n raise ValueError(\n \"Topk sampling or topp sampling must be applied. \" \\\n \"Topk sampling and topp sampling cannot be both applied. \")\n elif (top_p > 0.0 and top_p < 1.0) and top_k > 0:\n raise ValueError(\n \"Topk sampling and topp sampling cannot be both applied. 
\")\n\n return self.forward(\n model_inputs=model_inputs,\n max_length=max_length,\n top_k=top_k,\n top_p=top_p,\n temperature=temperature)\n\n def beam_search(self, input_ids, beam_scorer, logits_processors, max_length,\n diversity_rate, pad_token_id, eos_token_id, **model_kwargs):\n max_length -= input_ids.shape[-1]\n model_inputs = self.prepare_inputs_for_generation(input_ids,\n **model_kwargs)\n temperature = model_kwargs.pop('temperature', 1.0)\n\n return self.forward(\n model_inputs=model_inputs,\n max_length=max_length,\n num_beams=beam_scorer.num_beams,\n diversity_rate=diversity_rate,\n temperature=temperature)\n\n def forward(self,\n max_length,\n decoding_strategy=\"sampling\",\n top_k=4,\n top_p=0.0,\n num_beams=4,\n diversity_rate=0.0,\n temperature=1.0,\n model_inputs=None,\n **model_kwargs):\n seq_len = model_inputs.pop('seq_len', None)\n decoding_type_id = model_inputs.pop('decoding_type_id')\n\n outputs = self._model(**model_inputs)\n if isinstance(outputs, tuple):\n caches = outputs[1]\n else:\n raise RuntimeError('Not support.')\n cache_k = [c.k for c in caches]\n cache_v = [c.v for c in caches]\n\n return self.decoding(\n cache_k=cache_k,\n cache_v=cache_v,\n memory_seq_lens=seq_len,\n beam_size=num_beams,\n diversity_rate=diversity_rate,\n topk=top_k,\n topp=top_p,\n max_out_len=max_length,\n bos_id=self.bos_token_id,\n eos_id=self.eos_token_id,\n temperature=temperature,\n decoding_type_id=decoding_type_id,\n pos_bias=False)\n\n\nclass FasterBART(BartPretrainedModel):\n def __init__(self,\n model,\n decoding_strategy=\"beam_search_v2\",\n decoding_lib=None,\n use_fp16_decoding=False):\n super(FasterBART, self).__init__()\n self.use_fp16_decoding = use_fp16_decoding\n if use_fp16_decoding:\n weight_attr = paddle.ParamAttr(initializer=nn.initializer.Assign(\n model.bart.encoder.embed_tokens.weight))\n model.bart.encoder.embed_tokens = nn.Embedding(\n *model.bart.encoder.embed_tokens.weight.shape,\n weight_attr=weight_attr)\n self.encoder = model.bart.get_encoder()\n self.decoder = model.bart.get_decoder()\n self.bos_token_id = model.bart.config['bos_token_id']\n self.eos_token_id = model.bart.config['eos_token_id']\n self.pad_token_id = model.bart.config['pad_token_id']\n if decoding_strategy.startswith(\"beam_search\"):\n decoding_strategy = \"beam_search_v2\"\n self._decoding_strategy = decoding_strategy\n self.decoding = InferBartDecoding(\n model=model,\n decoding_strategy=decoding_strategy,\n decoding_lib=decoding_lib,\n use_fp16_decoding=use_fp16_decoding)\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n def greedy_search(self, input_ids, logits_processors, max_length,\n pad_token_id, eos_token_id, **model_kwargs):\n return self.sample(\n input_ids=input_ids,\n logits_processors=logits_processors,\n max_length=max_length,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n top_k=1,\n top_p=1.0,\n **model_kwargs)\n\n def beam_search(self, input_ids, beam_scorer, logits_processors, max_length,\n diversity_rate, pad_token_id, eos_token_id, **model_kwargs):\n max_length -= input_ids.shape[-1]\n rel_len = model_kwargs.pop(\"rel_len\", False)\n alpha = model_kwargs.pop(\"alpha\", 0.6)\n encoder_output = model_kwargs.pop(\"encoder_output\")\n mem_seq_lens = model_kwargs.pop(\"mem_seq_lens\")\n return self.forward(\n encoder_output=encoder_output,\n mem_seq_lens=mem_seq_lens,\n beam_size=beam_scorer.num_beams,\n max_out_len=max_length,\n diversity_rate=diversity_rate,\n rel_len=rel_len,\n alpha=alpha)\n\n def 
sample(self,\n input_ids,\n logits_processors,\n max_length,\n pad_token_id,\n eos_token_id,\n top_k=4,\n top_p=0.0,\n temperature=1.0,\n min_tokens_to_keep=1,\n **model_kwargs):\n max_length -= input_ids.shape[-1]\n if self._decoding_strategy in [\"sampling\", \"greedy_search\"] and (\n abs(top_p - 1.0) < 1e-6 and top_k > 0):\n top_p = 0.0\n elif self._decoding_strategy == \"sampling\" and (top_p != 1.0 and\n top_k == 0):\n top_k = 0\n else:\n raise ValueError(\n \"Only top_k sampling or top_p sampling are supported. \" \\\n \"Top_k sampling and top_p sampling cannot be both applied. \")\n encoder_output = model_kwargs.pop(\"encoder_output\")\n mem_seq_lens = model_kwargs.pop(\"mem_seq_lens\")\n return self.forward(\n encoder_output=encoder_output,\n mem_seq_lens=mem_seq_lens,\n top_k=top_k,\n top_p=top_p,\n max_out_len=max_length)\n\n def forward(self,\n input_ids=None,\n encoder_output=None,\n mem_seq_lens=None,\n beam_size=4,\n top_k=1,\n top_p=0.0,\n max_out_len=256,\n diversity_rate=0.0,\n rel_len=False,\n alpha=0.6):\n if encoder_output is None:\n assert input_ids is not None, \"You have to specify either input_ids or encoder_output.\"\n encoder_output = self.encoder(input_ids)\n if mem_seq_lens is None:\n assert input_ids is not None, \"You have to specify either input_ids when generating mem_seq_lens.\"\n mem_seq_lens = paddle.sum(paddle.cast(\n input_ids != self.pad_token_id, dtype=\"int32\"),\n axis=-1,\n keepdim=True,\n dtype=\"int32\")\n if self.use_fp16_decoding:\n encoder_output = paddle.cast(encoder_output, \"float16\")\n return self.decoding(\n enc_output=encoder_output,\n memory_seq_lens=mem_seq_lens,\n beam_size=beam_size,\n top_k=top_k,\n top_p=top_p,\n max_out_len=max_out_len,\n diversity_rate=diversity_rate,\n rel_len=rel_len,\n alpha=alpha)\n" ]
[ [ "numpy.array", "numpy.float16", "numpy.transpose", "numpy.zeros" ] ]
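Both the FasterUnifiedTransformer and FasterUNIMOText classes above bias the output distribution with generate_logits_mask instead of filtering tokens after softmax: adding -1e9 to a logit drives its post-softmax probability to effectively zero. A minimal numpy sketch of that idea (the vocabulary size and token ids below are invented for illustration, not taken from any real model):

import numpy as np

vocab_size, unk_id, bos_id, pad_id = 10, 3, 0, 1

# Additive mask: -1e9 pushes the banned tokens' post-softmax probability to ~0.
logits_mask = np.zeros(vocab_size, dtype=np.float32)
logits_mask[[unk_id, bos_id, pad_id]] = -1e9

logits = np.random.randn(vocab_size).astype(np.float32)
masked = logits + logits_mask
probs = np.exp(masked - masked.max())
probs /= probs.sum()
assert probs[[unk_id, bos_id, pad_id]].sum() < 1e-6  # effectively never sampled

Because the mask is additive, it can be precomputed once (and cast to float16 for fp16 decoding, as the code above does) and reused at every decoding step.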
westpark/piwars-2018
[ "a2e1cb67e5fcc8f65ed17975d076088a9f92da2a" ]
[ "scratch/process.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n# Load library functions we want\nimport time\nimport os\nimport sys\nimport random # used by ProcessImage below\n# import ThunderBorg\nimport io\nimport threading\nimport picamera\nimport picamera.array\nimport cv2\nimport numpy\n\n# Camera settings\nimageWidth = 320 # Camera image width\nimageHeight = 240 # Camera image height\nframeRate = 3 # Camera image capture frame rate\ndebug = False # Set True to show the intermediate processing windows\n\nfinished = threading.Event()\nbuffer_empty = threading.Event()\nbuffer_filled = threading.Event()\n\nclass Robot(object):\n \n def __init__(self):\n pass\n \n def reset(self):\n pass\n \n def drive(self, left_speed, right_speed):\n pass\n\nclass PiborgRobot(Robot):\n \n def __init__(self):\n self.tb = ThunderBorg.ThunderBorg()\n self.tb.Init()\n if not self.tb.foundChip:\n raise RuntimeError(\"ThunderBorg not found\")\n self.tb.SetCommsFailsafe(False)\n \n # Auto drive settings\n self.autoMaxPower = 1.0 # Maximum output in automatic mode\n self.autoMinPower = 0.2 # Minimum output in automatic mode\n self.autoFullSpeedArea = 300 # Target size at which we use the maximum allowed output\n \n self.voltageIn = 12.0 # Total battery voltage to the ThunderBorg\n self.voltageOut = 12.0 * 0.95 # Maximum motor voltage, we limit it to 95% to allow the RPi to get uninterrupted power\n # Setup the power limits\n if self.voltageOut > self.voltageIn:\n self.maxPower = 1.0\n else:\n self.maxPower = self.voltageOut / float(self.voltageIn)\n self.autoMaxPower *= self.maxPower\n \n def reset(self):\n self.tb.MotorsOff()\n \n def drive(self, left_speed, right_speed):\n self.tb.SetMotor1(left_speed)\n self.tb.SetMotor2(right_speed)\n\n\n# Image processing function\ndef ProcessImage(self, image, colour):\n print(\"Processing image for\", colour)\n time.sleep(random.random())\n return \n \n # View the original image seen by the camera.\n if debug:\n cv2.imshow('original', image)\n cv2.waitKey(0)\n\n # Blur the image\n image = cv2.medianBlur(image, 5)\n if debug:\n cv2.imshow('blur', image)\n cv2.waitKey(0)\n\n # Convert the image from 'BGR' to HSV colour space\n image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n if debug:\n cv2.imshow('cvtColour', image)\n cv2.waitKey(0)\n\n # We want to extract the 'Hue', or colour, from the image. The 'inRange'\n # method will extract the colour we are interested in (between 0 and 180)\n # In testing, the Hue value for red is between 95 and 125\n # Green is between 50 and 75\n # Blue is between 20 and 35\n # Yellow is... 
to be found!\n if colour == \"red\":\n imrange = cv2.inRange(image, numpy.array((95, 127, 64)), numpy.array((125, 255, 255)))\n elif colour == \"green\":\n imrange = cv2.inRange(image, numpy.array((50, 127, 64)), numpy.array((75, 255, 255)))\n elif colour == 'blue':\n imrange = cv2.inRange(image, numpy.array((20, 64, 64)), numpy.array((35, 255, 255)))\n\n # I used the following code to find the approximate 'hue' of the ball in\n # front of the camera\n # for crange in range(0,170,10):\n # imrange = cv2.inRange(image, numpy.array((crange, 64, 64)), numpy.array((crange+10, 255, 255)))\n # print(crange)\n # cv2.imshow('range',imrange)\n # cv2.waitKey(0)\n \n # View the filtered image found by 'imrange'\n if debug:\n cv2.imshow('imrange', imrange)\n cv2.waitKey(0)\n\n # Find the contours\n contourimage, contours, hierarchy = cv2.findContours(imrange, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n if debug:\n cv2.imshow('contour', contourimage)\n cv2.waitKey(0)\n\n # Go through each contour\n foundArea = -1\n foundX = -1\n foundY = -1\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n cx = x + (w / 2)\n cy = y + (h / 2)\n area = w * h\n if foundArea < area:\n foundArea = area\n foundX = cx\n foundY = cy\n if foundArea > 0:\n ball = [foundX, foundY, foundArea]\n else:\n ball = None\n # Set drives or report ball status\n self.SetSpeedFromBall(ball)\n\n# Set the motor speed from the ball position\ndef SetSpeedFromBall(self, ball):\n global TB\n driveLeft = 0.0\n driveRight = 0.0\n if ball:\n x = ball[0]\n area = ball[2]\n if area < autoMinArea:\n print('Too small / far')\n elif area > autoMaxArea:\n print('Close enough')\n else:\n if area < autoFullSpeedArea:\n speed = 1.0\n else:\n speed = 1.0 / (area / autoFullSpeedArea)\n speed *= autoMaxPower - autoMinPower\n speed += autoMinPower\n direction = (x - imageCentreX) / imageCentreX\n if direction < 0.0:\n # Turn right\n print('Turn Right')\n driveLeft = speed\n driveRight = speed * (1.0 + direction)\n else:\n # Turn left\n print('Turn Left')\n driveLeft = speed * (1.0 - direction)\n driveRight = speed\n print('%.2f, %.2f' % (driveLeft, driveRight))\n else:\n print('No ball')\n\n\n\n# Image stream processing thread\nclass StreamProcessor(threading.Thread):\n def __init__(self, stream, process_image):\n super(StreamProcessor, self).__init__()\n self.stream = stream\n self.process_image = process_image\n self.start()\n\n def run(self):\n while not finished.is_set():\n buffer_filled.wait() \n buffer_filled.clear()\n try:\n self.stream.seek(0)\n self.process_image(self.stream.array, colour)\n finally:\n # Reset the stream and event\n self.stream.seek(0)\n self.stream.truncate()\n buffer_empty.set()\n \n# Image capture thread\nclass ImageCapture(threading.Thread):\n def __init__(self, camera, stream):\n super(ImageCapture, self).__init__()\n self.camera = camera\n self.buffer = stream\n self.start()\n\n def run(self):\n print('Start the stream using the video port')\n self.camera.capture_sequence(self.writeable_buffer(), format='bgr', use_video_port=True)\n print('Terminating camera processing...')\n finished.set()\n print('Processing terminated.')\n\n def writeable_buffer(self):\n while not finished.is_set():\n buffer_empty.wait()\n yield self.buffer\n buffer_filled.set()\n\ndef main():\n # Startup sequence\n print('Setup camera')\n robot = Robot()\n camera = picamera.PiCamera()\n camera.resolution = (imageWidth, imageHeight)\n camera.framerate = frameRate\n imageCentreX = imageWidth / 2.0\n imageCentreY = imageHeight / 2.0\n\n stream = 
picamera.array.PiRGBArray(camera)\n\n print('Setup the stream processing thread')\n processor = StreamProcessor(stream, ProcessImage)\n\n print('Wait ...')\n time.sleep(2)\n captureThread = ImageCapture(camera, stream)\n\n try:\n print('Press CTRL+C to quit')\n ## TB.MotorsOff()\n ## TB.SetLedShowBattery(True)\n # Loop indefinitely until we are no longer running\n while not finished.is_set():\n # Wait for the interval period\n # You could have the code do other work in here 🙂\n time.sleep(1.0)\n # Disable all drives\n except KeyboardInterrupt:\n # CTRL+C exit, disable all drives\n print('\\nUser shutdown')\n finally:\n robot.reset()\n captureThread.join()\n processor.join()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array" ] ]
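The controller at the heart of scratch/process.py above is SetSpeedFromBall: speed falls off as the tracked blob grows, and the left/right split follows its horizontal offset from the image centre. A self-contained sketch of that arithmetic, using the same default constants as PiborgRobot (the function name and the test values are mine):

def speeds_from_ball(x, area, image_centre_x,
                     full_speed_area=300, max_power=1.0, min_power=0.2):
    # Speed is maximal until the target covers full_speed_area pixels,
    # then decays as full_speed_area / area while we close in on it.
    speed = 1.0 if area < full_speed_area else full_speed_area / area
    speed = speed * (max_power - min_power) + min_power
    # Signed offset of the ball from the image centre, in [-1, 1].
    direction = (x - image_centre_x) / image_centre_x
    if direction < 0.0:
        return speed, speed * (1.0 + direction)  # mirrors the branch above
    return speed * (1.0 - direction), speed

left, right = speeds_from_ball(x=80, area=150, image_centre_x=160)
assert abs(left - 1.0) < 1e-9 and abs(right - 0.5) < 1e-9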
FengJunJian/simple-faster-rcnn-pytorch
[ "ed849d60c16b7fad920491284b18165a32448d2c" ]
[ "data/dataset.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nimport torch as t\nfrom data.voc_dataset import VOCBboxDataset\nfrom skimage import transform as sktsf\nfrom torchvision import transforms as tvtsf\nfrom data import util\nimport numpy as np\nfrom utils.config import cfg#opt\n\n\ndef inverse_normalize(img):\n if cfg.caffe_pretrain:\n img = img + (np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1))\n return img[::-1, :, :]\n # approximate un-normalize for visualize\n return (img * 0.225 + 0.45).clip(min=0, max=1) * 255\n\n\ndef pytorch_normalze(img):\n \"\"\"\n https://github.com/pytorch/vision/issues/223\n return appr -1~1 RGB\n \"\"\"\n normalize = tvtsf.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n img = normalize(t.from_numpy(img))\n return img.numpy()\n\n\ndef caffe_normalize(img):\n \"\"\"\n return appr -125-125 BGR\n \"\"\"\n img = img[[2, 1, 0], :, :] # RGB-BGR\n img = img * 255\n mean = np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1)\n img = (img - mean).astype(np.float32, copy=True)\n return img\n\n\ndef preprocess(img, min_size=600, max_size=1000):\n \"\"\"Preprocess an image for feature extraction.\n\n The length of the shorter edge is scaled to :obj:`self.min_size`.\n After the scaling, if the length of the longer edge is longer than\n :param min_size:\n :obj:`self.max_size`, the image is scaled to fit the longer edge\n to :obj:`self.max_size`.\n\n After resizing the image, the image is subtracted by a mean image value\n :obj:`self.mean`.\n\n Args:\n img (~numpy.ndarray): An image. This is in CHW and RGB format.\n The range of its value is :math:`[0, 255]`.\n\n Returns:\n ~numpy.ndarray: A preprocessed image.\n\n \"\"\"\n C, H, W = img.shape\n scale1 = min_size / min(H, W)\n scale2 = max_size / max(H, W)\n scale = min(scale1, scale2)\n img = img / 255.\n img = sktsf.resize(img, (C, H * scale, W * scale), mode='reflect',anti_aliasing=False)\n # both the longer and shorter should be less than\n # max_size and min_size\n if cfg.caffe_pretrain:\n normalize = caffe_normalize\n else:\n normalize = pytorch_normalze\n return normalize(img)\n\n\nclass Transform(object):\n\n def __init__(self, min_size=600, max_size=1000):\n self.min_size = min_size\n self.max_size = max_size\n\n def __call__(self, in_data):\n img, bbox, label = in_data\n _, H, W = img.shape\n img = preprocess(img, self.min_size, self.max_size)\n _, o_H, o_W = img.shape\n scale = o_H / H\n bbox = util.resize_bbox(bbox, (H, W), (o_H, o_W))\n\n # horizontally flip\n img, params = util.random_flip(\n img, x_random=True, return_param=True)\n bbox = util.flip_bbox(\n bbox, (o_H, o_W), x_flip=params['x_flip'])\n\n return img, bbox, label, scale\n\n\nclass Dataset(object):\n def __init__(self, cfg):\n self.opt = cfg\n self.db = VOCBboxDataset(cfg.voc_data_dir,cfg.split)\n self.tsf = Transform(cfg.min_size, cfg.max_size)\n\n def __getitem__(self, idx):\n ori_img, bbox, label, difficult = self.db.get_example(idx)\n if len(bbox)==0 or len(label) ==0:\n return ori_img, bbox, label, difficult\n img, bbox, label, scale = self.tsf((ori_img, bbox, label))\n # TODO: check whose stride is negative to fix this instead copy all\n # some of the strides of a given numpy array are negative.\n return img.copy(), bbox.copy(), label.copy(), scale\n\n def __len__(self):\n return len(self.db)\n\n\nclass TestDataset(object):\n def __init__(self, cfg, split='test', use_difficult=True):\n self.opt = cfg\n self.db = VOCBboxDataset(cfg.voc_data_dir, split=split, 
use_difficult=use_difficult)\n\n def __getitem__(self, idx):\n ori_img, bbox, label, difficult = self.db.get_example(idx)\n img = preprocess(ori_img)\n return img, ori_img.shape[1:], bbox, label, difficult\n\n def __len__(self):\n return len(self.db)\n" ]
[ [ "numpy.array", "torch.from_numpy" ] ]
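preprocess() above resizes with a two-sided rule: scale the short edge to min_size, but cap the scale so the long edge never exceeds max_size. The same rule in isolation, with a worked example:

def compute_scale(H, W, min_size=600, max_size=1000):
    # Short-edge target, capped by the long-edge budget.
    return min(min_size / min(H, W), max_size / max(H, W))

# A 500x1500 image: the short edge alone asks for 1.2x, but the long edge
# caps the scale at 1000/1500, giving roughly 333x1000 after resizing.
assert abs(compute_scale(500, 1500) - 1000 / 1500) < 1e-9
assert abs(compute_scale(600, 800) - 1.0) < 1e-9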
yoyoberenguer/IndexMapping
[ "42a43f418a272e0ca84a0cfa1f08d9140d1b151b" ]
[ "test/test_split.py" ]
[ "\"\"\"\r\nMIT License\r\n\r\nCopyright (c) 2019 Yoann Berenguer\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\r\n\"\"\"\r\n# NUMPY IS REQUIRED\r\ntry:\r\n import numpy\r\nexcept ImportError:\r\n raise ImportError(\"\\n<numpy> library is missing on your system.\"\r\n \"\\nTry: \\n C:\\\\pip install numpy on a window command prompt.\")\r\n\r\nimport timeit\r\n\r\n# PYGAME IS REQUIRED\r\ntry:\r\n import pygame\r\n from pygame import Color, Surface, SRCALPHA, RLEACCEL, BufferProxy\r\n from pygame.surfarray import pixels3d, array_alpha, pixels_alpha, array3d, make_surface\r\n from pygame.image import frombuffer\r\n\r\nexcept ImportError:\r\n raise ImportError(\"\\n<Pygame> library is missing on your system.\"\r\n \"\\nTry: \\n C:\\\\pip install pygame on a window command prompt.\")\r\n\r\nimport os\r\nimport IndexMapping\r\nfrom IndexMapping.mapcfunctions import testing_pure_c, test_c_inplace, rgb_inplace\r\n\r\nPROJECT_PATH = IndexMapping.__path__\r\nos.chdir(PROJECT_PATH[0] + \"\\\\test\")\r\n\r\ndef run_test_split():\r\n\r\n w, h = 800, 1024\r\n screen = pygame.display.set_mode((w * 2, h))\r\n\r\n # TESTING RGB SPLIT\r\n background = pygame.image.load('../Assets/A1.png').convert()\r\n background = pygame.transform.smoothscale(background, (w, h))\r\n background_rgb = pygame.surfarray.array3d(background)\r\n\r\n CLOCK = pygame.time.Clock()\r\n timer = 0\r\n while 1:\r\n pygame.event.pump()\r\n background_b = background_rgb.flatten()\r\n red, green, blue = rgb_inplace(background_b.astype(dtype=numpy.uint8), 800, 1024)\r\n red_surface = make_surface(numpy.frombuffer(red, dtype=numpy.uint8).reshape(w, h, 3))\r\n green_surface = make_surface(numpy.frombuffer(green, dtype=numpy.uint8).reshape(w, h, 3))\r\n blue_surface = make_surface(numpy.frombuffer(blue, dtype=numpy.uint8).reshape(w, h, 3))\r\n screen.fill((0, 0, 0))\r\n screen.blit(red_surface, (0, 0))\r\n screen.blit(green_surface, (20, 20), special_flags=pygame.BLEND_RGB_ADD)\r\n screen.blit(blue_surface, (20, 20), special_flags=pygame.BLEND_RGB_ADD)\r\n if timer > int(1e2):\r\n break\r\n timer += 1\r\n pygame.display.flip()\r\n CLOCK.tick()\r\n\r\n\r\nif __name__ == '__main__':\r\n pass" ]
[ [ "numpy.frombuffer" ] ]
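rgb_inplace above is a compiled helper from IndexMapping; a pure-numpy stand-in (not the library's implementation) shows what the test relies on: three images that each keep a single channel and therefore sum back to the original, which is why the surfaces are blitted with BLEND_RGB_ADD.

import numpy as np

w, h = 4, 3
flat = np.random.randint(0, 256, w * h * 3, dtype=np.uint8)
rgb = flat.reshape(w, h, 3)

red = np.zeros_like(rgb)
green = np.zeros_like(rgb)
blue = np.zeros_like(rgb)
red[..., 0] = rgb[..., 0]    # keep only the R plane
green[..., 1] = rgb[..., 1]  # keep only the G plane
blue[..., 2] = rgb[..., 2]   # keep only the B plane

# The channels never overlap, so uint8 addition cannot overflow here.
assert np.array_equal(red + green + blue, rgb)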
SkrighYZ/scene_graph_benchmark
[ "b17e831a031e11c7b56d12dd092e8f476e48e3d4", "b17e831a031e11c7b56d12dd092e8f476e48e3d4" ]
[ "maskrcnn_benchmark/modeling/balanced_positive_negative_sampler.py", "maskrcnn_benchmark/layers/batch_norm.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\n\n\nclass BalancedPositiveNegativeSampler(object):\n \"\"\"\n This class samples batches, ensuring that they contain a fixed proportion of positives\n \"\"\"\n\n def __init__(self, batch_size_per_image, positive_fraction):\n \"\"\"\n Arguments:\n batch_size_per_image (int): number of elements to be selected per image\n positive_fraction (float): percentage of positive elements per batch\n \"\"\"\n self.batch_size_per_image = batch_size_per_image\n self.positive_fraction = positive_fraction\n\n def __call__(self, matched_idxs):\n \"\"\"\n Arguments:\n matched idxs: list of tensors containing -1, 0 or positive values.\n Each tensor corresponds to a specific image.\n -1 values are ignored, 0 are considered as negatives and > 0 as\n positives.\n\n Returns:\n pos_idx (list[tensor])\n neg_idx (list[tensor])\n\n Returns two lists of binary masks for each image.\n The first list contains the positive elements that were selected,\n and the second list the negative example.\n \"\"\"\n pos_idx = []\n neg_idx = []\n for matched_idxs_per_image in matched_idxs:\n positive = torch.nonzero(matched_idxs_per_image >= 1, as_tuple=False).squeeze(1)\n negative = torch.nonzero(matched_idxs_per_image == 0, as_tuple=False).squeeze(1)\n\n num_pos = int(self.batch_size_per_image * self.positive_fraction)\n # protect against not enough positive examples\n num_pos = min(positive.numel(), num_pos)\n num_neg = self.batch_size_per_image - num_pos\n # protect against not enough negative examples\n num_neg = min(negative.numel(), num_neg)\n\n # randomly select positive and negative examples\n perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]\n perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]\n\n pos_idx_per_image = positive[perm1]\n neg_idx_per_image = negative[perm2]\n\n # create binary mask from indices\n pos_idx_per_image_mask = torch.zeros_like(\n matched_idxs_per_image, dtype=torch.bool\n )\n neg_idx_per_image_mask = torch.zeros_like(\n matched_idxs_per_image, dtype=torch.bool\n )\n pos_idx_per_image_mask[pos_idx_per_image] = 1\n neg_idx_per_image_mask[neg_idx_per_image] = 1\n\n pos_idx.append(pos_idx_per_image_mask)\n neg_idx.append(neg_idx_per_image_mask)\n\n return pos_idx, neg_idx\n", "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\nimport torch\nfrom torch import nn\n\nimport torch.distributed as dist\nimport maskrcnn_benchmark.utils.comm as comm\nfrom torch.autograd.function import Function\n\nclass FrozenBatchNorm2d(nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters\n are fixed\n \"\"\"\n\n def __init__(self, n):\n super(FrozenBatchNorm2d, self).__init__()\n self.register_buffer(\"weight\", torch.ones(n))\n self.register_buffer(\"bias\", torch.zeros(n))\n self.register_buffer(\"running_mean\", torch.zeros(n))\n self.register_buffer(\"running_var\", torch.ones(n))\n\n def forward(self, x):\n scale = self.weight * self.running_var.rsqrt()\n bias = self.bias - self.running_mean * scale\n scale = scale.reshape(1, -1, 1, 1)\n bias = bias.reshape(1, -1, 1, 1)\n return x * scale + bias\n\n\nclass AllReduce(Function):\n @staticmethod\n def forward(ctx, input):\n input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())]\n # Use allgather instead of allreduce since I don't trust in-place operations ..\n dist.all_gather(input_list, input, async_op=False)\n inputs = torch.stack(input_list, dim=0)\n return torch.sum(inputs, dim=0)\n\n @staticmethod\n def backward(ctx, grad_output):\n dist.all_reduce(grad_output, async_op=False)\n return grad_output\n\n\nclass NaiveSyncBatchNorm2d(nn.BatchNorm2d):\n \"\"\"\n In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient\n when the batch size on each worker is different.\n (e.g., when scale augmentation is used, or when it is applied to mask head).\n\n This is a slower but correct alternative to `nn.SyncBatchNorm`.\n\n Note:\n There isn't a single definition of Sync BatchNorm.\n\n When ``stats_mode==\"\"``, this module computes overall statistics by using\n statistics of each worker with equal weight. The result is true statistics\n of all samples (as if they are all on one worker) only when all workers\n have the same (N, H, W). This mode does not support inputs with zero batch size.\n\n When ``stats_mode==\"N\"``, this module computes overall statistics by weighting\n the statistics of each worker by their ``N``. The result is true statistics\n of all samples (as if they are all on one worker) only when all workers\n have the same (H, W). It is slower than ``stats_mode==\"\"``.\n\n Even though the result of this module may not be the true statistics of all samples,\n it may still be reasonable because it might be preferrable to assign equal weights\n to all workers, regardless of their (H, W) dimension, instead of putting larger weight\n on larger images. 
From preliminary experiments, little difference is found between such\n a simplified implementation and an accurate computation of overall mean & variance.\n \"\"\"\n\n def __init__(self, *args, stats_mode=\"\", **kwargs):\n super().__init__(*args, **kwargs)\n assert stats_mode in [\"\", \"N\"]\n self._stats_mode = stats_mode\n\n def forward(self, input):\n if comm.get_world_size() == 1 or not self.training:\n return super().forward(input)\n\n B, C = input.shape[0], input.shape[1]\n\n mean = torch.mean(input, dim=[0, 2, 3])\n meansqr = torch.mean(input * input, dim=[0, 2, 3])\n\n if self._stats_mode == \"\":\n assert B > 0, 'SyncBatchNorm(stats_mode=\"\") does not support zero batch size.'\n vec = torch.cat([mean, meansqr], dim=0)\n vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())\n mean, meansqr = torch.split(vec, C)\n momentum = self.momentum\n else:\n if B == 0:\n vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)\n vec = vec + input.sum() # make sure there is gradient w.r.t input\n else:\n vec = torch.cat(\n [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0\n )\n vec = AllReduce.apply(vec * B)\n\n total_batch = vec[-1].detach()\n momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0\n total_batch = torch.max(total_batch, torch.ones_like(total_batch)) # avoid div-by-zero\n mean, meansqr, _ = torch.split(vec / total_batch, C)\n\n var = meansqr - mean * mean\n invstd = torch.rsqrt(var + self.eps)\n scale = self.weight * invstd\n bias = self.bias - mean * scale\n scale = scale.reshape(1, -1, 1, 1)\n bias = bias.reshape(1, -1, 1, 1)\n\n self.running_mean += momentum * (mean.detach() - self.running_mean)\n self.running_var += momentum * (var.detach() - self.running_var)\n return input * scale + bias" ]
[ [ "torch.zeros_like", "torch.nonzero" ], [ "torch.zeros", "torch.rsqrt", "torch.cat", "torch.distributed.get_world_size", "torch.stack", "torch.split", "torch.distributed.all_gather", "torch.ones", "torch.distributed.all_reduce", "torch.ones_like", "torch.zeros_like", "torch.mean", "torch.sum" ] ]
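FrozenBatchNorm2d above folds its frozen statistics into one per-channel scale and bias before touching the input. A numpy check of that algebra (note this module, unlike a standard batch norm, adds no eps under the square root):

import numpy as np

C = 3
x = np.random.randn(2, C, 4, 4).astype(np.float32)
weight, bias = np.random.randn(C), np.random.randn(C)
mean, var = np.random.randn(C), np.random.rand(C) + 0.5  # variance kept positive

scale = weight / np.sqrt(var)        # the torch code uses running_var.rsqrt()
shift = bias - mean * scale
folded = x * scale.reshape(1, -1, 1, 1) + shift.reshape(1, -1, 1, 1)

normalised = (x - mean.reshape(1, -1, 1, 1)) / np.sqrt(var).reshape(1, -1, 1, 1)
reference = normalised * weight.reshape(1, -1, 1, 1) + bias.reshape(1, -1, 1, 1)
assert np.allclose(folded, reference, atol=1e-5)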
abitrolly/numba-benchmark
[ "4bea9c23276fd0399df26452d19f13810a6496c7" ]
[ "benchmarks/bench_ising.py" ]
[ "\"\"\"\nIsing model benchmark, adapted from\nhttp://matthewrocklin.com/blog/work/2015/02/28/Ising/\n\"\"\"\n\nfrom math import exp, log, e, sqrt\n\nimport numpy as np\n\n\nkT = 2 / log(1 + sqrt(2), e)\n\nN = 200\n\nrandom = np.random.RandomState(0)\n\nx_start = random.randint(2, size=(N, N)).astype('i8')\nx_start[x_start == 0] = -1\n\nN_iterations = 10\n\n\ndef setup():\n\n from numba import jit\n\n global _update, update\n\n @jit(nopython=True)\n def _update(x, i, j):\n n, m = x.shape\n dE = 2* x[i, j] * (\n x[(i-1)%n, (j-1)%m]\n + x[(i-1)%n, j ]\n + x[(i-1)%n, (j+1)%m]\n\n + x[ i , (j-1)%m]\n + x[ i , (j+1)%m]\n\n + x[(i+1)%n, (j-1)%m]\n + x[(i+1)%n, j ]\n + x[(i+1)%n, (j+1)%m]\n )\n if dE <= 0 or exp(-dE / kT) > np.random.random():\n x[i, j] *= -1\n\n @jit(nopython=True)\n def update(x):\n n, m = x.shape\n\n for i in range(n):\n for j in range(0, m, 2): # Even columns first to avoid overlap\n _update(x, j, i)\n\n for i in range(n):\n for j in range(1, m, 2): # Odd columns second to avoid overlap\n _update(x, j, i)\n\n\nclass IsingModel:\n\n def time_ising(self):\n x = x_start.copy()\n for i in range(N_iterations):\n update(x)\n\n" ]
[ [ "numpy.random.random", "numpy.random.RandomState" ] ]
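The benchmark's _update above is a Metropolis step: compute the energy change dE of flipping one spin against its neighbours, accept the flip if dE <= 0, otherwise with probability exp(-dE / kT). A plain-numpy version of the same rule, reduced to the 4-neighbour sum for brevity (the benchmark sums all 8 neighbours):

import numpy as np
from math import exp, log, sqrt, e

kT = 2 / log(1 + sqrt(2), e)  # same critical temperature as the benchmark
rng = np.random.default_rng(0)

def metropolis_site(x, i, j):
    n, m = x.shape
    neighbours = (x[(i - 1) % n, j] + x[(i + 1) % n, j] +
                  x[i, (j - 1) % m] + x[i, (j + 1) % m])
    dE = 2 * x[i, j] * neighbours
    # Accept energy-lowering flips outright, others with Boltzmann probability.
    if dE <= 0 or exp(-dE / kT) > rng.random():
        x[i, j] *= -1

x = np.where(rng.integers(0, 2, (8, 8)) == 0, -1, 1)
metropolis_site(x, 3, 4)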
mhw32/temperature-as-uncertainty-public
[ "d6c6f05dc217b6169f31ba25385cb4bcdd28ab6a" ]
[ "src/systems/hib.py" ]
[ "import torch\nimport math\nimport numpy as np\nimport torch.optim as optim\nimport torch.autograd as autograd\nimport torchvision\nfrom torchvision.utils import save_image\nfrom typing import Callable, Optional\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\n\nfrom src.objectives.hib import HIB\nfrom src.systems.base import PretrainSystem\nfrom torch.optim.optimizer import Optimizer\nfrom src.scheduler.lars import LARSWrapper\n\n\nclass HIBSystem(PretrainSystem):\n\n def __init__(self, config):\n super().__init__(config)\n self.a = torch.tensor(0.5, requires_grad=True)\n self.b = torch.tensor(0., requires_grad=True)\n\n def get_loss(self, batch, train=True, **kwargs):\n _, image1, image2, _ = batch\n loc1, stdev1 = self.forward(image1)\n loc2, stdev2 = self.forward(image2)\n loss = HIB(loc1, stdev1, loc2, stdev2, self.a, self.b, \n beta=self.config.loss.beta, K=self.config.loss.K, \n t=self.config.loss.t).get_loss()\n s1 = stdev1.clone().detach().cpu().numpy()\n self.log(\"stdev\", np.mean(s1).item(), on_step=True)\n a = self.a.clone().detach().cpu().numpy()\n self.log(\"a\", a.item(), on_step=True)\n b = self.b.clone().detach().cpu().numpy()\n self.log(\"b\", b.item(), on_step=True)\n return loss\n\n def get_lr_schedule(self):\n batch_size = self.config.optimizer.batch_size\n iters_per_epoch = len(self.train_dataset) // batch_size\n start_lr = self.config.optimizer.start_lr\n final_lr = self.config.optimizer.final_lr\n learning_rate = self.config.optimizer.learning_rate\n warmup_epochs = self.config.optimizer.warmup_epochs\n max_epochs = self.config.num_epochs\n\n warmup_lr_schedule = np.linspace(start_lr, learning_rate, iters_per_epoch * warmup_epochs)\n iters = np.arange(iters_per_epoch * (max_epochs - warmup_epochs))\n cosine_lr_schedule = np.array([\n final_lr + 0.5 * (learning_rate - final_lr) *\n (1 + math.cos(math.pi * t / (iters_per_epoch * (max_epochs - warmup_epochs))))\n for t in iters\n ])\n lr_schedule = np.concatenate((warmup_lr_schedule, cosine_lr_schedule))\n return lr_schedule\n\n def configure_optimizers(self):\n self.lr_schedule = self.get_lr_schedule() # make lr schedule\n weight_decay = self.config.optimizer.weight_decay\n exclude_bn_bias = self.config.optimizer.exclude_bn_bias\n learning_rate = self.config.optimizer.learning_rate\n\n if exclude_bn_bias:\n params = self.exclude_from_wt_decay(self.named_parameters(), weight_decay=weight_decay)\n else:\n params = self.parameters()\n params = [{'params': params}, {'params': [self.a, self.b]}]\n\n if self.config.optimizer.name == 'sgd':\n optimizer = optim.SGD(params, lr=learning_rate, momentum=0.9, weight_decay=weight_decay)\n elif self.config.optimizer.name == 'adam':\n optimizer = optim.Adam(params, lr=learning_rate, weight_decay=weight_decay)\n else:\n raise Exception(f'Optimizer {self.config.optimizer.name} not supported.')\n\n optimizer = LARSWrapper(optimizer, eta=0.001, clip=False)\n return [optimizer], []\n\n def optimizer_step(\n self,\n epoch: int = None,\n batch_idx: int = None,\n optimizer: Optimizer = None,\n optimizer_idx: int = None,\n optimizer_closure: Optional[Callable] = None,\n on_tpu: bool = None,\n using_native_amp: bool = None,\n using_lbfgs: bool = None,\n ) -> None:\n # warm-up + decay schedule placed here since LARSWrapper is not optimizer class\n # adjust LR of optim contained within LARSWrapper\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = self.lr_schedule[self.trainer.global_step]\n\n if not isinstance(optimizer, LightningOptimizer):\n optimizer = 
LightningOptimizer.to_lightning_optimizer(optimizer, self.trainer)\n optimizer.step(closure=optimizer_closure)\n self.a.data.clamp_(0, 1) # in-place; a bare .clamp() returns a new tensor and was a no-op\n\n def exclude_from_wt_decay(self, named_params, weight_decay, skip_list=['bias', 'bn']):\n params = []\n excluded_params = []\n\n for name, param in named_params:\n if not param.requires_grad:\n continue\n elif any(layer_name in name for layer_name in skip_list):\n excluded_params.append(param)\n else:\n params.append(param)\n\n return [{\n 'params': params,\n 'weight_decay': weight_decay\n }, {\n 'params': excluded_params,\n 'weight_decay': 0.,\n }]\n \n def training_step(self, batch, batch_idx):\n loss = self.get_loss(batch, train=True)\n metrics = {'train_loss': loss, 'learning_rate': self.lr_schedule[self.trainer.global_step]}\n self.log_dict(metrics)\n return loss\n\n" ]
[ [ "numpy.concatenate", "torch.optim.SGD", "torch.optim.Adam", "numpy.mean", "torch.tensor", "numpy.arange", "numpy.linspace" ] ]
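get_lr_schedule above precomputes one learning rate per optimizer step: a linear warmup into a cosine decay toward final_lr, indexed later by trainer.global_step. The same construction with toy step counts (the values below are not the repo's config):

import math
import numpy as np

warmup_steps, total_steps = 10, 100          # toy values, not the repo config
start_lr, peak_lr, final_lr = 0.0, 0.1, 0.001

warmup = np.linspace(start_lr, peak_lr, warmup_steps)
decay_steps = total_steps - warmup_steps
cosine = np.array([
    final_lr + 0.5 * (peak_lr - final_lr)
    * (1 + math.cos(math.pi * t / decay_steps))
    for t in range(decay_steps)
])
schedule = np.concatenate([warmup, cosine])  # indexed by the global step
assert len(schedule) == total_steps and schedule[-1] > final_lr

Precomputing the whole array is what lets optimizer_step above stay a simple lookup instead of re-deriving the rate each call.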
SijinXiang/e3d
[ "1f3e555523e503b405f7d7195cee5ca551f74ef8" ]
[ "run.py" ]
[ "# Many thanks to daya for modifying the code :)\n# ==============================================================================\n\n\"\"\"Main function to run the code.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\nimport numpy as np\nfrom src.data_provider import datasets_factory\nfrom src.models.model_factory import Model\nimport src.trainer as trainer\n#from src.utils import preprocess\nimport tensorflow as tf\nimport argparse\n\n\ndef add_arguments(parser):\n\tparser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n\n\tparser.add_argument(\"--train_data_paths\", type=str, default=\"\", help=\"train data paths\")\n\tparser.add_argument(\"--valid_data_paths\", type=str, default=\"\", help=\"validation data paths\")\n\tparser.add_argument(\"--test_data_paths\", type=str, default=\"\", help=\"train data paths\")\n\tparser.add_argument(\"--save_dir\", type=str, default=\"\", help=\"dir to store trained net\")\n\tparser.add_argument(\"--gen_frm_dir\", type=str, default=\"\", help=\"dir to store result.\")\n\n\tparser.add_argument(\"--is_training\", type=\"bool\", nargs=\"?\", const=True,\n\t\t\t\t\tdefault=False,\n\t\t\t\t\thelp=\"training or testing\")\n\tparser.add_argument(\"--dataset_name\", type=str, default=\"milan\", help=\"name of dataset\")\n\tparser.add_argument(\"--input_seq_length\", type=int, default=10, help=\"number of input snapshots\")\n\tparser.add_argument(\"--output_seq_length\", type=int, default=10, help=\"number of output snapshots\")\n\tparser.add_argument(\"--dimension_3D\", type=int, default=2, help=\"dimension of input depth\")\n\tparser.add_argument(\"--img_width\", type=int, default=100, help=\"input image width.\")\n\tparser.add_argument(\"--patch_size\", type=int, default=1, help=\"patch size on one dimension\")\n\tparser.add_argument(\"--reverse_input\", type=\"bool\", nargs=\"?\", const=True,\n\t\t\t\t\tdefault=False, \n\t\t\t\t\thelp=\"reverse the input/outputs during training.\")\n\n\tparser.add_argument(\"--model_name\", type=str, default=\"e3d_lstm\", help=\"The name of the architecture\")\n\tparser.add_argument(\"--pretrained_model\", type=str, default=\"\", help=\".ckpt file to initialize from\")\n\tparser.add_argument(\"--num_hidden\", type=str, default=\"10,10,10,10\", help=\"COMMA separated number of units of e3d lstms\")\n\tparser.add_argument(\"--filter_size\", type=int, default=5, help=\"filter of a e3d lstm layer\")\n\tparser.add_argument(\"--layer_norm\", type=\"bool\", nargs=\"?\", const=True,\n\t\t\t\t\tdefault=True, \n\t\t\t\t\thelp=\"whether to apply tensor layer norm\")\n\n\tparser.add_argument(\"--scheduled_sampling\", type=\"bool\", nargs=\"?\", const=True,\n\t\t\t\t\tdefault=True, \n\t\t\t\t\thelp=\"for scheduled sampling\")\n\tparser.add_argument(\"--sampling_stop_iter\", type=int, default=40, help=\"for scheduled sampling\")\n\tparser.add_argument(\"--sampling_start_value\", type=float, default=1.0, help=\"for scheduled sampling\")\n\tparser.add_argument(\"--sampling_changing_rate\", type=float, default=0.00002, help=\"for scheduled sampling\")\n\n\tparser.add_argument(\"--lr\", type=float, default=0.001, help=\"learning rate\")\n\tparser.add_argument(\"--batch_size\", type=int, default=50, help=\"batch size for training\")\n\tparser.add_argument(\"--max_iterations\", type=int, default=50, help=\"max num of steps\")\n\tparser.add_argument(\"--display_interval\", type=int, default=1, help=\"number of iters showing 
training loss\")\n\tparser.add_argument(\"--test_interval\", type=int, default=1, help=\"number of iters for test\")\n\tparser.add_argument(\"--snapshot_interval\", type=int, default=50, help=\"number of iters saving models\")\n#\tparser.add_argument(\"--num_save_samples\", type=int, default=10, help=\"number of sequences to be saved\")\n\tparser.add_argument(\"--n_gpu\", type=int, default=1, help=\"how many GPUs to distribute the training across\")\n\tparser.add_argument(\"--allow_gpu_growth\", type=\"bool\", nargs=\"?\", const=True,\n\t\t\t\t\tdefault=True, \n\t\t\t\t\thelp=\"allow gpu growth\")\n\n\n\ndef main(unused_argv):\n\t\"\"\"Main function.\"\"\"\n\tprint(FLAGS)\n\t# print(FLAGS.reverse_input)\n\tif FLAGS.is_training:\n\t\tif tf.gfile.Exists(FLAGS.save_dir):\n\t\t\ttf.gfile.DeleteRecursively(FLAGS.save_dir)\n\t\ttf.gfile.MakeDirs(FLAGS.save_dir)\n\tif tf.gfile.Exists(FLAGS.gen_frm_dir):\n\t\ttf.gfile.DeleteRecursively(FLAGS.gen_frm_dir)\n\ttf.gfile.MakeDirs(FLAGS.gen_frm_dir)\n\n\tgpu_list = np.asarray(\n\t\tos.environ.get('CUDA_VISIBLE_DEVICES', '-1').split(','), dtype=np.int32)\n\tFLAGS.n_gpu = len(gpu_list)\n\tprint('Initializing models')\n\n\tmodel = Model(FLAGS)\n\n\tif FLAGS.is_training:\n\t\ttrain_wrapper(model)\n\telse:\n\t\ttest_wrapper(model)\n\n\ndef schedule_sampling(eta, itr):\n\t\"\"\"Gets schedule sampling parameters for training.\"\"\"\n\tzeros = np.zeros((FLAGS.batch_size, FLAGS.output_seq_length // FLAGS.dimension_3D - 1, FLAGS.img_width, FLAGS.img_width, FLAGS.dimension_3D))\n\tif not FLAGS.scheduled_sampling:\n\t\treturn 0.0, zeros\n\n\tif itr < FLAGS.sampling_stop_iter:\n\t\teta -= FLAGS.sampling_changing_rate\n\telse:\n\t\teta = 0.0\n\trandom_flip = np.random.random_sample(\n\t\t(FLAGS.batch_size, FLAGS.output_seq_length // FLAGS.dimension_3D - 1))\n\ttrue_token = (random_flip < eta)\n\tones = np.ones((FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size, FLAGS.patch_size**2*FLAGS.dimension_3D))\n\tzeros = np.zeros((FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size, FLAGS.patch_size**2 * FLAGS.dimension_3D))\n\treal_input_flag = []\n\tfor i in range(FLAGS.batch_size):\n\t\tfor j in range(FLAGS.output_seq_length // FLAGS.dimension_3D - 1):\n\t\t\tif true_token[i, j]:\n\t\t\t\treal_input_flag.append(ones)\n\t\t\telse:\n\t\t\t\treal_input_flag.append(zeros)\n\treal_input_flag = np.array(real_input_flag)\n\treal_input_flag = np.reshape(real_input_flag,(FLAGS.batch_size, FLAGS.output_seq_length // FLAGS.dimension_3D - 1,FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,FLAGS.patch_size**2 * FLAGS.dimension_3D))\n \n\treturn eta, real_input_flag\n\n\ndef train_wrapper(model):\n\t\"\"\"Wrapping function to train the model.\"\"\"\n\tif FLAGS.pretrained_model:\n\t\tmodel.load(FLAGS.pretrained_model)\n # load data\n\ttrain_input_handle, test_input_handle = datasets_factory.data_provider(\n\t\tFLAGS.dataset_name,\n\t\tFLAGS.train_data_paths,\n\t\tFLAGS.valid_data_paths,\n\t\tFLAGS.batch_size * FLAGS.n_gpu,\n\t\tFLAGS.img_width,\n\t\tFLAGS.input_seq_length,\n\t\tFLAGS.output_seq_length,\n\t\tFLAGS.dimension_3D,\n\t\tis_training=True)\n\tprint('Data loaded.')\n\teta = FLAGS.sampling_start_value\n\n\ttra_cost = 0.0\n\tbatch_id = 0\n\tstopping = [10000000000000000]\n\tfor itr in range(2351, FLAGS.max_iterations + 1):\n\t\tif itr == 2:\n\t\t\tprint('training process started.')\n\t\t#if itr % 50 == 0:\n\t\t#\tprint('training timestep: ' + str(itr))\n\t\tif train_input_handle.no_batch_left() or itr % 50 == 
0:\n\t\t\tmodel.save(itr)\n\t\t\tprint(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),'itr: ' + str(itr))\n\t\t\tprint('training loss: ' + str(tra_cost / batch_id))\n\t\t\tval_cost = trainer.test(model, test_input_handle,FLAGS, itr)\n\t\t\tif val_cost < min(stopping):\n\t\t\t\tstopping = [val_cost]\n\t\t\telif len(stopping) < 10:\n\t\t\t\tstopping.append(val_cost)\n\t\t\tif len(stopping) == 10:\n\t\t\t\tbreak\n\t\t\ttrain_input_handle.begin(do_shuffle=True)\n\t\t\ttra_cost = 0\n\t\t\tbatch_id = 0\n\n\t\tims = train_input_handle.get_batch()\n\t\tbatch_id += 1\n\n\t\teta, real_input_flag = schedule_sampling(eta, itr)\n\n\t\ttra_cost += trainer.train(model, ims, real_input_flag, FLAGS, itr)\n\n\t\t#if itr % FLAGS.snapshot_interval == 0:\n\t\t\t#model.save(itr)\n\n\t\t#if itr % FLAGS.test_interval == 0:\n\t\t\t#trainer.test(model, test_input_handle, FLAGS, itr)\n\n\t\ttrain_input_handle.next_batch()\n\n\ndef test_wrapper(model):\n\tmodel.load(FLAGS.pretrained_model)\n\ttest_input_handle = datasets_factory.data_provider(\n\t\tFLAGS.dataset_name,\n\t\tFLAGS.train_data_paths, \n\t\tFLAGS.test_data_paths, # Should use test data rather than training or validation data.\n\t\tFLAGS.batch_size * FLAGS.n_gpu,\n\t\tFLAGS.img_width,\n\t\tFLAGS.input_seq_length,\n\t\tFLAGS.output_seq_length,\n\t\tFLAGS.dimension_3D,\n\t\tis_training=False)\n\ttrainer.test(model, test_input_handle, FLAGS, 'test_result')\n\n\nif __name__ == '__main__':\n\tnmt_parser = argparse.ArgumentParser()\n\tadd_arguments(nmt_parser)\n\tFLAGS, unparsed = nmt_parser.parse_known_args()\n\ttf.app.run(main=main)\n\n" ]
[ [ "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.ones", "tensorflow.gfile.Exists", "numpy.random.random_sample", "tensorflow.gfile.MakeDirs", "tensorflow.gfile.DeleteRecursively", "tensorflow.app.run" ] ]
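schedule_sampling above decays eta linearly and, for each output step, draws whether the model is fed the ground truth (a mask of ones) or its own prediction (zeros). A compact re-implementation with toy shapes (the helper name and defaults here are mine; the real values come from the argparse flags):

import numpy as np

def sampling_mask(eta, batch, steps, h, w, c,
                  changing_rate=2e-5, stop_iter=40, itr=0):
    # eta decays linearly until stop_iter; afterwards ground truth is never fed.
    eta = eta - changing_rate if itr < stop_iter else 0.0
    keep_truth = np.random.random_sample((batch, steps)) < eta
    # ones -> feed ground truth at that step, zeros -> feed the model's output.
    mask = np.where(keep_truth[:, :, None, None, None],
                    np.ones((h, w, c)), np.zeros((h, w, c)))
    return eta, mask

eta, mask = sampling_mask(1.0, batch=2, steps=4, h=8, w=8, c=2)
assert mask.shape == (2, 4, 8, 8, 2)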
systemshift/hivemind
[ "fdf92e5dc484e2ab4a639f0e2c788d3d061a3f1f" ]
[ "hivemind/client/averaging/__init__.py" ]
[ "\"\"\" A background process that averages your tensors with peers \"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport contextlib\nimport ctypes\nimport multiprocessing as mp\nimport os\nimport threading\nimport uuid\nimport weakref\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom dataclasses import asdict\nfrom typing import Sequence, Optional, Tuple, Any, Union, Dict, AsyncIterator\n\nimport grpc\nfrom grpc._cython.cygrpc import InternalError\nimport torch\nimport numpy as np\n\nfrom hivemind.dht import DHT, DHTID\nfrom hivemind.client.averaging.allreduce import AllReduceRunner, AllreduceException, GroupID, AveragingMode\nfrom hivemind.client.averaging.load_balancing import load_balance_peers\nfrom hivemind.client.averaging.matchmaking import Matchmaking, MatchmakingException\nfrom hivemind.client.averaging.group_info import GroupInfo\nfrom hivemind.proto import averaging_pb2, averaging_pb2_grpc, runtime_pb2\nfrom hivemind.utils.grpc import ChannelCache, GRPC_KEEPALIVE_OPTIONS, split_for_streaming, combine_from_streaming\nfrom hivemind.utils.compression import serialize_torch_tensor, deserialize_torch_tensor\nfrom hivemind.utils.asyncio import anext, achain, aiter, switch_to_uvloop\nfrom hivemind.utils.timed_storage import get_dht_time, ValueWithExpiration, DHTExpiration\nfrom hivemind.utils.serializer import MSGPackSerializer, SerializerBase\nfrom hivemind.utils import Endpoint, Port, MPFuture, get_logger, TensorDescriptor\n\n# flavour types\nStreamCallToLeader = grpc.aio.UnaryStreamCall[averaging_pb2.JoinRequest, averaging_pb2.MessageFromLeader]\nDataForGather = Any\nlogger = get_logger(__name__)\nDEFAULT_CHUNK_SIZE_BYTES = 2 ** 16\n\n\nclass DecentralizedAverager(mp.Process, averaging_pb2_grpc.DecentralizedAveragingServicer):\n \"\"\"\n\n Parameter averaging service. A trainer can run this service in background to periodically average his parameters\n with other trainers. The averaging pattern is chosen so that (1) you only need to average with a small\n group of peers at a time, but (2) all trainers will converge to global average in a logarithmic number of steps.\n\n :param averaged_tensors: a sequence of pytorch tensors that will be averaged in each all-reduce\n :param dht: a DHT node that will be used to find groups\n :param start: if True, starts the background process immediately\n\n :param prefix: a shared prefix for all group keys\n :param target_group_size: attempts to form groups with up to this many peers (recommended: a power of 2, e.g. 16)\n :param initial_group_bits: a string of bits ('0' and '1') that define the initial group key (bucket index)\n :param averaging_expiration: attempt to find a group for this many seconds, otherwise try again\n note - this expiration time only applies to looking for group, passing tensors in allreduce may take more time\n :param compression_type: optionally compress tensors with this compression algorithm before sending them to peers\n :param allreduce_timeout: spend at most this many seconds for allreduce (after group is formed)\n :param averaging_alpha: optional \"learning rate\" for averaging. If specified, local parameters will be shifted\n towards the (estimated) average by this coefficient. 
By default, local parameters are set equal to average.\n :param request_timeout: when looking for group, wait for a response from leader for at most this many seconds.\n :note: request_timeout must be smaller than averaging_expiration to avoid potential deadlocks.\n :param chunk_size_bytes: tensors for AllReduce will be divided into chunks of this size (to improve gRPC throughput)\n :param throughput: if specified, this value represents the network bandwidth available to averager.\n By default, the averager is assumed to have the average bandwidth of his group.\n If throughput == 0, averager will rely on its groupmates to do all the averaging.\n :param listen: if True (default), this averager will accept incoming requests from other peers and perform allreduce\n if False, the averager will register as a freeloader and attempt to fetch vectors from other averagers\n :param listen_on: network interface, e.g. \"0.0.0.0:1337\" or \"localhost:*\" (* means pick any port) or \"[::]:7654\"\n :param channel_options: options for grpc.aio.insecure_channel, e.g. [('grpc.enable_retries', 0)]\n see https://grpc.github.io/grpc/core/group__grpc__arg__keys.html for a list of all options\n :param kwargs: extra parameters forwarded to grpc.aio.server\n :param auxiliary: if this flag is specified, averager.step will only assist others without sending\n local tensors for averaging\n :param allow_state_sharing: if set to True, other peers can download this peer's state. Can be overwritten\n with averager.allow_state_sharing = True / False\n\n Example:\n\n >>> averager = DecentralizedAverager(...)\n >>> with averager.get_tensors() as tensors:\n >>> # run some code, modify tensors if necessary\n >>> tensors[0] += 1\n >>> # do not use tensors after the lock is released\n >>> metadata = averager.step(gather=dict(my_batch_size=32))\n >>> # run averaging once (in-place), gather metadata from groupmates\n >>> with averager.get_tensors() as tensors_after_averaging:\n >>> pass # use the averaged tensors\n \"\"\"\n _matchmaking: Matchmaking\n _pending_group_assembled: asyncio.Event\n serializer = MSGPackSerializer\n\n def __init__(self, averaged_tensors: Sequence[torch.Tensor], dht: DHT, *, start: bool,\n prefix: str, target_group_size: int, min_group_size: int = 2, initial_group_bits: Optional[str] = None,\n averaging_expiration: float = 15, request_timeout: float = 3, chunk_size_bytes: int = 2 ** 16,\n allreduce_timeout: Optional[float] = None, averaging_alpha: float = 1.0,\n compression_type: runtime_pb2.CompressionType = runtime_pb2.CompressionType.NONE,\n throughput: Optional[float] = None, min_vector_size: int = 0,\n auxiliary: bool = False, allow_state_sharing: Optional[bool] = None,\n listen: bool = True, listen_on: Endpoint = '0.0.0.0:*', daemon: bool = True,\n channel_options: Optional[Sequence[Tuple[str, Any]]] = None, **kwargs):\n assert '.' 
not in prefix, \"group prefix must be a string without trailing '.'\"\n assert throughput is None or (throughput >= 0 and np.isfinite(np.float32(throughput))), \\\n \"throughput must be a non-negative float32\"\n if not is_power_of_two(target_group_size):\n logger.warning(\"It is recommended to set target_group_size to a power of 2.\")\n assert initial_group_bits is None or all(bit in '01' for bit in initial_group_bits)\n assert listen or not auxiliary, \"auxiliary peers must accept incoming connections\"\n\n super().__init__()\n self.dht = dht\n self.listen, self.listen_on, self.kwargs = listen, listen_on, kwargs\n if not self.listen:\n self.mode = AveragingMode.CLIENT\n elif auxiliary:\n self.mode = AveragingMode.AUX\n else:\n self.mode = AveragingMode.NODE\n\n self.channel_options = channel_options\n self.daemon = daemon\n\n self._averaged_tensors = tuple(averaged_tensors)\n self.lock_averaged_tensors = mp.Lock()\n self.last_updated: DHTExpiration = -float('inf')\n for tensor in self._averaged_tensors:\n assert tensor.grad_fn is None, \"averaged_tensors must be either parameters or leaf tensors\"\n tensor.share_memory_()\n self.total_size = sum(map(torch.Tensor.numel, self._averaged_tensors))\n self.schema_hash = compute_schema_hash(self._averaged_tensors)\n self._throughput = throughput\n\n self.matchmaking_kwargs = dict(\n prefix=prefix, initial_group_bits=initial_group_bits, target_group_size=target_group_size,\n min_group_size=min_group_size, averaging_expiration=averaging_expiration, request_timeout=request_timeout)\n self.allreduce_kwargs = dict(compression_type=compression_type, chunk_size_bytes=chunk_size_bytes,\n min_vector_size=min_vector_size)\n self._averaging_alpha, self._allreduce_timeout = averaging_alpha, allreduce_timeout\n self._running_groups: Dict[GroupID, AllReduceRunner] = {} # one or more assembled groups that run all-reduce\n\n self._pipe, self.pipe = mp.Pipe(duplex=True) # a control pipe used to communicate with a background process\n self._port = mp.Value(ctypes.c_uint32, 0) # assigned when averager starts, accessible via self.port\n\n self._allow_state_sharing = mp.Value(ctypes.c_bool, 0)\n self.allow_state_sharing = (listen and not auxiliary) if allow_state_sharing is None else allow_state_sharing\n\n self._averager_endpoint: Optional[Endpoint] = None\n if not self.listen:\n self._averager_endpoint = f'client::{uuid.uuid4()}'\n\n self.ready = mp.Event() # whether the averager process has started (and ready for incoming requests)\n # note: we create a background thread weakref and with daemon=True to ensure garbage collection\n background_fetcher = threading.Thread(\n daemon=True, target=_background_thread_fetch_current_state,\n args=[self.serializer, self.pipe, weakref.WeakMethod(self.get_current_state)])\n background_fetcher.start()\n if start:\n self.run_in_background(await_ready=True)\n\n @property\n def port(self) -> Optional[Port]:\n return self._port.value if self._port.value != 0 else None\n\n @property\n def allow_state_sharing(self) -> bool:\n \"\"\" if set to True, other peers can download this peer's state \"\"\"\n return bool(self._allow_state_sharing.value)\n\n @allow_state_sharing.setter\n def allow_state_sharing(self, value: bool):\n if value is True and not self.listen:\n logger.warning(\"Cannot allow state sharing: averager in client mode (listen=False) cannot share its state.\")\n else:\n self._allow_state_sharing.value = value\n\n @property\n def endpoint(self) -> Optional[Endpoint]:\n if self.listen and self._averager_endpoint is None:\n 
assert self.port is not None, \"Averager is not running yet\"\n self._averager_endpoint = f\"{self.dht.get_visible_address()}:{self.port}\"\n logger.debug(f\"Assuming averager endpoint to be {self._averager_endpoint}\")\n return self._averager_endpoint\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.endpoint})\"\n\n def run(self):\n \"\"\"\n Run averager function in a background thread; this is needed to avoid a heisenbug with broken OMP on fork\n Turns out, using a non-main thread creates a separate OMP pool that works even if the original pool is corrupted\n Read more: https://github.com/pytorch/pytorch/issues/17199\n \"\"\"\n thread = threading.Thread(target=self._run_internal, daemon=True)\n thread.start()\n thread.join()\n\n def _run_internal(self):\n \"\"\" Serve DecentralizedAverager forever. This function will not return until the averager is shut down \"\"\"\n loop = switch_to_uvloop()\n # initialize asyncio synchronization primitives in this event loop\n with ThreadPoolExecutor(max_workers=1) as pipe_awaiter:\n async def _run():\n grpc.aio.init_grpc_aio()\n\n if self.listen:\n server = grpc.aio.server(**self.kwargs, options=GRPC_KEEPALIVE_OPTIONS)\n averaging_pb2_grpc.add_DecentralizedAveragingServicer_to_server(self, server)\n found_port = server.add_insecure_port(self.listen_on)\n assert found_port != 0, f\"Failed to listen to {self.listen_on}\"\n self._port.value = found_port\n await server.start()\n else:\n logger.debug(f\"The averager is running in client mode.\")\n\n self._matchmaking = Matchmaking(self.endpoint, self.schema_hash, self.dht, **self.matchmaking_kwargs,\n client_mode=not self.listen)\n if self.listen:\n asyncio.create_task(self._declare_for_download_periodically())\n\n self._pending_group_assembled = asyncio.Event()\n self._pending_group_assembled.set()\n self.ready.set()\n\n while True:\n method, args, kwargs = await loop.run_in_executor(pipe_awaiter, self._pipe.recv)\n asyncio.create_task(getattr(self, method)(*args, **kwargs))\n\n loop.run_until_complete(_run())\n\n def run_in_background(self, await_ready=True, timeout=None):\n \"\"\"\n Starts averager in a background process. if await_ready, this method will wait until background dht\n is ready to process incoming requests or for :timeout: seconds max.\n \"\"\"\n self.start()\n if await_ready and not self.ready.wait(timeout=timeout):\n raise TimeoutError(f\"Server didn't notify .ready in {timeout} seconds\")\n\n def shutdown(self) -> None:\n \"\"\" Shut down the averager process \"\"\"\n # TODO notify peers before terminating\n if self._parent_pid != os.getpid() or self.is_alive():\n self._pipe.send(('_SHUTDOWN', None))\n self.terminate()\n else:\n logger.warning(\"DHT shutdown has no effect: the process is not alive\")\n\n def __del__(self):\n if self._parent_pid != os.getpid() or self.is_alive():\n self.shutdown()\n\n def step(self, gather: Optional[DataForGather] = None, weight: float = 1.0, timeout: Optional[float] = None,\n allow_retries: bool = True, wait: bool = True) -> Union[Optional[Dict[Endpoint, DataForGather]], MPFuture]:\n \"\"\"\n Set up the averager to look for a group and run one round of averaging, return True on success, False on failure\n\n :param gather: optionally send this informaton to all peers in the next group and gather it from every groupmate\n (this operation is known as all-gather). 
The gathered data will be available as the output of this function.\n        :param weight: averaging weight for this peer, int or float, must be strictly positive\n        :param allow_retries: if averager fails to run one round of allreduce, this option will allow it to try again\n          within the specified timeout\n        :param timeout: if averager was unable to *find* a group in this many seconds, consider allreduce failed\n        :param wait: if True (default), return when finished. Otherwise return MPFuture and run in background.\n        :returns: on success, update averaged_tensors and return group info; on failure, return None\n        \"\"\"\n        if self.mode == AveragingMode.AUX and weight != 1:\n            logger.warning(\"Averager is running in auxiliary mode, weight is unused.\")\n        else:\n            assert isinstance(weight, (int, float)) and weight > 0, f\"Expected a positive int/float, got {type(weight)}\"\n\n        future, _future = MPFuture.make_pair()\n        gather_binary = self.serializer.dumps(gather)  # serialize here to avoid loading modules in the averager process\n        self.pipe.send(('_step', [], dict(future=_future, gather_binary=gather_binary, weight=weight,\n                                          allow_retries=allow_retries, timeout=timeout)))\n        return future.result() if wait else future\n\n    async def _step(self, *, future: MPFuture, gather_binary: bytes, weight: float,\n                    allow_retries: bool, timeout: Optional[float]):\n        loop = asyncio.get_event_loop()\n        start_time = get_dht_time()\n        group_id = None\n\n        try:\n            while not future.done():\n                try:\n                    self._pending_group_assembled.clear()\n                    data_for_gather = self.serializer.dumps([weight, self._throughput, self.mode.value, gather_binary])\n                    group_info = await self._matchmaking.look_for_group(timeout=timeout,\n                                                                        data_for_gather=data_for_gather)\n                    if group_info is None:\n                        raise AllreduceException(\"Averaging step failed: could not find a group.\")\n                    group_id = group_info.group_id\n                    allreduce_runner = await self._make_allreduce_runner(group_info, **self.allreduce_kwargs)\n                    self._running_groups[group_id] = allreduce_runner\n                    self._pending_group_assembled.set()\n                    await asyncio.wait_for(allreduce_runner.run(), self._allreduce_timeout)\n                    if self.mode != AveragingMode.AUX:\n                        await loop.run_in_executor(None, self.update_tensors, allreduce_runner)\n\n                    # averaging is finished, exit the loop\n                    future.set_result(allreduce_runner.gathered)\n\n                except (AllreduceException, MatchmakingException, AssertionError, StopAsyncIteration, InternalError,\n                        asyncio.CancelledError, asyncio.InvalidStateError, grpc.RpcError, grpc.aio.AioRpcError) as e:\n                    time_elapsed = get_dht_time() - start_time\n                    if not allow_retries or (timeout is not None and timeout < time_elapsed):\n                        logger.exception(f\"Averager caught {repr(e)}\")\n                        future.set_exception(e)\n                    else:\n                        logger.warning(f\"Averager caught {repr(e)}, retrying\")\n\n                finally:\n                    _ = self._running_groups.pop(group_id, None)\n                    self._pending_group_assembled.set()\n\n        except BaseException as e:\n            if not future.done():\n                future.set_exception(e)\n            raise\n        finally:\n            if not future.done():\n                future.set_exception(RuntimeError(\"Internal sanity check failed: averager.step left future pending.\"\n                                                  \" Please report this to hivemind issues.\"))\n\n    async def _make_allreduce_runner(self, group_info: GroupInfo, min_vector_size: int, **kwargs) -> AllReduceRunner:\n        \"\"\" Use a group description found by Matchmaking to form AllreduceRunner \"\"\"\n        try:\n            weights, throughputs, mode_ids, user_gathered = zip(*map(self.serializer.loads, group_info.gathered))\n            user_gathered = dict(zip(group_info.endpoints, map(self.serializer.loads, 
user_gathered)))\n # compute optimal part sizes from peer throughputs\n modes = tuple(map(AveragingMode, mode_ids))\n incoming_throughputs = [thr if mode != AveragingMode.CLIENT else 0.0 for thr, mode in zip(throughputs, modes)] # TODO: replace with proper load balancing\n part_sizes = await asyncio.get_event_loop().run_in_executor(\n None, load_balance_peers, self.total_size, incoming_throughputs, min_vector_size)\n async with self.get_tensors_async() as averaged_tensors:\n return AllReduceRunner(group_id=group_info.group_id, tensors=averaged_tensors, endpoint=self.endpoint,\n ordered_group_endpoints=group_info.endpoints, part_sizes=part_sizes,\n weights=weights, gathered=user_gathered, return_deltas=True, modes=modes, **kwargs)\n except Exception as e:\n raise MatchmakingException(f\"Unable to create allreduce runner ({e}), group_info: {weights, throughputs, modes, user_gathered}\")\n\n def update_tensors(self, allreduce_group: AllReduceRunner):\n \"\"\"\n a private (extendable) method that applies changes from a finished allreduce to local tensors\n \"\"\"\n assert allreduce_group.return_deltas and allreduce_group.future.done()\n averaging_deltas = allreduce_group.future.result()\n\n with torch.no_grad(), self.get_tensors() as local_tensors:\n assert len(local_tensors) == len(self._averaged_tensors)\n for tensor, update in zip(local_tensors, averaging_deltas):\n tensor.add_(update, alpha=self._averaging_alpha)\n self.last_updated = get_dht_time()\n\n @contextlib.contextmanager\n def get_tensors(self) -> Sequence[torch.Tensor]:\n \"\"\"\n A contextmanager that gives user access to averaged tensors.\n It is guaranteed that the averager will not modify tensors while this context is active.\n Please do not modify the yielded tensors in-place after the context is released.\n \"\"\"\n with self.lock_averaged_tensors:\n yield self._averaged_tensors\n self.last_updated = get_dht_time()\n\n @contextlib.asynccontextmanager\n async def get_tensors_async(self) -> Sequence[torch.Tensor]:\n \"\"\" Like get_tensors, but uses an asynchronous contextmanager \"\"\"\n try:\n await asyncio.get_event_loop().run_in_executor(None, self.lock_averaged_tensors.acquire)\n yield self._averaged_tensors\n finally:\n self.lock_averaged_tensors.release()\n\n async def rpc_join_group(self, request: averaging_pb2.JoinRequest, context: grpc.ServicerContext\n ) -> AsyncIterator[averaging_pb2.MessageFromLeader]:\n \"\"\" accept or reject a join request from another averager; if accepted, run him through allreduce steps \"\"\"\n async for response in self._matchmaking.rpc_join_group(request, context):\n yield response\n\n async def rpc_aggregate_part(self, stream: AsyncIterator[averaging_pb2.AveragingData], context: grpc.ServicerContext\n ) -> AsyncIterator[averaging_pb2.AveragingData]:\n \"\"\" a groupmate sends us a part of his tensor; we should average it with other peers and return the result \"\"\"\n request = await anext(stream)\n if request.group_id not in self._running_groups:\n # this handles a special case when leader accepted us to group AND began allreduce right away,\n # but his response with group_id was delayed and other peers got to us first\n await self._pending_group_assembled.wait()\n\n group = self._running_groups.get(request.group_id)\n if group is None:\n yield averaging_pb2.AveragingData(code=averaging_pb2.BAD_GROUP_ID)\n return\n\n async for message in group.rpc_aggregate_part(achain(aiter(request), stream), context):\n yield message\n\n async def _declare_for_download_periodically(self):\n 
download_key = f'{self._matchmaking.group_key_manager.prefix}.all_averagers'\n        while True:\n            if self.allow_state_sharing:\n                asyncio.create_task(asyncio.wait_for(self.dht.store(\n                    download_key, subkey=self.endpoint, value=self.last_updated,\n                    expiration_time=get_dht_time() + self._matchmaking.averaging_expiration, return_future=True),\n                    timeout=self._matchmaking.averaging_expiration))\n            await asyncio.sleep(self._matchmaking.averaging_expiration)\n\n    async def rpc_download_state(self, request: averaging_pb2.DownloadRequest, context: grpc.ServicerContext\n                                 ) -> AsyncIterator[averaging_pb2.DownloadData]:\n        \"\"\"\n        Get the up-to-date trainer state from a peer.\n        The state consists of two parts: (serialized_metadata, tensors)\n\n        - serialized_metadata is a small serialized bytestring meant to store scalars and hyperparameters\n        - tensors is a sequence of pytorch tensors that represent model parameters or optimizer statistics\n        \"\"\"\n        if not self.allow_state_sharing:\n            return  # deny request and direct peer to the next prospective averager\n        chunk_size_bytes = self.matchmaking_kwargs.get('chunk_size_bytes', DEFAULT_CHUNK_SIZE_BYTES)\n        metadata, tensors = await self._get_current_state_from_host_process()\n\n        for tensor in tensors:\n            for part in split_for_streaming(serialize_torch_tensor(tensor), chunk_size_bytes):\n                if metadata is not None:\n                    yield averaging_pb2.DownloadData(tensor_part=part, metadata=metadata)\n                    metadata = None\n                else:\n                    yield averaging_pb2.DownloadData(tensor_part=part)\n\n    def get_current_state(self) -> Tuple[Any, Sequence[torch.Tensor]]:\n        \"\"\"\n        Get current state and send it to a peer. Executed in the host process. Meant to be overridden.\n        :returns: a tuple of (small metadata, sequence of torch tensors)\n        :note: metadata must be serializable with self.serializer (default = MSGPackSerializer)\n        \"\"\"\n        with self.get_tensors() as tensors:\n            return dict(group_key=self.get_group_bits()), tensors\n\n    async def _get_current_state_from_host_process(self):\n        \"\"\" Executed in the averager process inside rpc_download_state \"\"\"\n        future, _future = MPFuture.make_pair()\n        self._pipe.send(('_TRIGGER_GET_CURRENT_STATE', _future))\n        return await future\n\n    def load_state_from_peers(self, wait=True) -> Optional[Tuple[Any, Sequence[torch.Tensor]]]:\n        \"\"\"\n        Try to download the latest optimizer state from one of the existing peers.\n        :returns: on success, return a 2-tuple with (metadata, tensors), where\n\n        - metadata is a small object containing metadata (e.g. 
hyperparameters, scalars, etc)\n - tensors is a sequence of pytorch tensors meant to contain peer's model weights and optimizer statistics\n\n The exact contents of both metadata and tensors are determined by get_current_state method\n \"\"\"\n future, _future = MPFuture.make_pair()\n self.pipe.send(('_load_state_from_peers', [], dict(future=_future)))\n return future.result() if wait else future\n\n async def _load_state_from_peers(self, future: MPFuture):\n try:\n key_manager = self._matchmaking.group_key_manager\n peer_priority, _ = self.dht.get(f\"{key_manager.prefix}.all_averagers\", latest=True) or ({}, None)\n peer_priority = {peer: float(info.value) for peer, info in peer_priority.items()\n if isinstance(info, ValueWithExpiration) and isinstance(info.value, (float, int))}\n\n if not isinstance(peer_priority, dict) or len(peer_priority) == 0:\n logger.info(f\"Averager could not load state from peers: peer dict empty or corrupted {peer_priority}.\")\n future.set_result(None)\n return\n\n metadata = None\n for peer in sorted(peer_priority.keys(), key=peer_priority.get, reverse=True):\n if peer != self.endpoint:\n logger.info(f\"Downloading parameters from peer {peer}\")\n stream = None\n try:\n stub = ChannelCache.get_stub(peer, averaging_pb2_grpc.DecentralizedAveragingStub, aio=True)\n stream = stub.rpc_download_state(averaging_pb2.DownloadRequest())\n current_tensor_parts, tensors = [], []\n async for message in stream:\n if message.metadata:\n metadata = self.serializer.loads(message.metadata)\n if message.tensor_part.dtype and current_tensor_parts:\n # tensor_part.dtype indicates the start of the new tensor, so we should wrap up this one\n tensors.append(deserialize_torch_tensor(combine_from_streaming(current_tensor_parts)))\n current_tensor_parts = []\n current_tensor_parts.append(message.tensor_part)\n if current_tensor_parts:\n tensors.append(deserialize_torch_tensor(combine_from_streaming(current_tensor_parts)))\n\n if not metadata:\n logger.debug(f\"Peer {peer} did not send its state.\")\n continue\n\n logger.info(f\"Finished downloading state from {peer}\")\n future.set_result((metadata, tensors))\n self.last_updated = get_dht_time()\n return\n except BaseException as e:\n logger.exception(f\"Failed to download state from {peer} - {repr(e)}\")\n finally:\n if stream is not None:\n await stream.code()\n\n finally:\n if not future.done():\n logger.warning(\"Averager could not load state from peers: all requests have failed.\")\n future.set_result(None)\n\n def get_group_bits(self, wait: bool = True):\n \"\"\"\n :param wait: if True, return bits immediately. Otherwise return awaitable MPFuture\n :returns: averager's current group key bits (without prefix)\n \"\"\"\n future, _future = MPFuture.make_pair()\n self.pipe.send(('_get_group_bits', [], dict(future=_future)))\n return future.result() if wait else future\n\n async def _get_group_bits(self, future: MPFuture):\n future.set_result(self._matchmaking.group_key_manager.group_bits)\n\n def set_group_bits(self, group_bits: str, wait: bool = True):\n \"\"\"\n :param group_bits: group bits (string of '0' or '1') to be used in averager's group key\n :param wait: if True, wait until the update is confirmed by the averager. 
Otherwise return immediately\n \"\"\"\n future, _future = MPFuture.make_pair()\n assert all(bit in '01' for bit in group_bits)\n self.pipe.send(('_set_group_bits', [], dict(group_bits=group_bits, future=_future)))\n return future.result() if wait else future\n\n async def _set_group_bits(self, group_bits: str, future: MPFuture):\n try:\n self._matchmaking.group_key_manager.group_bits = group_bits\n return future.set_result(None)\n except Exception as e:\n if not future.done():\n future.set_exception(e)\n\n\ndef is_power_of_two(n):\n \"\"\" Check whether n is a power of 2 \"\"\"\n return (n != 0) and (n & (n - 1) == 0)\n\n\ndef _background_thread_fetch_current_state(serializer: SerializerBase, pipe: mp.connection.Connection,\n get_current_state_ref: weakref.WeakMethod):\n \"\"\"\n Executed in the host process as a background thread. Fetches the averager state when asked by peers.\n :param serializer: a serializer with which to convert metadata into bytes\n :param pipe: DecentralizedAverager's control pipe (from host process side)\n :param get_current_state_ref: a WeakMethod wrapped around DecentralizedAverager.get_current_state (instance-bound)\n \"\"\"\n while True:\n try:\n trigger, future = pipe.recv()\n except BaseException as e:\n logger.debug(f\"Averager background thread finished: {repr(e)}\")\n break\n \n if trigger == '_SHUTDOWN':\n break\n\n assert trigger == '_TRIGGER_GET_CURRENT_STATE'\n try:\n get_current_state = get_current_state_ref()\n if get_current_state is None:\n break\n state_metadata, state_tensors = get_current_state()\n del get_current_state\n\n state_metadata = serializer.dumps(state_metadata)\n state_tensors = tuple(tensor.cpu().detach().requires_grad_(tensor.requires_grad)\n for tensor in state_tensors)\n # note: we cast tensors to CPU on host side to avoid initializing cuda in the guest process\n future.set_result((state_metadata, state_tensors))\n except BaseException as e:\n future.set_exception(e)\n logger.warning(e)\n continue\n\n\ndef compute_schema_hash(tensors: Sequence[torch.Tensor]) -> bytes:\n \"\"\" A hash that describes follower's tensor shapes, dtypes, devices, but not the actual values \"\"\"\n schema_dicts = [{field_name: str(field_value)\n for field_name, field_value in asdict(TensorDescriptor.from_tensor(tensor)).items()}\n for tensor in tensors]\n return DHTID.generate(source=schema_dicts).to_bytes()\n" ]
[ [ "torch.no_grad", "numpy.float32" ] ]
kainoj/pytorch-lightning
[ "4610fddb19d502e534b5c5d77c3dfd6f2e5359a5" ]
[ "tests/plugins/test_ddp_plugin.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom unittest import mock\n\nimport pytest\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel\n\nfrom pytorch_lightning import LightningModule, Trainer\nfrom pytorch_lightning.plugins import DDPPlugin\nfrom pytorch_lightning.plugins.environments import LightningEnvironment\nfrom pytorch_lightning.trainer.states import TrainerFn\nfrom tests.helpers.boring_model import BoringModel\nfrom tests.helpers.runif import RunIf\n\n\nclass BoringModelGPU(BoringModel):\n def on_train_start(self) -> None:\n # make sure that the model is on GPU when training\n assert self.device == torch.device(f\"cuda:{self.trainer.training_type_plugin.local_rank}\")\n self.start_cuda_memory = torch.cuda.memory_allocated()\n\n\n@RunIf(skip_windows=True, min_gpus=2, special=True)\ndef test_ddp_with_2_gpus():\n \"\"\"Tests if device is set correctely when training and after teardown for DDPPlugin.\"\"\"\n trainer = Trainer(gpus=2, accelerator=\"ddp\", fast_dev_run=True)\n # assert training type plugin attributes for device setting\n assert isinstance(trainer.training_type_plugin, DDPPlugin)\n assert trainer.training_type_plugin.on_gpu\n assert not trainer.training_type_plugin.on_tpu\n local_rank = trainer.training_type_plugin.local_rank\n assert trainer.training_type_plugin.root_device == torch.device(f\"cuda:{local_rank}\")\n\n model = BoringModelGPU()\n\n trainer.fit(model)\n\n # assert after training, model is moved to CPU and memory is deallocated\n assert model.device == torch.device(\"cpu\")\n cuda_memory = torch.cuda.memory_allocated()\n assert cuda_memory < model.start_cuda_memory\n\n\nclass BarrierModel(BoringModel):\n def setup(self, stage=None):\n assert not isinstance(self.trainer.accelerator.model, DistributedDataParallel)\n self.trainer.training_type_plugin.barrier(\"barrier before model is wrapped\")\n\n def on_train_start(self):\n assert isinstance(self.trainer.accelerator.model, DistributedDataParallel)\n self.trainer.training_type_plugin.barrier(\"barrier after model is wrapped\")\n\n\n@RunIf(min_gpus=4, special=True)\[email protected](\"torch.distributed.barrier\")\ndef test_ddp_barrier_non_consecutive_device_ids(barrier_mock, tmpdir):\n \"\"\"Test correct usage of barriers when device ids do not start at 0 or are not consecutive.\"\"\"\n model = BoringModel()\n gpus = [1, 3]\n trainer = Trainer(default_root_dir=tmpdir, max_steps=1, gpus=gpus, accelerator=\"ddp\")\n trainer.fit(model)\n barrier_mock.assert_any_call(device_ids=[gpus[trainer.local_rank]])\n\n\[email protected](os.environ, {\"LOCAL_RANK\": \"1\"})\ndef test_incorrect_ddp_script_spawning(tmpdir):\n \"\"\"Test an error message when user accidentally instructs Lightning to spawn children processes on rank > 0.\"\"\"\n\n class WronglyImplementedEnvironment(LightningEnvironment):\n def creates_children(self):\n # returning false no matter what means Lightning would spawn also on ranks > 0 new processes\n return False\n\n 
model = BoringModel()\n    trainer = Trainer(\n        default_root_dir=tmpdir,\n        accelerator=\"ddp\",\n        num_processes=2,\n        plugins=[DDPPlugin(), WronglyImplementedEnvironment()],\n    )\n    with pytest.raises(\n        RuntimeError, match=\"Lightning attempted to launch new distributed processes with `local_rank > 0`.\"\n    ):\n        trainer.fit(model)\n\n\n@RunIf(skip_windows=True)\ndef test_ddp_configure_ddp():\n    \"\"\"Tests with ddp plugin.\"\"\"\n    model = BoringModel()\n    ddp_plugin = DDPPlugin()\n    trainer = Trainer(\n        max_epochs=1,\n        plugins=[ddp_plugin],\n    )\n    # test that the model gets wrapped when fitting\n    trainer.state.fn = TrainerFn.FITTING\n    trainer.accelerator.connect(model)\n    trainer.accelerator.setup_environment()\n    trainer.accelerator.setup(trainer)\n    trainer.lightning_module.trainer = trainer\n    assert isinstance(trainer.model, LightningModule)\n    trainer._pre_dispatch()\n    # in DDPPlugin configure_ddp(), the model is wrapped by DistributedDataParallel\n    assert isinstance(trainer.model, DistributedDataParallel)\n\n    trainer = Trainer(\n        max_epochs=1,\n        plugins=[ddp_plugin],\n    )\n    # test that the model is not wrapped when the trainer fn is not FITTING\n    trainer.accelerator.connect(model)\n    trainer.accelerator.setup_environment()\n    trainer.accelerator.setup(trainer)\n    trainer.lightning_module.trainer = trainer\n    trainer._pre_dispatch()\n    # in DDPPlugin configure_ddp(), the model is still a LightningModule\n    assert isinstance(trainer.model, LightningModule)\n" ]
[ [ "torch.device", "torch.cuda.memory_allocated" ] ]
MarkusPrim/flavio
[ "0d277f637c4e2eb8cd1849c88d9844200ddfbc4f" ]
[ "flavio/test_classes.py" ]
[ "import unittest\nimport numpy as np\nimport flavio\nfrom flavio.classes import *\nfrom flavio.statistics.probability import *\nfrom flavio.config import config\nimport scipy.integrate\nimport math\n\nclass TestClasses(unittest.TestCase):\n def test_parameter_class(self):\n p = Parameter( 'test_mb' )\n self.assertEqual( p, Parameter.get_instance('test_mb') )\n self.assertEqual( p, Parameter['test_mb'] )\n p.set_description('b quark mass')\n self.assertEqual( p.description, 'b quark mass' )\n # removing dummy instances\n Parameter.del_instance('test_mb')\n\n def test_constraints_class(self):\n p = Parameter( 'test_mb' )\n self.assertEqual( p, Parameter.get_instance('test_mb') )\n self.assertEqual( p, Parameter['test_mb'] )\n c = ParameterConstraints()\n d = NormalDistribution(4.2, 0.2)\n c.add_constraint( ['test_mb'], d )\n # checking central values\n self.assertEqual( c.get_central('test_mb'), 4.2)\n # checking types and shapes of random values\n self.assertEqual( type(d.get_random()), float)\n self.assertEqual( d.get_random(3).shape, (3,))\n self.assertEqual( d.get_random((4,5)).shape, (4,5))\n test_1derrors_random = c.get_1d_errors_random()\n self.assertAlmostEqual( test_1derrors_random['test_mb'], 0.2, delta=0.5)\n test_1derrors_rightleft = c.get_1d_errors_rightleft()\n self.assertEqual( test_1derrors_rightleft['test_mb'], (0.2, 0.2))\n d = AsymmetricNormalDistribution(4.2, 0.2, 0.1)\n c.add_constraint( ['test_mb'], d )\n test_1derrors_rightleft = c.get_1d_errors_rightleft()\n self.assertEqual( test_1derrors_rightleft['test_mb'], (0.2, 0.1))\n pc = Parameter( 'test_mc' )\n c.add_constraint( ['test_mc', 'test_mb'], MultivariateNormalDistribution([1.2,4.2],[[0.01,0],[0,0.04]]) )\n c.get_logprobability_all(c.get_central_all())\n test_1derrors_random = c.get_1d_errors_random()\n self.assertAlmostEqual( test_1derrors_random['test_mb'], 0.2, delta=0.05)\n self.assertAlmostEqual( test_1derrors_random['test_mc'], 0.1, delta=0.05)\n test_1derrors_rightleft = c.get_1d_errors_rightleft()\n self.assertEqual( test_1derrors_rightleft['test_mb'], (0.2, 0.2))\n self.assertEqual( test_1derrors_rightleft['test_mc'], (0.1, 0.1))\n # removing dummy instances\n # check that they have been removed, using old and new syntax\n c.remove_constraint('test_mb')\n Parameter.del_instance('test_mb')\n with self.assertRaises(KeyError):\n Parameter['test_mb']\n del Parameter['test_mc']\n with self.assertRaises(KeyError):\n Parameter.get_instance('test_mc')\n\n def test_set_constraint(self):\n p = Parameter( 'test_mb' )\n c = ParameterConstraints()\n c.set_constraint('test_mb', '4.2 +- 0.1 +- 0.2')\n cons = c._parameters['test_mb'][1]\n self.assertIsInstance(cons, NormalDistribution)\n self.assertEqual(cons.central_value, 4.2)\n self.assertEqual(cons.standard_deviation, math.sqrt(0.1**2+0.2**2))\n c.set_constraint('test_mb', '4.3 + 0.3 - 0.4')\n cons = c._parameters['test_mb'][1]\n self.assertIsInstance(cons, AsymmetricNormalDistribution)\n self.assertEqual(cons.central_value, 4.3)\n self.assertEqual(cons.right_deviation, 0.3)\n self.assertEqual(cons.left_deviation, 0.4)\n c.set_constraint('test_mb', 4.4)\n cons = c._parameters['test_mb'][1]\n self.assertIsInstance(cons, DeltaDistribution)\n self.assertEqual(cons.central_value, 4.4)\n cons_dict_1 = {'distribution': 'normal',\n 'central_value': '4.5',\n 'standard_deviation': '0.1'}\n cons_dict_2 = {'distribution': 'normal',\n 'central_value': 4.5,\n 'standard_deviation': 0.2}\n c.set_constraint('test_mb', constraint_dict=cons_dict_1)\n cons = 
c._parameters['test_mb'][1]\n self.assertIsInstance(cons, NormalDistribution)\n self.assertEqual(cons.central_value, 4.5)\n self.assertEqual(cons.standard_deviation, 0.1)\n c.set_constraint('test_mb', constraint_dict=[cons_dict_1, cons_dict_2])\n cons = c._parameters['test_mb'][1]\n self.assertIsInstance(cons, NormalDistribution)\n self.assertEqual(cons.central_value, 4.5)\n self.assertEqual(cons.standard_deviation, math.sqrt(0.1**2+0.2**2))\n Parameter.del_instance('test_mb')\n\n def test_observable_class(self):\n o = Observable( 'test_obs' )\n self.assertEqual( o, Observable.get_instance('test_obs') )\n self.assertEqual( o, Observable['test_obs'] )\n o.set_description('some test observables')\n self.assertEqual( o.description, 'some test observables' )\n # removing dummy instances\n Observable.del_instance('test_obs')\n\n def test_measurement_class(self):\n o = Observable( 'test_obs' )\n d = NormalDistribution(4.2, 0.2)\n m = Measurement( 'measurement of test_obs' )\n m.add_constraint(['test_obs'], d)\n # removing dummy instances\n Observable.del_instance('test_obs')\n Measurement.del_instance('measurement of test_obs')\n\n def test_prediction_class(self):\n o = Observable( 'test_obs' )\n p = Parameter( 'test_parameter' )\n def f(wc_obj, par_dict):\n return par_dict['test_parameter']*2\n pr = Prediction( 'test_obs', f )\n wc_obj = None\n c = ParameterConstraints()\n c.add_constraint( ['test_parameter'], NormalDistribution(1.2, 0.1) )\n self.assertEqual( pr.get_central(c, wc_obj), 2.4)\n self.assertEqual( o.prediction_central(c, wc_obj), 2.4)\n # removing dummy instances\n Observable.del_instance('test_obs')\n Parameter.del_instance('test_parameter')\n\n def test_implementation_class(self):\n a = AuxiliaryQuantity( 'test_aux' )\n p = Parameter( 'test_parameter' )\n def f(wc_obj, par_dict):\n return par_dict['test_parameter']*2\n imp = Implementation( 'test_imp', 'test_aux', f )\n config['implementation']['test_aux'] = 'test_imp'\n wc_obj = None\n c = ParameterConstraints()\n c.add_constraint( ['test_parameter'], NormalDistribution(1.2, 0.1) )\n self.assertEqual( imp.get_central(c, wc_obj), 2.4)\n self.assertEqual( a.prediction_central(c, wc_obj), 2.4)\n Implementation.show_all()\n # removing dummy instances\n AuxiliaryQuantity.del_instance('test_aux')\n Parameter.del_instance('test_parameter')\n Implementation.del_instance('test_imp')\n del config['implementation']['test_aux']\n\n def test_constraints_class_exclude(self):\n Parameter( 'test_ma' )\n Parameter( 'test_mb' )\n Parameter( 'test_mc' )\n c2 = np.array([1e-3, 2])\n c3 = np.array([1e-3, 2, 0.4])\n cov22 = np.array([[(0.2e-3)**2, 0.2e-3*0.5*0.3],[0.2e-3*0.5*0.3, 0.5**2]])\n cov33 = np.array([[(0.2e-3)**2, 0.2e-3*0.5*0.3 , 0],[0.2e-3*0.5*0.3, 0.5**2, 0.01], [0, 0.01, 0.1**2]])\n pdf1 = NormalDistribution(2, 0.5)\n pdf2 = MultivariateNormalDistribution(c2, cov22)\n pdf3 = MultivariateNormalDistribution(c3, cov33)\n c1 = ParameterConstraints()\n c2 = ParameterConstraints()\n c3 = ParameterConstraints()\n c1.add_constraint(['test_mb'], pdf1)\n c2.add_constraint(['test_ma', 'test_mb'], pdf2)\n c3.add_constraint(['test_ma', 'test_mb', 'test_mc'], pdf3)\n par_dict = {'test_ma': 1.2e-3, 'test_mb': 2.4, 'test_mc': 0.33}\n self.assertEqual(\n c1.get_logprobability_all(par_dict)[pdf1],\n c2.get_logprobability_all(par_dict, exclude_parameters=['test_ma', 'test_mc'])[pdf2],\n )\n self.assertEqual(\n c1.get_logprobability_all(par_dict)[pdf1],\n c3.get_logprobability_all(par_dict, exclude_parameters=['test_ma', 'test_mc'])[pdf3],\n )\n 
self.assertEqual(\n            c2.get_logprobability_all(par_dict)[pdf2],\n            c3.get_logprobability_all(par_dict, exclude_parameters=['test_mc'])[pdf3],\n        )\n        # remove dummy instances\n        Parameter.del_instance('test_ma')\n        Parameter.del_instance('test_mb')\n        Parameter.del_instance('test_mc')\n\n    def test_logprobability_single(self):\n        c_corr = Constraints()\n        c_uncorr = Constraints()\n        d1 = NormalDistribution(2, 0.3)\n        c_corr.add_constraint(['par_1'], d1)\n        c_uncorr.add_constraint(['par_1'], d1)\n        d23 = MultivariateNormalDistribution([4, 5],\n                                             covariance=[[0.2**2, 0.5*0.2*0.3], [0.5*0.2*0.3, 0.3**2]])\n        d2 = NormalDistribution(4, 0.2)\n        d3 = NormalDistribution(5, 0.3)\n        c_corr.add_constraint(['par_2', 'par_3'], d23)\n        d23_uncorr = MultivariateNormalDistribution([4, 5], covariance=[[0.2, 0], [0, 0.3]])\n        c_uncorr.add_constraint(['par_2'], d2)\n        c_uncorr.add_constraint(['par_3'], d3)\n        d = {'par_1': 2.8, 'par_2': 4.9, 'par_3': 4.3}\n        # all logprobs for the uncorrelated case\n        l_all = c_uncorr.get_logprobability_all(d)\n        # the dict should contain the same values as the \"single\" ones in\n        # the correlated case\n        for k, v in l_all.items():\n            par = dict(c_uncorr._constraints)[k][0]\n            self.assertEqual(v,\n                             c_corr.get_logprobability_single(par, d[par]),\n                             msg=\"Failed for {}\".format(par))\n\n    def test_pdf(self):\n        # for the normal dists, just check that no error is raised\n        pd = NormalDistribution(1., 0.2)\n        pd.logpdf(0.5)\n        pd = MultivariateNormalDistribution([1.,2.], [[0.04,0],[0,0.09]])\n        pd.logpdf([1.05,2.08])\n        # for the asymmetric dist, more scrutiny needed\n        pd = AsymmetricNormalDistribution(1., 0.2, 0.5)\n        eps = 1.e-8\n        # check that the PDF is continuous\n        self.assertAlmostEqual( pd.logpdf(1. - eps), pd.logpdf(1. + eps), places=8)\n        # check that the PDF is properly normalized\n        self.assertEqual( scipy.integrate.quad(lambda x: math.exp(pd.logpdf(x)), -np.inf, +np.inf)[0], 1)\n\n    def test_observable_from_function(self):\n        o1 = Observable('test_obs_1', arguments=['a1'])\n        o2 = Observable('test_obs_2', arguments=['a2'])\n        with self.assertRaises(ValueError):\n            # non-existent obs\n            Observable.from_function('test_obs_12',\n                                     ['test_obs_x', 'test_obs_2'],\n                                     lambda x, y: x-y)\n        with self.assertRaises(AssertionError):\n            # depend on different arguments\n            Observable.from_function('test_obs_12',\n                                     ['test_obs_1', 'test_obs_2'],\n                                     lambda x, y: x-y)\n        o2 = Observable('test_obs_2', arguments=['a1'])\n        with self.assertRaises(AssertionError):\n            # obs without prediction\n            Observable.from_function('test_obs_12',\n                                     ['test_obs_1', 'test_obs_2'],\n                                     lambda x, y: x-y)\n        Prediction('test_obs_1', lambda wc_obj, par: 3)\n        Prediction('test_obs_2', lambda wc_obj, par: 7)\n        Observable.from_function('test_obs_12',\n                                 ['test_obs_1', 'test_obs_2'],\n                                 lambda x, y: x-y)\n        self.assertEqual(\n            Observable['test_obs_12'].prediction_central(flavio.default_parameters, None),\n            -4)\n        self.assertEqual(Observable['test_obs_12'].arguments, ['a1'])\n        # delete dummy instances\n        Observable.del_instance('test_obs_1')\n        Observable.del_instance('test_obs_2')\n        Observable.del_instance('test_obs_12')\n\n    def test_observable_taxonomy(self):\n        o1 = Observable( 'test_obs_1' )\n        o2 = Observable( 'test_obs_2' )\n        o1.add_taxonomy('test 1 :: test 2 :: test 3')\n        o2.add_taxonomy('test 1 :: test 2 :: test 3')\n        self.assertDictEqual(\n            Observable.taxonomy_dict()['test 1'],\n            {'test 2': {'test 3': {'test_obs_1' :{}, 'test_obs_2':{}}}}\n        )\n        # remove test from taxonomy\n        Observable.taxonomy.pop('test 1', None)\n        # removing dummy instances\n        
Observable.del_instance('test_obs_1')\n Observable.del_instance('test_obs_2')\n\n def test_parameter_constraints_yaml(self):\n yaml = flavio.default_parameters.get_yaml()\n pnew = ParameterConstraints.from_yaml(yaml)\n yaml2 = pnew.get_yaml()\n self.assertEqual(yaml, yaml2)\n\n def test_measurements_yaml(self):\n import json\n for m in Measurement.instances.values():\n if (m.name == 'Belle B->D*lnu hadronic tag 2017'\n or 'Pseudo-measurement' in m.name):\n continue # known failure ...\n yaml = m.get_yaml_dict()\n mnew = Measurement.from_yaml_dict(yaml)\n yaml2 = mnew.get_yaml_dict()\n self.assertEqual(yaml, yaml2)\n\n def test_from_yaml_dict(self):\n c = Constraints.from_yaml_dict([\n {'my_par_1': '1 +- 0.3 +- 0.4'},\n {'parameters': ['my_par_2'],\n 'values': {\n 'distribution': 'normal',\n 'central_value': 2,\n 'standard_deviation': 0.6,\n }\n },\n {'parameters': ['my_par_3'],\n 'values': [{\n 'distribution': 'normal',\n 'central_value': 7,\n 'standard_deviation': 0.3,\n },\n {\n 'distribution': 'normal',\n 'central_value': 7,\n 'standard_deviation': 0.4,\n }]\n\n }\n ])\n self.assertListEqual(list(c._parameters.keys()), ['my_par_1', 'my_par_2', 'my_par_3'])\n c1 = c._parameters['my_par_1'][1]\n self.assertEqual(type(c1), NormalDistribution)\n self.assertEqual(c1.central_value, 1)\n self.assertEqual(c1.standard_deviation, 0.5)\n c2 = c._parameters['my_par_2'][1]\n self.assertEqual(type(c2), NormalDistribution)\n self.assertEqual(c2.central_value, 2)\n self.assertEqual(c2.standard_deviation, 0.6)\n c3 = c._parameters['my_par_3'][1]\n self.assertEqual(type(c3), NormalDistribution)\n self.assertEqual(c3.central_value, 7)\n self.assertEqual(c3.standard_deviation, 0.5)\n del c\n\n def test_repr_meas(self):\n mtest = Measurement('repr test')\n self.assertEqual(repr(mtest), \"Measurement('repr test')\")\n mtest._repr_markdown_()\n mtest.description = \"bla\"\n self.assertIn(\"bla\", mtest._repr_markdown_())\n mtest.url = \"blo\"\n self.assertIn(\"blo\", mtest._repr_markdown_())\n del Measurement['repr test']\n\n def test_repr_obs(self):\n mtest = Observable('repr test')\n self.assertEqual(repr(mtest),\n \"Observable('repr test', arguments=None)\")\n mtest._repr_markdown_()\n mtest.description = \"bla\"\n self.assertIn(\"bla\", mtest._repr_markdown_())\n mtest.tex = \"blo\"\n self.assertIn(\"blo\", mtest._repr_markdown_())\n mtest.arguments = [\"blu\"]\n self.assertIn(\"blu\", mtest._repr_markdown_())\n self.assertEqual(repr(mtest),\n \"Observable('repr test', arguments=['blu'])\")\n del Observable['repr test']\n\n def test_argument_format(self):\n with self.assertRaises(KeyError):\n Observable.argument_format('dont_exist')\n with self.assertRaises(KeyError):\n Observable.argument_format(['dont_exist', 1])\n with self.assertRaises(ValueError):\n Observable.argument_format(['eps_K', 1])\n with self.assertRaises(KeyError):\n Observable.argument_format({'name': 'dBR/dq2(B0->Denu)', 'bla': 1})\n with self.assertRaises(ValueError):\n Observable.argument_format(['dBR/dq2(B0->Denu)'])\n with self.assertRaises(ValueError):\n Observable.argument_format('dBR/dq2(B0->Denu)')\n self.assertTupleEqual(Observable.argument_format({'name': 'dBR/dq2(B0->Denu)', 'q2': 1}, 'tuple'),\n ('dBR/dq2(B0->Denu)', 1))\n self.assertListEqual(Observable.argument_format({'name': 'dBR/dq2(B0->Denu)', 'q2': 1}, 'list'),\n ['dBR/dq2(B0->Denu)', 1])\n self.assertDictEqual(Observable.argument_format({'name': 'dBR/dq2(B0->Denu)', 'q2': 1}, 'dict'),\n {'name': 'dBR/dq2(B0->Denu)', 'q2': 1})\n 
self.assertEqual(Observable.argument_format('eps_K', 'tuple'),\n 'eps_K')\n self.assertEqual(Observable.argument_format('eps_K', 'list'),\n 'eps_K')\n self.assertDictEqual(Observable.argument_format('eps_K', 'dict'),\n {'name': 'eps_K'})\n" ]
[ [ "numpy.array" ] ]
arsimone/flightmare
[ "c546d9d54970c7ad803f3ada4c2ea64c51ab7287" ]
[ "flightrl/stable-baselines3/stable_baselines3/ppo/ppo.py" ]
[ "from typing import Any, Dict, Optional, Type, Union\n\nimport numpy as np\nimport torch as th\nfrom gym import spaces\nfrom torch.nn import functional as F\n\nfrom stable_baselines3.common import logger\nfrom stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm\nfrom stable_baselines3.common.policies import ActorCriticPolicy\nfrom stable_baselines3.common.masked_mse_loss import MaskedMSELoss\nfrom stable_baselines3.common.masked_mse_loss import MaskedABSLoss\nfrom stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule\nfrom stable_baselines3.common.utils import explained_variance, get_schedule_fn\n\n\nclass PPO(OnPolicyAlgorithm):\n \"\"\"\n Proximal Policy Optimization algorithm (PPO) (clip version)\n\n Paper: https://arxiv.org/abs/1707.06347\n Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)\n https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and\n and Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)\n\n Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: The learning rate, it can be a function\n of the current progress remaining (from 1 to 0)\n :param n_steps: The number of steps to run for each environment per update\n (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)\n :param batch_size: Minibatch size\n :param n_epochs: Number of epoch when optimizing the surrogate loss\n :param gamma: Discount factor\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n :param clip_range: Clipping parameter, it can be a function of the current progress\n remaining (from 1 to 0).\n :param clip_range_vf: Clipping parameter for the value function,\n it can be a function of the current progress remaining (from 1 to 0).\n This is a parameter specific to the OpenAI implementation. If None is passed (default),\n no clipping will be done on the value function.\n IMPORTANT: this clipping depends on the reward scaling.\n :param ent_coef: Entropy coefficient for the loss calculation\n :param vf_coef: Value function coefficient for the loss calculation\n :param max_grad_norm: The maximum value for the gradient clipping\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param target_kl: Limit the KL divergence between updates,\n because the clipping is not enough to prevent large update\n see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)\n By default, there is no limit on the kl div.\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param create_eval_env: Whether to create a second environment that will be\n used for evaluating the agent periodically. (Only available when passing string for the environment)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: the verbosity level: 0 no output, 1 info, 2 debug\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) 
on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[ActorCriticPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule] = 3e-4,\n n_steps: int = 2048,\n batch_size: Optional[int] = 64,\n n_epochs: int = 10,\n gamma: float = 0.99,\n gae_lambda: float = 0.95,\n clip_range: Union[float, Schedule] = 0.2,\n clip_range_vf: Union[None, float, Schedule] = None,\n ent_coef: float = 0.0,\n vf_coef: float = 0.5,\n max_grad_norm: float = 0.5,\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n target_kl: Optional[float] = None,\n tensorboard_log: Optional[str] = None,\n create_eval_env: bool = False,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n ):\n\n super(PPO, self).__init__(\n policy,\n env,\n learning_rate=learning_rate,\n n_steps=n_steps,\n gamma=gamma,\n gae_lambda=gae_lambda,\n ent_coef=ent_coef,\n vf_coef=vf_coef,\n max_grad_norm=max_grad_norm,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n tensorboard_log=tensorboard_log,\n policy_kwargs=policy_kwargs,\n verbose=verbose,\n device=device,\n create_eval_env=create_eval_env,\n seed=seed,\n _init_setup_model=False,\n )\n\n self.batch_size = batch_size\n self.n_epochs = n_epochs\n self.clip_range = clip_range\n self.clip_range_vf = clip_range_vf\n self.target_kl = target_kl\n self.save_collected = 1\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n super(PPO, self)._setup_model()\n\n # Initialize schedules for policy/value clipping\n self.clip_range = get_schedule_fn(self.clip_range)\n if self.clip_range_vf is not None:\n if isinstance(self.clip_range_vf, (float, int)):\n assert self.clip_range_vf > 0, \"`clip_range_vf` must be positive, \" \"pass `None` to deactivate vf clipping\"\n\n self.clip_range_vf = get_schedule_fn(self.clip_range_vf)\n\n def train(self) -> None:\n \"\"\"\n Update policy using the currently gathered rollout buffer.\n \"\"\"\n # Update optimizer learning rate\n self._update_learning_rate(self.policy.optimizer)\n # Compute current clip range\n clip_range = self.clip_range(self._current_progress_remaining)\n # Optional: clip range for the value function\n if self.clip_range_vf is not None:\n clip_range_vf = self.clip_range_vf(self._current_progress_remaining)\n\n entropy_losses, all_kl_divs = [], []\n pg_losses, value_losses = [], []\n clip_fractions = []\n\n # train for n_epochs epochs\n for epoch in range(self.n_epochs):\n approx_kl_divs = []\n # Do a complete pass on the rollout buffer\n for rollout_data in self.rollout_buffer.get(self.batch_size):\n actions = rollout_data.actions\n if isinstance(self.action_space, spaces.Discrete):\n # Convert discrete action from float to long\n actions = rollout_data.actions.long().flatten()\n\n # Re-sample the noise matrix because the log_std has changed\n # TODO: investigate why there is no issue with the gradient\n # if that line is commented (as in SAC)\n if self.use_sde:\n self.policy.reset_noise(self.batch_size)\n\n values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)\n # print(\"LogProb is \",rollout_data.old_log_prob)\n values = values.flatten()\n # Normalize advantage\n advantages = rollout_data.advantages\n advantages = (advantages - 
advantages.mean()) / (advantages.std() + 1e-8)\n                # ratio between old and new policy, should be one at the first iteration\n                ratio = th.exp(log_prob - rollout_data.old_log_prob)\n                # print(\"Ratio is \",ratio)\n\n                # clipped surrogate loss\n                policy_loss_1 = advantages * ratio\n                policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)\n                policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()\n\n                # Logging\n                pg_losses.append(policy_loss.item())\n                clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()\n                clip_fractions.append(clip_fraction)\n\n                if self.clip_range_vf is None:\n                    # No clipping\n                    values_pred = values\n                else:\n                    # Clip the difference between old and new value\n                    # NOTE: this depends on the reward scaling\n                    values_pred = rollout_data.old_values + th.clamp(\n                        values - rollout_data.old_values, -clip_range_vf, clip_range_vf\n                    )\n                # Value loss using the TD(gae_lambda) target\n                value_loss = F.mse_loss(rollout_data.returns, values_pred)\n                value_losses.append(value_loss.item())\n\n                # Entropy loss favors exploration\n                if entropy is None:\n                    # Approximate entropy when no analytical form\n                    entropy_loss = -th.mean(-log_prob)\n                else:\n                    entropy_loss = -th.mean(entropy)\n\n                entropy_losses.append(entropy_loss.item())\n                # decoder_loss = self.reconstruction_loss(decoding, rollout_data.shifted_obs[:,18:,0], rollout_data.mask.unsqueeze(1).repeat(1,512,1))\n                # decoder_losses.append(decoder_loss.item())\n                loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss\n\n                # Optimization step\n                self.policy.optimizer.zero_grad()\n                loss.backward()\n                # Clip grad norm\n                th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n\n                # for param in self.policy.parameters():\n                #     print(type(param), param.size())\n                #     tensor_size = param.size()\n                #     size_tup = (512, 512, 2)\n                #     size_tup_mlp = (128,256)\n                #     if tensor_size==size_tup:\n                #         print(param.grad)\n                #     if tensor_size==size_tup_mlp:\n                #         print(param.grad)\n\n                self.policy.optimizer.step()\n                # print(\"Approx KL \",th.mean(rollout_data.old_log_prob - log_prob).detach().cpu().numpy())\n                approx_kl_divs.append(th.mean(rollout_data.old_log_prob - log_prob).detach().cpu().numpy())\n\n            all_kl_divs.append(np.mean(approx_kl_divs))\n\n            if self.target_kl is not None and np.mean(approx_kl_divs) > 1.5 * self.target_kl:\n                print(f\"Early stopping at step {epoch} due to reaching max kl: {np.mean(approx_kl_divs):.2f}\")\n                break\n\n        # for epoch in range(self.n_epochs):\n        #     # Do a complete pass on the rollout buffer\n        #     for rollout_data in self.rollout_buffer.get(int((self.batch_size+1)/2)):\n        #         decoding_present, decoding_future1, decoding_future2 = self.policy.decode(rollout_data.observations)\n        #         decoder_loss_present = self.reconstruction_loss(decoding_present, rollout_data.observations[:,18:,0], th.ones(rollout_data.observations[:,18:,0].size()).to(self.device))\n        #         decoder_present_losses.append(decoder_loss_present.item())\n        #         decoder_loss_future1 = self.reconstruction_loss(decoding_future1, rollout_data.shifted_obs[:,18:,0], rollout_data.mask.unsqueeze(1).repeat(1,decoding_future1.size()[1],1))\n        #         decoder_future_losses.append(decoder_loss_future1.item())\n        #         decoder_loss_future2 = self.reconstruction_loss(decoding_future2, rollout_data.double_shifted_obs[:,18:,0], rollout_data.mask2.unsqueeze(1).repeat(1,decoding_future2.size()[1],1))\n        #         decoder_future2_losses.append(decoder_loss_future2.item())\n        #         decoder_loss = decoder_loss_present + decoder_loss_future1 + decoder_loss_future2\n        #         
decoder_losses.append(decoder_loss.item())\n # self.decoder_optimizer.zero_grad()\n # decoder_loss.backward()\n # self.decoder_optimizer.step()\n # self.decoder_scheduler.step(decoder_loss)\n\n\n self._n_updates += self.n_epochs\n explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())\n\n # Logs\n logger.record(\"train/entropy_loss\", np.mean(entropy_losses))\n logger.record(\"train/policy_gradient_loss\", np.mean(pg_losses))\n # logger.record(\"train/decoder_loss\", np.mean(decoder_losses))\n # logger.record(\"train/decoder_loss_present\", np.mean(decoder_present_losses))\n # logger.record(\"train/decoder_loss_future1\", np.mean(decoder_future_losses))\n # logger.record(\"train/decoder_loss_future2\", np.mean(decoder_future2_losses))\n logger.record(\"train/value_loss\", np.mean(value_losses))\n logger.record(\"train/approx_kl\", np.mean(approx_kl_divs))\n logger.record(\"train/clip_fraction\", np.mean(clip_fractions))\n logger.record(\"train/loss\", loss.item())\n logger.record(\"train/explained_variance\", explained_var)\n if hasattr(self.policy, \"log_std\"):\n logger.record(\"train/std\", th.exp(self.policy.log_std).mean().item())\n\n logger.record(\"train/n_updates\", self._n_updates, exclude=\"tensorboard\")\n logger.record(\"train/clip_range\", clip_range)\n if self.clip_range_vf is not None:\n logger.record(\"train/clip_range_vf\", clip_range_vf)\n\n def learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 1,\n eval_env: Optional[GymEnv] = None,\n eval_freq: int = -1,\n n_eval_episodes: int = 5,\n tb_log_name: str = \"PPO\",\n eval_log_path: Optional[str] = None,\n reset_num_timesteps: bool = True,\n ) -> \"PPO\":\n\n return super(PPO, self).learn(\n total_timesteps=total_timesteps,\n callback=callback,\n log_interval=log_interval,\n eval_env=eval_env,\n eval_freq=eval_freq,\n n_eval_episodes=n_eval_episodes,\n tb_log_name=tb_log_name,\n eval_log_path=eval_log_path,\n reset_num_timesteps=reset_num_timesteps,\n )\n" ]
[ [ "torch.min", "torch.clamp", "numpy.mean", "torch.nn.functional.mse_loss", "torch.abs", "torch.exp", "torch.mean" ] ]
CommerciumBlockchain/electrum
[ "1c27908d537ff822220ab4fa96c3f2dbd29a59f6" ]
[ "lib/plot.py" ]
[ "import datetime\nfrom collections import defaultdict\n\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\n\nfrom .i18n import _\nfrom .bitcoin import COIN\n\n\nclass NothingToPlotException(Exception):\n def __str__(self):\n return _(\"Nothing to plot.\")\n\n\ndef plot_history(history):\n if len(history) == 0:\n raise NothingToPlotException()\n hist_in = defaultdict(int)\n hist_out = defaultdict(int)\n for item in history:\n if not item['confirmations']:\n continue\n if item['timestamp'] is None:\n continue\n value = item['value'].value/COIN\n date = item['date']\n datenum = int(md.date2num(datetime.date(date.year, date.month, 1)))\n if value > 0:\n hist_in[datenum] += value\n else:\n hist_out[datenum] -= value\n\n f, axarr = plt.subplots(2, sharex=True)\n plt.subplots_adjust(bottom=0.2)\n plt.xticks( rotation=25 )\n ax = plt.gca()\n plt.ylabel('CMM')\n plt.xlabel('Month')\n xfmt = md.DateFormatter('%Y-%m-%d')\n ax.xaxis.set_major_formatter(xfmt)\n axarr[0].set_title('Monthly Volume')\n xfmt = md.DateFormatter('%Y-%m')\n ax.xaxis.set_major_formatter(xfmt)\n width = 20\n\n r1 = None\n r2 = None\n dates_values = list(zip(*sorted(hist_in.items())))\n if dates_values and len(dates_values) == 2:\n dates, values = dates_values\n r1 = axarr[0].bar(dates, values, width, label='incoming')\n axarr[0].legend(loc='upper left')\n dates_values = list(zip(*sorted(hist_out.items())))\n if dates_values and len(dates_values) == 2:\n dates, values = dates_values\n r2 = axarr[1].bar(dates, values, width, color='r', label='outgoing')\n axarr[1].legend(loc='upper left')\n if r1 is None and r2 is None:\n raise NothingToPlotException()\n return plt\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.xlabel", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.subplots", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gca", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xticks" ] ]
akira-l/online_mmdetection
[ "10c60467a57a605b783486b7fbc508776394ea79" ]
[ "mmdet/models/detectors/two_stage.py" ]
[ "import torch\nimport torch.nn as nn\n\n# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\n\nimport pdb\n\[email protected]_module()\nclass TwoStageDetector(BaseDetector):\n \"\"\"Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n \"\"\"\n\n def __init__(self,\n backbone,\n neck=None,\n rpn_head=None,\n roi_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n super(TwoStageDetector, self).__init__()\n self.backbone = build_backbone(backbone)\n\n if neck is not None:\n self.neck = build_neck(neck)\n\n if rpn_head is not None:\n rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n rpn_head_ = rpn_head.copy()\n rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n self.rpn_head = build_head(rpn_head_)\n\n if roi_head is not None:\n # update train and test cfg here for now\n # TODO: refactor assigner & sampler\n rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n roi_head.update(train_cfg=rcnn_train_cfg)\n roi_head.update(test_cfg=test_cfg.rcnn)\n self.roi_head = build_head(roi_head)\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_rpn(self):\n \"\"\"bool: whether the detector has RPN\"\"\"\n return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n @property\n def with_roi_head(self):\n \"\"\"bool: whether the detector has a RoI head\"\"\"\n return hasattr(self, 'roi_head') and self.roi_head is not None\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights in detector.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n super(TwoStageDetector, self).init_weights(pretrained)\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_roi_head:\n self.roi_head.init_weights(pretrained)\n\n def extract_feat(self, img):\n \"\"\"Directly extract features from the backbone+neck.\"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network flops.\n\n See `mmdetection/tools/get_flops.py`\n \"\"\"\n outs = ()\n # backbone\n x = self.extract_feat(img)\n # rpn\n if self.with_rpn:\n rpn_outs = self.rpn_head(x)\n outs = outs + (rpn_outs, )\n proposals = torch.randn(1000, 4).to(img.device)\n # roi_head\n roi_outs = self.roi_head.forward_dummy(x, proposals)\n outs = outs + (roi_outs, )\n return outs\n\n def forward_train(self,\n img,\n img_metas,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n **kwargs):\n \"\"\"\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] 
format.\n\n gt_labels (list[Tensor]): class indices corresponding to each box\n\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n proposals : override rpn proposals with custom proposals. Use when\n `with_rpn` is False.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n x = self.extract_feat(img)\n\n losses = dict()\n\n # RPN forward and loss\n if self.with_rpn:\n proposal_cfg = self.train_cfg.get('rpn_proposal',\n self.test_cfg.rpn)\n rpn_losses, proposal_list = self.rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks,\n **kwargs)\n losses.update(roi_losses)\n\n return losses\n\n async def async_simple_test(self,\n img,\n img_meta,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = await self.rpn_head.async_simple_test_rpn(\n x, img_meta)\n else:\n proposal_list = proposals\n\n return await self.roi_head.async_simple_test(\n x, proposal_list, img_meta, rescale=rescale)\n\n def simple_test(self, img, img_metas, proposals=None, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n x = self.extract_feat(img)\n\n if proposals is None:\n proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n return self.roi_head.simple_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n def aug_test(self, imgs, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n # recompute feats to save memory\n x = self.extract_feats(imgs)\n proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)\n return self.roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n" ]
[ [ "torch.randn" ] ]
stepan-anokhin/VideoDeduplication
[ "27d7726601ded654ac3710952baf989674aecf7e" ]
[ "extract_features.py" ]
[ "import numpy as np\nimport os\n\nos.environ['WINNOW_CONFIG'] = os.path.abspath('config.yaml')\n\nfrom glob import glob\nfrom winnow.feature_extraction import IntermediateCnnExtractor,frameToVideoRepresentation,SimilarityModel\nfrom winnow.utils import create_directory,scan_videos,create_video_list,get_original_fn_from_artifact\nfrom db import *\nfrom db.schema import *\nimport yaml\n\n\nif __name__ == '__main__':\n\n representations = ['frame_level','video_level','video_signatures']\n\n with open(\"config.yaml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n\n DATASET_DIR = cfg['video_source_folder']\n DST_DIR = cfg['destination_folder']\n VIDEO_LIST_TXT = cfg['video_list_filename']\n ROOT_FOLDER_INTERMEDIATE_REPRESENTATION =cfg['root_folder_intermediate']\n USE_DB = cfg['use_db'] \n CONNINFO = cfg['conninfo']\n KEEP_FILES = cfg['keep_fileoutput']\n FRAME_LEVEL_SAVE_FOLDER = os.path.abspath(DST_DIR + '{}/{}'.format(ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[0]))\n VIDEO_LEVEL_SAVE_FOLDER = DST_DIR + '{}/{}'.format(ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[1])\n VIDEO_SIGNATURES_FILENAME = 'video_signatures'\n FRAME_LEVEL_SAVE_FOLDER = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[0]) \n VIDEO_LEVEL_SAVE_FOLDER = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[1])\n VIDEO_SIGNATURES_SAVE_FOLDER = os.path.join(DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION,representations[2])\n VIDEO_SIGNATURES_FILENAME = 'video_signatures.npy'\n \n\n print('Creating Intermediate Representations folder on :{}'.format(os.path.abspath(DST_DIR)))\n\n create_directory(representations,DST_DIR,ROOT_FOLDER_INTERMEDIATE_REPRESENTATION)\n \n print('Searching for Dataset Video Files')\n\n videos = scan_videos(DATASET_DIR,'**')\n\n print('Number of files found: {}'.format(len(videos)))\n\n processed_videos = scan_videos(FRAME_LEVEL_SAVE_FOLDER,'**_vgg_features.npy')\n\n print('Found {} videos that have already been processed.'.format(len(processed_videos)))\n\n # Get filenames\n processed_filenames = get_original_fn_from_artifact(processed_videos,'_vgg_features')\n full_video_names = [os.path.basename(x) for x in videos]\n\n # Check for remaining videos\n remaining_videos = [i for i,x in enumerate(full_video_names) if x not in processed_filenames]\n\n remaining_videos_path = np.array(videos)[remaining_videos]\n\n print('There are {} videos left'.format(len(remaining_videos_path)))\n\n VIDEOS_LIST = create_video_list(remaining_videos_path,VIDEO_LIST_TXT)\n\n print('Processed video List saved on :{}'.format(VIDEOS_LIST))\n\n if len(remaining_videos_path) > 0:\n # Instantiates the extractor\n extractor = IntermediateCnnExtractor(VIDEOS_LIST,FRAME_LEVEL_SAVE_FOLDER)\n # Starts Extracting Frame Level Features\n extractor.start(batch_size=16,cores=4)\n\n print('Converting Frame by Frame representations to Video Representations')\n\n converter = frameToVideoRepresentation(FRAME_LEVEL_SAVE_FOLDER,VIDEO_LEVEL_SAVE_FOLDER)\n\n converter.start()\n\n print('Extracting Signatures from Video representations')\n\n sm = SimilarityModel()\n video_signatures = sm.predict(VIDEO_LEVEL_SAVE_FOLDER)\n\n video_signatures = np.nan_to_num(video_signatures)\n\n print('Saving Video Signatures on :{}'.format(VIDEO_SIGNATURES_SAVE_FOLDER))\n\n if USE_DB:\n db_engine,session = create_engine_session(CONNINFO)\n create_tables(db_engine)\n add_signatures(session,video_signatures,sm.original_filenames)\n try:\n session.commit()\n \n except:\n session.rollback()\n 
print('DB Exception')\n # raise\n\n finally:\n # Get DB stats\n signatures = get_all(session,Signature)\n print(f\"Signatures table rows:{len(signatures)}\")\n\n if KEEP_FILES or USE_DB is False:\n\n np.save(os.path.join(VIDEO_SIGNATURES_SAVE_FOLDER,'{}.npy'.format(VIDEO_SIGNATURES_FILENAME)),video_signatures)\n np.save(os.path.join(VIDEO_SIGNATURES_SAVE_FOLDER,'{}-filenames.npy'.format(VIDEO_SIGNATURES_FILENAME)),sm.original_filenames)\n print('Signatures of shape {} saved on :{}'.format(video_signatures.shape,VIDEO_SIGNATURES_SAVE_FOLDER))\n\n" ]
[ [ "numpy.array", "numpy.nan_to_num" ] ]
iambaim/pyEcholab
[ "6e165ad1a947e62fc233467631c445fe9ebcdad2" ]
[ "examples/simple_ek60_test.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"A simple ek60 reader test.\n\nThis script demonstrates simple file reading and plotting of ek60 data. In\ngeneral, the script reads files passed to it, stores data in a data object,\nparses information from the data, and generates plots. Specifically,\nthis script demonstrates processes such as retrieving values from the data (\npower, Sv, and angles from specified channels/frequencies), appending and\ninserting data from different sample intervals, and using matplotlib to\nplot echograms.\n\"\"\"\n\nfrom matplotlib.pyplot import figure, show, subplots_adjust, get_cmap\nfrom echolab2.instruments import EK60\nfrom echolab2.plotting.matplotlib import echogram\n\n\n# Create the list of input files. For this test we purposely picked two files\n# with the same channels, but with different pulse lengths and a different\n# installation order.\n\n# The descriptions below apply to reading these 2 files in the following order.\nrawfiles = ['./data/EK60/DY1201_EK60-D20120214-T231011.raw',\n './data/EK60/DY1706_EK60-D20170609-T005736.raw']\n\n# Create a matplotlib figure to plot our echograms on.\nfig = figure()\n# Set some properties for the sub plot layout.\nsubplots_adjust(left=0.11, bottom=0.1, right=0.98, top=.93, wspace=None,\n hspace=0.9)\n\n# Create an instance of the EK60 instrument. This is the top level object used\n# to interact with EK60 and data sources.\nek60 = EK60.EK60()\n\n# Use the read_raw method to read in our list of data files. Assuming the\n# data files haven't been changed above, the these files contain data from a\n# 5 frequency EK60 system: 18, 38, 70, 120, and 200 kHz.\nek60.read_raw(rawfiles)\n\n# Print some basic info about our object. As you will see, 10 channels are\n# reported. Each file has 5 channels, and are in fact, physically the same\n# hardware. The reason 10 channels are reported is because the transceiver\n# installation order was changed in the ER60 software which changes the\n# channel ID for that transceiver + transducer combination.\nprint(ek60)\n\n# If you look at the output of the print statement you'll see some basic information\n# about the channels that have been read. Each channel is listed in the form\n#\n# channel number :: channel ID :: data type (data size)\n#\n# The channel number is simply the order the unique channel IDs were encountered\n# in the the reading process. Channels are identified in the file header. I\n# believe that they are always sorted by frequency from low to high and the\n# reader encounters them in this order so channel 1 should always be the lowest\n# frequency in the file. How channels are ordered if you have multiple GPTs\n# operating at the same frequency or if you're using ER60 multiplexing is unknown.\n#\n# Channel ID is the unique string that the ER60/EK80 software assigns the\n# channel. This usually includes a hardware ID (MAC address) and the\n# transducer model number. The ER60 software adds the transceiver installation\n# order and MUX channel between these but I am not sure about EK80.\n#\n# Data type describes the kind of data stored in the raw_data object associated\n# with this channel. Raw files can contain power only, angle only, power AND angle,\n# and EK80 files can contain complex data. Following this, raw_data objects can contain\n# power, angle, power AND angle, or complex data. If a new data type is encountered\n# while reading data from a specific channel ID, a new raw_data object is created for\n# that data. 
In our case, all of the channels contain power and angle data. But if\n# you read a file that contained only angle data, then a file with the same channels\n# that had power/angle data, each channel would contain two raw_data objects, one\n# containing the angle only data and one containing the power and angle data.\n\n# The size of the data array(s) is printed next in the form (n pings, n samples)\n\n\n# ER60 objects contain all the data stored in a .raw file organized in a number\n# of attributes. The bulk of the data are stored in raw_data objects and getting\n# a reference to these raw_data objects is one of the first things you'll usually\n# do when processing .raw data. You can access the ER60.raw_data attribute directly\n# (it is a dictionary keyed by channel ID) or you can use the EK60.get_channel_data()\n# method which allows you to get references by channel number, frequency, and/or\n# channel ID.\n\n# Here I will get references to the 38 kHz data. Since we read data at 38 kHz from\n# two channels, this will return a list 2 items long. Each item in this list will\n# be a raw_data object containing 38 kHz data associate with a channel+datatype. In\n# this example the data files only have power/angle data so there will only be a\n# single raw_data object per channel.\nraw_data_38 = ek60.get_channel_data(frequencies=38000)\n\n# When working with this library, you are either going to know something about the\n# data you are reading and you will be able to make assumptions about the dict that\n# is returned or you'll know nothing and need to iterate through the dict keys and\n# lists they reference and process each raw_data object as needed. Here we know that\n# the dict will have a single key (38000) and that list will be 2 elements long.\n# Because we know this we can unpack \"manually\"\n\n# get a reference to the first raw_data object at 38 kHz\nraw_data_38_1 = raw_data_38[38000][0]\n# ^ ^\n# frequency --| |-- channel+datatype index\n\n# and to the second....\nraw_data_38_2 = raw_data_38[38000][1]\n# ^ ^\n# frequency --| |-- channel+datatype index\n\n\n# The sample data from the first 38 kHz channel is contained in a 136x994 array.\n# The data was recorded with a 1024us transmit pulse length, which on the EK60\n# and related hardware results in a sample interval of 256us (sample interval = pulse\n# length / 4). The data were recorded in 2012.\nprint(raw_data_38_1)\n\n# The sample data from the first 38 kHz channel is contained in a 763x1059 array\n# recoded with a 512us pulse length resulting in a sample interval of 128us.\n# These data were recorded in 2017.\nprint(raw_data_38_2)\n\n\n# Append the 2nd object's data to the first and print out the results. We use the\n# append method, passing the raw_data_38_2 reference. Note that data in raw_data\n# objects are stored in the order read, inserted, or appended. NOT in time order.\n# Also, data in raw_data objects are not gridded. Adjacent samples may or may be\n# adjacent after converting to \"processed data\".\nraw_data_38_1.append(raw_data_38_2)\n\n# The result of this append is that raw_data_38_1 now contains data from 899\n# pings. The first 136 pings are the 2012 data and the next 763 the 2017\n# data. The sample data arrays are 899x1059 and the object contains 2 unique\n# sample intervals.\nprint(raw_data_38_1)\n\n# Insert the 2nd object's data into the first at ping 50. 
Using the insert\n# method we will insert the 2nd object's 763 pings into the first object\n# starting at ping 51.\nraw_data_38_1.insert(raw_data_38_2, ping_number=50, insert_after=True)\n\n# Now raw_data_38_1 contains 1662 pings. Pings 1-50 are from the 2012 data.\n# Pings 51-813 are the 763 pings from the 2012 data. Pings 814-899 are the\n# rest of the 2012 data and pings 900-1663 are a second copy of the 2017 data.\n# This example isn't practical in any way, I am just illustrating\nprint(raw_data_38_1)\n\n# Create an axis.\nax_1 = fig.add_subplot(3, 1, 1)\n# Create an echogram to plot up the raw sample data.\nechogram_2 = echogram.Echogram(ax_1, raw_data_38_1, 'power')\nax_1.set_title(\"Power as stored in raw_data object\")\n\n# You will notice that the 2017 data has more samples so there will be empty\n# samples padding the 2012 data. Also notice that the data is not in time order\n\n\n# At this point, we have a 1662x1059 array with data recorded at two different\n# sample intervals. When we convert this data to return a processed_data\n# object, we have to resample to a constant sample interval. By default,\n# the get_* methods will resample to the shortest sample interval (highest\n# resolution) in the data that is being returned. In our case, that will\n# result in the 136 pings from 2012 recorded with a sample rate of 256us\n# being resampled to 128us.\n\n# The two files were also recorded with slightly different sound speed values\n# and we're not going to supply a constant sound speed (or any calibration\n# values) to the get_power method so it will use the calibration parameter\n# values from the raw data. When no sound speed calibration data is provided,\n# the get_* methods will resort to interpolating range using the sound speed\n# that occurs most in the data (in other words, it interpolates the fewest\n# pings it needs to).\n\n# When we request data using the get_* methods, we can provide a time range or\n# ping range to return data from. Providing no constraints on the range of\n# data returned will return all of the data. By default, the data will be in\n# time order. You can force the method to return data in ping order (the\n# order it exists in the raw_data object) by setting the time_order keyword to\n# False. Advanced indexing can be done outside of the get_* methods and\n# passed into them using the return_indices keyword.\n\n\n# Call get_power to get a processed_data object that contains power data. We\n# provide no arguments so we get all pings ordered by time. With this specific\n# data, we will have the 136 pings from 2012, followed by 2 x 763 pings\n# recorded in 2017. The data from 2017 will be duplicated. Since the data are\n# in time order, the 2017 pings will be interleaved. 
Here are the times where\n# the data transitions from 2012 to 2017:\n#\n# ping 136: 2012-02-14T23:10:51.642 -- 2012 ping 136\n# ping 137: 2017-06-09T00:57:36.074 -- 2017 ping 1\n# ping 138: 2017-06-09T00:57:36.074 -- 2017 ping 1\n# ping 139: 2017-06-09T00:57:36.745 -- 2017 ping 2\n# ping 140: 2017-06-09T00:57:36.745 -- 2017 ping 2\nprocessed_power_1 = raw_data_38_1.get_power()\n\n# The processed_data object should be 1662 pings by 1988 samples.\nprint(processed_power_1)\n\n# Create an axis.\nax_2 = fig.add_subplot(3, 1, 2)\n# Create an echogram which will display on our newly created axis.\nechogram_2 = echogram.Echogram(ax_2, processed_power_1)\nax_2.set_title(\"Power data in time order\")\n\ncal_obj = raw_data_38_1.get_calibration()\n\n# Now request Sv data in time order.\nSv = raw_data_38_1.get_Sv(calibation=cal_obj)\n# This will also be 1662 pings by 1988 samples, but is Sv ordered by time.\nprint(Sv)\n\n# Create another axis.\nax_3 = fig.add_subplot(3, 1, 3)\n# Create an echogram which will display on our newly created axis.\nechogram_3 = echogram.Echogram(ax_3, Sv, threshold=[-70,-34])\nax_3.set_title(\"Sv data in time order\")\n\n# Show our figure.\nshow()\n\n# Create another matplotlib figure.\nfig = figure()\n# Set some properties for the sub plot layout.\nsubplots_adjust(left=0.1, bottom=0.1, right=0.98, top=.93, wspace=None,\n hspace=0.5)\n\nangle_cmap = get_cmap('plasma')\n\n# Now request angles data in time order.\nangles_along, angles_athwart = raw_data_38_1.get_physical_angles()\nprint(angles_along)\nprint(angles_athwart)\n\n# Create another axis.\nax_1 = fig.add_subplot(2, 1, 1)\n# Create an echogram which will display on our newly created axis.\nechogram_3 = echogram.Echogram(ax_1, angles_along, cmap=angle_cmap)\nax_1.set_title(\"angles_alongship data in time order\")\n\n# Create another axis.\nax_2 = fig.add_subplot(2, 1, 2)\n# Create an echogram which will display on our newly created axis.\nechogram_3 = echogram.Echogram(ax_2, angles_athwart, cmap=angle_cmap)\nax_2.set_title(\"angles_athwartship data in time order\")\n\n# Show our figure.\nshow()\n\n\npass\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.figure" ] ]
frcmi/opensight
[ "3b0c0c63a877f739a1ee8261a9befbb44a3b7796" ]
[ "opsi/util/cv/shape.py" ]
[ "from math import atan2, cos, sin, sqrt\nfrom typing import NamedTuple\n\nimport cv2\nimport numpy as np\nfrom numpy.core._multiarray_umath import ndarray\n\nfrom opsi.util.cache import cached_property\n\n\n# Also represents dimensions\nclass Point(NamedTuple):\n def nt_serialize(self):\n return {\"x\": self.x, \"y\": self.y}\n\n # implicit classmethod Point._make - create from existing iterable\n\n x: float\n y: float\n\n @classmethod\n def _make_rev(cls, iter): # make reversed (y, x)\n return cls(iter[1], iter[0])\n\n @property\n def area(self):\n return self.x * self.y\n\n @property\n def hypot(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5\n\n @property\n def perimeter(self):\n return 2 * (self.x + self.y)\n\n # usage: normalized = Point(width, height).normalize(Point(x, y))\n def normalize(self, point: \"Point\") -> \"Point\":\n x = (2 * point.x / self.x) - 1\n y = (2 * point.y / self.y) - 1\n\n return Point(x, y)\n\n\nclass Shape:\n def __init__(self):\n raise TypeError(\"Must be made with from_* classmethods\")\n\n @property\n def perimeter(self):\n return None\n\n @property\n def area(self):\n return None\n\n\nclass Rect(Shape):\n def nt_serialize(self):\n return {\n \"x\": self.tl.x,\n \"y\": self.tl.y,\n \"width\": self.dim.x,\n \"height\": self.dim.y,\n }\n\n # create from top-left coordinate and dimensions\n @classmethod\n def from_params(cls, x, y, width, height):\n inst = cls.__new__(cls)\n\n inst.tl = Point(x, y)\n inst.dim = Point(width, height)\n\n return inst # Big oof will occur if you forget this\n\n @classmethod\n def from_contour(cls, contour_raw):\n return cls.from_params(*cv2.boundingRect(contour_raw))\n\n @cached_property\n def tr(self):\n return Point(self.tl.x + self.dim.x, self.tl.y)\n\n @cached_property\n def bl(self):\n return Point(self.tl.x, self.tl.y + self.dim.y)\n\n @cached_property\n def br(self):\n return Point(self.tl.x + self.dim.x, self.tl.y + self.dim.y)\n\n @cached_property\n def center(self):\n return Point(self.tl.x + self.dim.x / 2, self.tl.y + self.dim.y / 2)\n\n @cached_property\n def perimeter(self):\n return self.dim.perimeter\n\n @cached_property\n def area(self):\n return self.dim.area\n\n\nclass RotatedRect(Shape):\n def nt_serialize(self):\n return {\n \"cx\": self.center.x,\n \"cy\": self.center.y,\n \"width\": self.dim.x,\n \"heigh\": self.dim.y,\n \"angle\": self.angle,\n }\n\n # create from top-left coordinate and dimensions\n @classmethod\n def from_params(cls, center, size, angle):\n inst = cls.__new__(cls)\n\n inst.center = Point(center[0], center[1])\n inst.dim = Point(size[0], size[1])\n inst.angle = angle\n\n return inst\n\n @classmethod\n def from_contour(cls, contour_raw):\n return cls.from_params(*cv2.minAreaRect(contour_raw))\n\n @cached_property\n def box_points(self):\n return cv2.boxPoints((self.center, self.dim, self.angle))\n\n @cached_property\n def perimeter(self):\n return self.dim.perimeter\n\n @cached_property\n def area(self):\n return self.dim.area\n\n # Returns the angle of the rectangle from -90 to 90, where 0 is the rectangle vertical on its shortest side.\n @cached_property\n def vertical_angle(self):\n rect_angle = self.angle\n if self.dim[0] > self.dim[1]:\n rect_angle += 90\n return rect_angle\n\n\n# Stores corners used for SolvePNP\nclass Corners(NamedTuple):\n def nt_serialize(self):\n return {\n \"tlx\": self.tl.x,\n \"tly\": self.tl.y,\n \"trx\": self.tr.x,\n \"try\": self.tr.y,\n \"blx\": self.bl.x,\n \"bly\": self.bl.y,\n \"brx\": self.br.x,\n \"bry\": self.br.y,\n }\n\n tl: Point\n tr: 
Point\n bl: Point\n br: Point\n\n def to_matrix(self):\n return np.array([self.tl, self.tr, self.bl, self.br], dtype=np.float)\n\n def calculate_pose(self, object_points, camera_matrix, distortion_coefficients):\n img_points_mat = self.to_matrix()\n\n ret, rvec, tvec = cv2.solvePnP(\n object_points,\n img_points_mat,\n camera_matrix,\n distortion_coefficients,\n flags=cv2.SOLVEPNP_AP3P,\n )\n\n return ret, rvec, tvec\n\n\nclass Pose3D(NamedTuple):\n def nt_serialize(self):\n return {\"rvec\": self.rvec.ravel(), \"tvec\": self.tvec.ravel()}\n\n rvec: ndarray\n tvec: ndarray\n\n def position_2d(self, tilt_angle: float):\n # var x = tVec.get(0, 0)[0];\n # var z = FastMath.sin(tilt_angle) * tVec.get(1, 0)[0] + tVec.get(2, 0)[0] * FastMath.cos(tilt_angle);\n x = self.tvec[0, 0]\n z = sin(tilt_angle) * self.tvec[1, 0] + cos(tilt_angle) * self.tvec[2, 0]\n\n distance = sqrt(x * x + z * z)\n\n # From Team 5190: Green Hope Falcons\n # https://github.com/FRC5190/2019CompetitionSeason/blob/51f1940c5742a74bdcd25c4c9b6e9cfe187ec2fa/vision/jevoisvision/modules/ghrobotics/ReflectiveTape/ReflectiveTape.py#L94\n\n # Find the horizontal angle between camera center line and target\n camera_to_target_angle = -atan2(x, z)\n\n rot, _ = cv2.Rodrigues(self.rvec)\n rot_inv = rot.transpose()\n\n pzero_world = np.matmul(rot_inv, -self.tvec)\n\n target_angle = -atan2(pzero_world[0][0], pzero_world[2][0])\n\n trans_2d = Point(\n distance * cos(camera_to_target_angle),\n distance * sin(camera_to_target_angle),\n )\n\n return trans_2d, target_angle, camera_to_target_angle, distance\n\n def object_to_image_points(\n self, obj_points, camera_matrix, distortion_coefficients\n ):\n img_points, jacobian = cv2.projectPoints(\n obj_points, self.rvec, self.tvec, camera_matrix, distortion_coefficients\n )\n return img_points.astype(np.int)\n\n\nclass Circles(ndarray):\n def nt_serialize(self):\n return {\n \"x\": [float(circle[0]) for circle in self[0]],\n \"y\": [float(circle[1]) for circle in self[0]],\n \"radius\": [float(circle[2]) for circle in self[0]],\n }\n\n\nclass Segments(ndarray):\n def nt_serialize(self):\n return {\n \"x1\": [float(seg[0][0]) for seg in self],\n \"y1\": [float(seg[0][1]) for seg in self],\n \"x2\": [float(seg[0][2]) for seg in self],\n \"y2\": [float(seg[0][3]) for seg in self],\n }\n\n\n# Never actually used, but might be in the future?\nclass Lines(ndarray):\n pass\n" ]
[ [ "numpy.array", "numpy.matmul" ] ]
zhua1/pythonActivities
[ "d4bebd9a43d658f4802b998f6963c9c87a94b7d0" ]
[ "activity1.py" ]
[ "# -*- coding: utf-8 -*-\nimport pandas as pd\n\nfile = pd.read_csv('C:\\\\Users\\\\meifl\\\\Desktop\\\\UCIRV201804DATA3-Class-Repository-DATA\\\\01-LessonPlan\\\\03-Python\\\\3\\\\Activities\\\\Unsolved\\\\01-Stu_CerealCleaner\\\\Resources\\\\cereal.csv', header = None)\n\nprint(file[0][file[7] >= 5])\n\n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#\n\nfile1 = pd.read_csv('C:\\\\Users\\\\meifl\\\\Desktop\\\\UCIRV201804DATA3-Class-Repository-DATA\\\\01-LessonPlan\\\\03-Python\\\\3\\\\Activities\\\\Unsolved\\\\01-Stu_CerealCleaner\\\\Resources\\\\cereal_bonus.csv')\n\nprint(file1['name'][file1['fiber'] >= 5])\n" ]
[ [ "pandas.read_csv" ] ]
ttglennhall/simple_dev_python
[ "68c199255beba0053c7c9b905dca7ea5ec919c66" ]
[ "database.py" ]
[ "import sqlite3 as lite\nimport pandas as pd\n\ncon = lite.connect('challenge_database.db')\n\ncities = (('Las Vegas', 'NV'), ('Atlanta', 'GA'), ('New York City', 'NY'),\n ('Boston', 'MA'), ('Chicago', 'IL'), ('Miami', 'FL'), ('Dallas', 'TX'),\n ('Seattle', 'WA'), ('Portland', 'OR'), ('San Francisco', 'CA'),\n ('Los Angeles', 'CA'))\n\nweather = (('Las Vegas', 2013, 'July', 'December', 15), ('Atlanta', 2013, 'July', 'January', 22), \n\t('New York City', 2013, 'July', 'January', 62), ('Boston', 2013, 'July', 'January', 59), \n\t('Chicago', 2013, 'July', 'January', 59), ('Miami', 2013, 'August', 'January', 84), \n\t('Dallas', 2013, 'July', 'January', 77), ('Seattle', 2013, 'July', 'January', 61), \n\t('Portland', 2013, 'July', 'December', 63), ('San Francisco', 2013, 'September', 'December', 64), \n\t('Los Angeles', 2013, 'September', 'December', 75))\n\nwith con:\n\tcur = con.cursor()\n\tcur.execute(\"DROP TABLE IF EXISTS cities\")\n\tcur.execute(\"DROP TABLE IF EXISTS weather\")\n\n\tcur.execute(\"CREATE TABLE cities (name text, state text)\")\n\tcur.execute(\"CREATE TABLE weather (city text, year integer, warm_month text, cold_month text, average_high integer)\")\n\n\tcur.executemany(\"INSERT INTO cities VALUES(?,?)\", cities)\n\tcur.executemany(\"INSERT INTO weather VALUES(?,?,?,?,?)\", weather)\n\n\tcur.execute(\"SELECT name, state, year, warm_month, cold_month, average_high FROM cities INNER JOIN weather ON name = city\")\n\t#cur.execute(\"SELECT * FROM cities\")\n\trows = cur.fetchall()\n\tcols = [desc[0] for desc in cur.description]\n\tdf = pd.DataFrame(rows, columns=cols)\n\nall_info = df.loc[df['warm_month'] == 'July']\n\nnames = all_info['name']\n\nprint('The cities that are warmest in July are: {}'.format(', '.join(names)))\n\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
swfarnsworth/tmnt
[ "a53c8d62d0ddc6be5fc62013e6801019c345a6f4" ]
[ "tmnt/estimator.py" ]
[ "# coding: utf-8\n# Copyright (c) 2020 The MITRE Corporation.\n\"\"\"\nEstimator module to train/fit/estimate individual models with fixed hyperparameters.\nEstimators are used by trainers to manage training with specific datasets; in addition,\nthe estimator API supports inference/encoding with fitted models.\n\"\"\"\n\nimport logging\nimport math\nimport logging\nimport time\nimport io\nimport os\nimport psutil\nimport mxnet as mx\nimport numpy as np\nimport scipy.sparse as sp\nimport json\nfrom mxnet import autograd\nfrom mxnet import gluon\nimport gluonnlp as nlp\nimport umap\n#import umap.plot\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import average_precision_score, top_k_accuracy_score, roc_auc_score, ndcg_score, f1_score, precision_recall_fscore_support\nfrom tmnt.data_loading import DataIterLoader, SparseMatrixDataIter\nfrom tmnt.modeling import BowVAEModel, CovariateBowVAEModel, SeqBowVED\nfrom tmnt.modeling import GeneralizedSDMLLoss, MetricSeqBowVED\nfrom tmnt.eval_npmi import EvaluateNPMI\nfrom tmnt.distribution import HyperSphericalDistribution, LogisticGaussianDistribution, BaseDistribution, GaussianDistribution\nimport autogluon.core as ag\nfrom itertools import cycle\nfrom typing import List, Tuple, Dict, Optional, Union, NoReturn\n\nMAX_DESIGN_MATRIX = 250000000\n\ndef multilabel_pr_fn(cutoff, recall=False):\n\n def get_recall_or_precision(yvec, pvec):\n prec, rec, _, support = precision_recall_fscore_support(yvec, pvec, zero_division=0, average='samples')\n if recall:\n return rec\n else:\n return prec\n \n def multilabel_recall_fn_x(label, pred):\n num_labels = label[0].shape[0]\n pred_decision = np.where(pred >= cutoff, 1.0, 0.0)\n w_sum = get_recall_or_precision(label, pred_decision)\n return w_sum, label.shape[0]\n\n return multilabel_recall_fn_x\n\ndef get_composite_p_and_r_metric():\n metrics = mx.metric.CompositeEvalMetric()\n prec_metric = mx.metric.CustomMetric(feval=multilabel_pr_fn(0.5, recall=False))\n rec_metric = mx.metric.CustomMetric(feval=multilabel_pr_fn(0.5, recall=True))\n metrics.add(prec_metric)\n metrics.add(rec_metric)\n return metrics\n\n\nclass BaseEstimator(object):\n \"\"\"Base class for all VAE-based estimators.\n \n Parameters:\n log_method: Method for logging. 'print' | 'log', optional (default='log')\n quiet: Flag for whether to force minimal logging/ouput. optional (default=False)\n coherence_coefficient: Weight to tradeoff influence of coherence vs perplexity in model \n selection objective (default = 8.0)\n reporter: Callback reporter to include information for \n model selection via AutoGluon\n ctx: MXNet context for the estimator\n latent_distribution: Latent distribution of the variational autoencoder - defaults to LogisticGaussian with 20 dimensions \n optimizer: MXNet optimizer (default = \"adam\")\n lr: Learning rate of training. (default=0.005)\n coherence_reg_penalty: Regularization penalty for topic coherence. optional (default=0.0)\n redundancy_reg_penalty: Regularization penalty for topic redundancy. optional (default=0.0)\n batch_size: Batch training size. optional (default=128)\n epochs : Number of training epochs. 
optional(default=40)\n coherence_via_encoder: Flag to use encoder to derive coherence scores (via gradient attribution)\n pretrained_param_file: Path to pre-trained parameter file to initialize weights\n warm_start: Subsequent calls to `fit` will use existing model weights rather than reinitializing\n \"\"\"\n def __init__(self,\n log_method: str = 'log',\n quiet: bool = False,\n coherence_coefficient: float = 8.0,\n reporter: Optional[object] = None,\n ctx: Optional[mx.context.Context] = mx.cpu(),\n latent_distribution: Optional[BaseDistribution] = None,\n optimizer: str = \"adam\",\n lr: float = 0.005, \n coherence_reg_penalty: float = 0.0,\n redundancy_reg_penalty: float = 0.0,\n batch_size: int = 128,\n epochs: int = 40,\n coherence_via_encoder: bool = False,\n pretrained_param_file: Optional[str] = None,\n warm_start: bool = False):\n self.log_method = log_method\n self.quiet = quiet\n self.model = None\n self.coherence_coefficient = coherence_coefficient\n self.reporter = reporter\n self.ctx = ctx\n self.latent_distribution = latent_distribution or LogisticGaussianDistribution(20)\n self.optimizer = optimizer\n self.lr = lr\n self.n_latent = self.latent_distribution.n_latent\n self.coherence_reg_penalty = coherence_reg_penalty\n self.redundancy_reg_penalty = redundancy_reg_penalty\n self.batch_size = batch_size\n self.epochs = epochs\n self.coherence_via_encoder = coherence_via_encoder\n self.pretrained_param_file = pretrained_param_file\n self.warm_start = warm_start\n self.num_val_words = -1 ## will be set later for computing Perplexity on validation dataset\n self.latent_distribution.ctx = self.ctx\n\n\n def _np_one_hot(self, vec, n_outputs):\n ovec = np.zeros((vec.size, n_outputs))\n ovec[np.arange(vec.size), vec.astype('int32')] = 1.0\n return ovec\n \n\n def _output_status(self, status_string):\n if self.log_method == 'print':\n print(status_string)\n elif self.log_method == 'log':\n logging.info(status_string)\n\n def get_topic_vectors(self):\n raise NotImplementedError()\n\n\n def _get_model(self):\n \"\"\"\n Returns:\n (:class:`mxnet.gluon.HybridBlock`): MXNet model initialized using provided hyperparameters\n \"\"\"\n raise NotImplementedError()\n\n\n def _npmi(self, X, k=10):\n \"\"\"\n Calculate NPMI(Normalized Pointwise Mutual Information) for data X\n\n Parameters:\n X (array-like or sparse matrix): Document word matrix. shape [n_samples, vocab_size]\n k (int): Threshold at which to compute npmi. optional (default=10)\n\n Returns:\n npmi (float): NPMI score.\n \"\"\"\n sorted_ids = self.model.get_ordered_terms()\n num_topics = min(self.n_latent, sorted_ids.shape[-1])\n top_k_words_per_topic = [[int(i) for i in list(sorted_ids[:k, t])] for t in range(self.n_latent)]\n npmi_eval = EvaluateNPMI(top_k_words_per_topic)\n npmi = npmi_eval.evaluate_csr_mat(X)\n unique_term_ids = set()\n unique_limit = 5 ## only consider the top 5 terms for each topic when looking at degree of redundancy\n for i in range(num_topics):\n topic_ids = list(top_k_words_per_topic[i][:unique_limit])\n for j in range(len(topic_ids)):\n unique_term_ids.add(topic_ids[j])\n redundancy = (1.0 - (float(len(unique_term_ids)) / num_topics / unique_limit)) ** 2\n return npmi, redundancy\n\n\n def _get_objective_from_validation_result(self, val_result):\n \"\"\"\n Get the final objective value from the various validation metrics.\n\n Parameters:\n val_result (dict): Dictionary of validation metrics calculated. 
\n \"\"\"\n raise NotImplementedError()\n \n\n def fit(self, X: sp.csr.csr_matrix, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit VAE model according to the given training data X with optional co-variates y.\n \n Parameters:\n X: representing input data\n y: representing covariate/labels associated with data elements\n \"\"\"\n raise NotImplementedError()\n \n\n def fit_with_validation(self, X: sp.csr.csr_matrix, y: np.ndarray, val_X: sp.csr.csr_matrix, val_Y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit VAE model according to the given training data X with optional co-variates y;\n validate (potentially each epoch) with validation data val_X and optional co-variates val_Y\n \n Parameters:\n X: representing training data\n y: representing covariate/labels associated with data elements in training data\n val_X: representing validation data\n val_y: representing covariate/labels associated with data elements in validation data\n \"\"\"\n raise NotImplementedError()\n\n\nclass BaseBowEstimator(BaseEstimator):\n \"\"\"\n Bag of words variational autoencoder algorithm\n\n Parameters:\n vocabulary (:class:`gluonnlp.Vocab`): GluonNLP Vocabulary object\n n_labels: Number of possible labels/classes when provided supervised data\n gamma: Coefficient that controls how supervised and unsupervised losses are weighted against each other\n enc_hidden_dim (int): Size of hidden encoder layers. optional (default=150)\n embedding_source (str): Word embedding source for vocabulary.\n 'random' | 'glove' | 'fasttext' | 'word2vec', optional (default='random')\n embedding_size (int): Word embedding size, ignored if embedding_source not 'random'. optional (default=128)\n fixed_embedding (bool): Enable fixed embeddings. optional(default=False)\n num_enc_layers: Number of layers in encoder. optional(default=1)\n enc_dr: Dropout probability in encoder. 
optional(default=0.1)\n coherence_via_encoder: Flag \n validate_each_epoch: Perform validation of model against heldout validation \n data after each training epoch\n multilabel: Assume labels are vectors denoting label sets associated with each document\n \"\"\"\n def __init__(self,\n vocabulary: nlp.Vocab,\n n_labels: int = 0,\n gamma: float = 1.0,\n multilabel: bool = False,\n validate_each_epoch: bool = False,\n enc_hidden_dim: int = 150,\n embedding_source: str = \"random\",\n embedding_size: int = 128,\n fixed_embedding: bool = False,\n num_enc_layers: int = 1,\n enc_dr: float = 0.1,\n classifier_dropout: float = 0.1,\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.enc_hidden_dim = enc_hidden_dim\n self.fixed_embedding = fixed_embedding\n self.n_encoding_layers = num_enc_layers\n self.enc_dr = enc_dr\n self.classifier_dropout = classifier_dropout\n self.vocabulary = vocabulary \n self.embedding_source = embedding_source\n self.embedding_size = embedding_size\n self.validate_each_epoch = validate_each_epoch\n self.multilabel = multilabel\n self.gamma = gamma\n self.n_labels = n_labels\n self.has_classifier = n_labels > 1\n self.loss_function = gluon.loss.SigmoidBCELoss() if multilabel else gluon.loss.SoftmaxCELoss()\n\n @classmethod\n def from_saved(cls, model_dir: str) -> 'BaseBowEstimator':\n \"\"\"\n Instantiate a BaseBowEstimator object from a saved model\n\n Parameters:\n model_dir: String representing the path to the saved model directory\n Returns:\n BaseBowEstimator object\n \"\"\"\n return cls.from_config(config = model_dir+'/model.config',\n vocabulary = model_dir+'/vocab.json',\n pretrained_param_file = model_dir+'/model.params')\n\n @classmethod\n def from_config(cls, config: Union[str, dict], vocabulary: Union[str, nlp.Vocab],\n n_labels: int = 0,\n coherence_coefficient: float = 8.0,\n coherence_via_encoder: bool = False,\n validate_each_epoch: bool = False,\n pretrained_param_file: Optional[str] = None,\n reporter: Optional[object] = None,\n ctx: mx.context.Context = mx.cpu()) -> 'BaseBowEstimator':\n \"\"\"\n Create an estimator from a configuration file/object rather than by keyword arguments\n \n Parameters:\n config: Path to a json representation of a configuation or TMNT config dictionary\n vocabulary: Path to a json representation of a vocabulary or GluonNLP vocabulary object\n pretrained_param_file: Path to pretrained parameter file if using pretrained model\n reporter: Callback reporter to include information for model selection via AutoGluon\n ctx: MXNet context for the estimator\n\n Returns:\n An estimator for training and evaluation of a single model\n \"\"\"\n if isinstance(config, str):\n try:\n with open(config, 'r') as f:\n config_dict = json.load(f)\n except:\n logging.error(\"File {} does not appear to be a valid config instance\".format(config))\n raise Exception(\"Invalid Json Configuration File\")\n config = ag.space.Dict(**config_dict)\n if isinstance(vocabulary, str):\n try:\n with open(vocabulary, 'r') as f:\n voc_js = f.read()\n except:\n logging.error(\"File {} does not appear to be a valid vocabulary file\".format(vocabulary))\n raise Exception(\"Invalid Json Configuration File\") \n vocabulary = nlp.Vocab.from_json(voc_js)\n if vocabulary.embedding is not None:\n emb_size = vocabulary.embedding.idx_to_vec[0].size\n else:\n emb_size = config.embedding.get('size')\n if not emb_size:\n emb_size = config.derived_info.get('embedding_size')\n if not emb_size:\n raise Exception(\"Embedding size must be provided as the 'size' attribute 
of 'embedding' or as 'derived_info.embedding_size'\")\n gamma = config.get('gamma', 1.0)\n multilabel = config.get('multilabel', False)\n lr = config.lr\n latent_distrib = config.latent_distribution\n optimizer = config.optimizer\n n_latent = int(config.n_latent)\n enc_hidden_dim = int(config.enc_hidden_dim)\n coherence_reg_penalty = float(config.coherence_loss_wt)\n redundancy_reg_penalty = float(config.redundancy_loss_wt)\n batch_size = int(config.batch_size)\n embedding_source = config.embedding.source\n fixed_embedding = config.embedding.get('fixed') == True\n covar_net_layers = config.covar_net_layers\n n_encoding_layers = config.num_enc_layers\n enc_dr = config.enc_dr\n epochs = int(config.epochs)\n ldist_def = config.latent_distribution\n kappa = 0.0\n alpha = 1.0\n latent_distrib = ldist_def.dist_type\n if latent_distrib == 'logistic_gaussian':\n alpha = ldist_def.alpha\n latent_distribution = LogisticGaussianDistribution(n_latent, ctx=ctx, alpha=alpha)\n elif latent_distrib == 'vmf':\n kappa = ldist_def.kappa\n latent_distribution = HyperSphericalDistribution(n_latent, ctx=ctx, kappa=kappa)\n else:\n latent_distribution = GaussianDistribution(n_latent, ctx=ctx)\n model = \\\n cls(vocabulary,\n n_labels=n_labels,\n gamma = gamma,\n multilabel = multilabel,\n validate_each_epoch=validate_each_epoch,\n coherence_coefficient=coherence_coefficient,\n reporter=reporter, \n ctx=ctx, lr=lr, latent_distribution=latent_distribution, optimizer=optimizer,\n enc_hidden_dim=enc_hidden_dim,\n coherence_reg_penalty=coherence_reg_penalty,\n redundancy_reg_penalty=redundancy_reg_penalty, batch_size=batch_size, \n embedding_source=embedding_source, embedding_size=emb_size, fixed_embedding=fixed_embedding,\n num_enc_layers=n_encoding_layers, enc_dr=enc_dr, \n epochs=epochs, log_method='log', coherence_via_encoder=coherence_via_encoder,\n pretrained_param_file = pretrained_param_file,\n warm_start = (pretrained_param_file is not None))\n return model\n\n\n def _get_config(self):\n config = {}\n config['lr'] = self.lr\n config['enc_hidden_dim'] = self.enc_hidden_dim\n config['n_latent'] = self.n_latent\n config['optimizer'] = self.optimizer\n config['epochs'] = self.epochs\n config['batch_size'] = self.batch_size\n config['num_enc_layers'] = self.n_encoding_layers\n config['enc_dr'] = self.enc_dr\n config['coherence_loss_wt'] = self.coherence_reg_penalty\n config['redundancy_loss_wt'] = self.redundancy_reg_penalty\n config['n_labels'] = self.n_labels\n config['covar_net_layers'] = 1\n if isinstance(self.latent_distribution, HyperSphericalDistribution):\n config['latent_distribution'] = {'dist_type':'vmf', 'kappa': self.latent_distribution.kappa}\n elif isinstance(self.latent_distribution, LogisticGaussianDistribution):\n config['latent_distribution'] = {'dist_type':'logistic_gaussian', 'alpha':self.latent_distribution.alpha}\n else:\n config['latent_distribution'] = {'dist_type':'gaussian'}\n if self.embedding_source != 'random':\n config['embedding'] = {'source': self.embedding_source}\n else:\n config['embedding'] = {'source': 'random', 'size': self.embedding_size}\n config['derived_info'] = {'embedding_size': self.embedding_size}\n return config\n \n\n def write_model(self, model_dir):\n if not os.path.exists(model_dir):\n os.mkdir(model_dir)\n pfile = os.path.join(model_dir, 'model.params')\n sp_file = os.path.join(model_dir, 'model.config')\n vocab_file = os.path.join(model_dir, 'vocab.json')\n logging.info(\"Model parameters, configuration and vocabulary written to {}\".format(model_dir))\n 
self.model.save_parameters(pfile)\n config = self._get_config()\n specs = json.dumps(config, sort_keys=True, indent=4)\n with io.open(sp_file, 'w') as fp:\n fp.write(specs)\n with io.open(vocab_file, 'w') as fp:\n fp.write(self.model.vocabulary.to_json())\n\n\n def _get_wd_freqs(self, X, max_sample_size=1000000):\n sample_size = min(max_sample_size, X.shape[0])\n sums = X.sum(axis=0)\n return sums\n\n def _get_model(self):\n raise NotImplementedError()\n\n def _npmi_with_dataloader(self, dataloader, k=10):\n sorted_ids = self.model.get_ordered_terms_encoder(dataloader) if self.coherence_via_encoder else self.model.get_ordered_terms()\n num_topics = min(self.n_latent, sorted_ids.shape[-1])\n top_k_words_per_topic = [[int(i) for i in list(sorted_ids[:k, t])] for t in range(self.n_latent)]\n npmi_eval = EvaluateNPMI(top_k_words_per_topic)\n npmi = npmi_eval.evaluate_csr_loader(dataloader)\n unique_term_ids = set()\n unique_limit = 5 ## only consider the top 5 terms for each topic when looking at degree of redundancy\n for i in range(num_topics):\n topic_ids = list(top_k_words_per_topic[i][:unique_limit])\n for j in range(len(topic_ids)):\n unique_term_ids.add(topic_ids[j])\n redundancy = (1.0 - (float(len(unique_term_ids)) / num_topics / unique_limit)) ** 2\n return npmi, redundancy\n \n def _perplexity(self, dataloader, total_words):\n total_rec_loss = 0\n total_kl_loss = 0\n last_batch_size = dataloader.last_batch_size\n num_batches = dataloader.num_batches\n for i, (data,labels) in enumerate(dataloader):\n if labels is None: \n labels = mx.nd.expand_dims(mx.nd.zeros(data.shape[0]), 1)\n data = data.as_in_context(self.ctx)\n labels = labels.as_in_context(self.ctx)\n _, kl_loss, rec_loss, _, _, _ = self._forward(self.model, data, labels)\n if i == num_batches - 1 and last_batch_size > 0:\n total_rec_loss += rec_loss[:last_batch_size].sum().asscalar()\n total_kl_loss += kl_loss[:last_batch_size].sum().asscalar()\n else:\n total_rec_loss += rec_loss.sum().asscalar()\n total_kl_loss += kl_loss.sum().asscalar()\n if ((total_rec_loss + total_kl_loss) / total_words) < 709.0:\n perplexity = math.exp((total_rec_loss + total_kl_loss) / total_words)\n else:\n perplexity = 1e300\n return perplexity\n\n def perplexity(self, X, y):\n dataloader = self._get_val_dataloader(X, y)\n self.num_val_words = X.sum()\n return self._perplexity(dataloader, self.num_val_words)\n\n def _get_val_dataloader(self, val_X, val_y):\n test_size = val_X.shape[0] * val_X.shape[1]\n test_batch_size = min(val_X.shape[0], self.batch_size)\n last_batch_size = val_X.shape[0] % test_batch_size if test_batch_size < val_X.shape[0] else test_batch_size\n num_val_batches = val_X.shape[0] // test_batch_size\n if last_batch_size > 0 and last_batch_size < test_batch_size:\n num_val_batches += 1\n if test_size < MAX_DESIGN_MATRIX:\n val_X = mx.nd.sparse.csr_matrix(val_X).tostype('default')\n val_y = mx.nd.array(val_y) if val_y is not None else None\n val_dataloader = DataIterLoader(mx.io.NDArrayIter(val_X, val_y, test_batch_size,\n last_batch_handle='pad', shuffle=False),\n num_batches=num_val_batches, last_batch_size = last_batch_size)\n else:\n val_dataloader = DataIterLoader(SparseMatrixDataIter(val_X, val_y, batch_size = test_batch_size,\n last_batch_handle='pad', shuffle=False),\n )\n return val_dataloader\n\n def validate(self, val_X, val_y):\n val_dataloader = self._get_val_dataloader(val_X, val_y)\n total_val_words = val_X.sum()\n if self.num_val_words < 0:\n self.num_val_words = total_val_words\n ppl = 
self._perplexity(val_dataloader, total_val_words)\n if self.coherence_via_encoder:\n npmi, redundancy = self._npmi_with_dataloader(val_dataloader)\n else:\n n = min(val_X.shape[0], 50000)\n npmi, redundancy = self._npmi(val_X[:n])\n v_res = {'ppl': ppl, 'npmi': npmi, 'redundancy': redundancy}\n prediction_arrays = []\n if self.has_classifier:\n tot_correct = 0\n tot = 0\n bs = min(val_X.shape[0], self.batch_size)\n num_std_batches = val_X.shape[0] // bs\n last_batch_size = val_X.shape[0] % bs\n for i, (data, labels) in enumerate(val_dataloader):\n data = data.as_in_context(self.ctx)\n labels = labels.as_in_context(self.ctx)\n if i == num_std_batches - 1 and last_batch_size > 0:\n data = data[:last_batch_size]\n labels = labels[:last_batch_size]\n predictions = self.model.predict(data) \n predictions_lists = [ p.asnumpy() for p in list(predictions) ]\n prediction_arrays.extend(predictions_lists)\n if len(labels.shape) == 1: ## standard single-label classification\n correct = mx.nd.argmax(predictions, axis=1) == labels\n tot_correct += mx.nd.sum(correct).asscalar()\n tot += (data.shape[0] - (labels < 0.0).sum().asscalar()) # subtract off labels < 0 (for unlabeled data)\n acc = float(tot_correct) / float(tot)\n v_res['accuracy'] = acc\n prediction_mat = np.array(prediction_arrays)\n ap_scores = []\n if len(val_y.shape) == 1:\n val_y = self._np_one_hot(val_y, self.n_labels)\n for c in range(self.n_labels):\n y_vec = val_y[:,c]\n pred_vec = prediction_mat[:,c]\n if not np.any(np.isnan(pred_vec)):\n ap_c = average_precision_score(y_vec, pred_vec)\n else:\n ap_c = 0.0\n ap_scores.append((ap_c, int(y_vec.sum())))\n prediction_np_mat = np.array(prediction_arrays)\n v_res['ap_scores_and_support'] = ap_scores\n return v_res\n\n\n def initialize_with_pretrained(self):\n raise NotImplementedError()\n\n\n def _get_objective_from_validation_result(self, val_result):\n npmi = val_result['npmi']\n ppl = val_result['ppl']\n redundancy = val_result['redundancy']\n obj = (npmi - redundancy) - ( ( ppl / 1000 ) / self.coherence_coefficient )\n b_obj = max(min(obj, 100.0), -100.0)\n sc_obj = 1.0 / (1.0 + math.exp(-b_obj))\n if self.has_classifier:\n orig_obj = sc_obj\n sc_obj = (sc_obj + self.gamma * val_result['accuracy']) / (1.0 + self.gamma)\n logging.info(\"Objective via classifier: {} based on accuracy = {} and topic objective = {}\"\n .format(sc_obj, val_result['accuracy'], orig_obj))\n else:\n logging.info(\"Pure topic model objective: {} (has classifier = {})\".format(sc_obj, self.has_classifier))\n return sc_obj\n\n\n def _get_losses(self, model, data, labels):\n elbo_ls, kl_ls, rec_ls, coherence_loss, red_ls, predicted_labels = \\\n self._forward(self.model, data, labels)\n if self.has_classifier:\n label_ls = self.loss_function(predicted_labels, labels).mean()\n total_ls = (self.gamma * label_ls) + elbo_ls.mean()\n else:\n total_ls = elbo_ls.mean()\n label_ls = mx.nd.zeros(total_ls.shape)\n return elbo_ls, kl_ls, rec_ls, red_ls, label_ls, total_ls\n\n def fit_with_validation_aux(self, \n X: sp.csr.csr_matrix,\n y: np.ndarray,\n val_X: Optional[sp.csr.csr_matrix],\n val_y: Optional[np.ndarray],\n aux_X: Optional[sp.csr.csr_matrix] = None) -> Tuple[float, float, float, float]:\n wd_freqs = self._get_wd_freqs(X)\n x_size = X.shape[0] * X.shape[1]\n if x_size > MAX_DESIGN_MATRIX:\n logging.info(\"Sparse matrix has total size = {}. 
Using Sparse Matrix data batcher.\".format(x_size))\n train_dataloader = \\\n DataIterLoader(SparseMatrixDataIter(X, y, batch_size = self.batch_size, last_batch_handle='discard', shuffle=True))\n else:\n y = mx.nd.array(y) if y is not None else None\n X = mx.nd.sparse.csr_matrix(X)\n train_dataloader = DataIterLoader(mx.io.NDArrayIter(X, y, self.batch_size, last_batch_handle='discard', shuffle=True))\n if aux_X is not None:\n aux_dataloader = \\\n DataIterLoader(SparseMatrixDataIter(X, None, batch_size = self.batch_size, last_batch_handle='discard', shuffle=True))\n \n if self.model is None or not self.warm_start:\n self.model = self._get_model()\n self.model.initialize_bias_terms(mx.nd.array(wd_freqs).squeeze()) ## initialize bias weights to log frequencies\n \n trainer = gluon.Trainer(self.model.collect_params(), self.optimizer, {'learning_rate': self.lr})\n sc_obj, npmi, ppl, redundancy = 0.0, 0.0, 0.0, 0.0\n v_res = None\n for epoch in range(self.epochs):\n ts_epoch = time.time()\n elbo_losses = []\n lab_losses = []\n for i, (data, labels) in enumerate(train_dataloader):\n if labels is None:\n labels = mx.nd.expand_dims(mx.nd.zeros(data.shape[0]), 1)\n labels = labels.as_in_context(self.ctx)\n data = data.as_in_context(self.ctx)\n with autograd.record():\n elbo_ls, kl_loss, _, _, lab_loss, total_ls = self._get_losses(self.model, data, labels)\n elbo_mean = elbo_ls.mean()\n total_ls.backward()\n trainer.step(1)\n if not self.quiet:\n elbo_losses.append(float(elbo_mean.asscalar()))\n if lab_loss is not None:\n lab_losses.append(float(lab_loss.mean().asscalar()))\n if not self.quiet and not self.validate_each_epoch:\n elbo_mean = np.mean(elbo_losses) if len(elbo_losses) > 0 else 0.0\n lab_mean = np.mean(lab_losses) if len(lab_losses) > 0 else 0.0\n self._output_status(\"Epoch [{}] finished in {} seconds. [elbo = {}, label loss = {}]\"\n .format(epoch+1, (time.time()-ts_epoch), elbo_mean, lab_mean))\n mx.nd.waitall()\n if val_X is not None and (self.validate_each_epoch or epoch == self.epochs-1):\n logging.info('Performing validation ....')\n v_res = self.validate(val_X, val_y)\n sc_obj = self._get_objective_from_validation_result(v_res)\n if self.has_classifier:\n self._output_status(\"Epoch [{}]. Objective = {} ==> PPL = {}. NPMI ={}. Redundancy = {}. Accuracy = {}.\"\n .format(epoch+1, sc_obj, v_res['ppl'],\n v_res['npmi'], v_res['redundancy'], v_res['accuracy']))\n else:\n self._output_status(\"Epoch [{}]. Objective = {} ==> PPL = {}. NPMI ={}. 
Redundancy = {}.\"\n .format(epoch+1, sc_obj, v_res['ppl'], v_res['npmi'], v_res['redundancy']))\n if self.reporter:\n self.reporter(epoch=epoch+1, objective=sc_obj, time_step=time.time(),\n coherence=v_res['npmi'], perplexity=v_res['ppl'], redundancy=v_res['redundancy'])\n mx.nd.waitall()\n return sc_obj, v_res\n\n def fit_with_validation(self,\n X: sp.csr.csr_matrix,\n y: np.ndarray,\n val_X: Optional[sp.csr.csr_matrix],\n val_y: Optional[np.ndarray],\n aux_X: Optional[sp.csr.csr_matrix] = None) -> Tuple[float, float, float, float]:\n \"\"\"\n Fit a model according to the options of this estimator and optionally evaluate on validation data\n\n Parameters:\n X: Input training tensor\n y: Input labels/co-variates to use (optionally) for co-variate models\n val_X: Validateion input tensor\n val_y: Validation co-variates\n aux_X: Auxilliary unlabeled data for semi-supervised training\n\n Returns:\n sc_obj, npmi, perplexity, redundancy\n \"\"\"\n wd_freqs = self._get_wd_freqs(X)\n x_size = X.shape[0] * X.shape[1]\n if x_size > MAX_DESIGN_MATRIX:\n logging.info(\"Sparse matrix has total size = {}. Using Sparse Matrix data batcher.\".format(x_size))\n train_dataloader = \\\n DataIterLoader(SparseMatrixDataIter(X, y, batch_size = self.batch_size, last_batch_handle='discard', shuffle=True))\n else:\n y = mx.nd.array(y) if y is not None else None\n X = mx.nd.sparse.csr_matrix(X)\n train_dataloader = DataIterLoader(mx.io.NDArrayIter(X, y, self.batch_size, last_batch_handle='discard', shuffle=True))\n\n if self.model is None or not self.warm_start:\n self.model = self._get_model()\n self.model.initialize_bias_terms(mx.nd.array(wd_freqs).squeeze()) ## initialize bias weights to log frequencies\n \n trainer = gluon.Trainer(self.model.collect_params(), self.optimizer, {'learning_rate': self.lr})\n sc_obj, npmi, ppl, redundancy = 0.0, 0.0, 0.0, 0.0\n v_res = None\n for epoch in range(self.epochs):\n ts_epoch = time.time()\n elbo_losses = []\n lab_losses = []\n for i, (data, labels) in enumerate(train_dataloader):\n if labels is None:\n labels = mx.nd.expand_dims(mx.nd.zeros(data.shape[0]), 1)\n labels = labels.as_in_context(self.ctx)\n data = data.as_in_context(self.ctx)\n with autograd.record():\n elbo_ls, kl_loss, _, _, lab_loss, total_ls = self._get_losses(self.model, data, labels)\n elbo_mean = elbo_ls.mean()\n total_ls.backward()\n trainer.step(1)\n if not self.quiet:\n elbo_losses.append(float(elbo_mean.asscalar()))\n if lab_loss is not None:\n lab_losses.append(float(lab_loss.mean().asscalar()))\n if not self.quiet and not self.validate_each_epoch:\n elbo_mean = np.mean(elbo_losses) if len(elbo_losses) > 0 else 0.0\n lab_mean = np.mean(lab_losses) if len(lab_losses) > 0 else 0.0\n self._output_status(\"Epoch [{}] finished in {} seconds. [elbo = {}, label loss = {}]\"\n .format(epoch+1, (time.time()-ts_epoch), elbo_mean, lab_mean))\n mx.nd.waitall()\n if val_X is not None and (self.validate_each_epoch or epoch == self.epochs-1):\n logging.info('Performing validation ....')\n v_res = self.validate(val_X, val_y)\n sc_obj = self._get_objective_from_validation_result(v_res)\n if self.has_classifier:\n self._output_status(\"Epoch [{}]. Objective = {} ==> PPL = {}. NPMI ={}. Redundancy = {}. Accuracy = {}.\"\n .format(epoch+1, sc_obj, v_res['ppl'],\n v_res['npmi'], v_res['redundancy'], v_res['accuracy']))\n else:\n self._output_status(\"Epoch [{}]. Objective = {} ==> PPL = {}. NPMI ={}. 
Redundancy = {}.\"\n .format(epoch+1, sc_obj, v_res['ppl'], v_res['npmi'], v_res['redundancy']))\n if self.reporter:\n self.reporter(epoch=epoch+1, objective=sc_obj, time_step=time.time(),\n coherence=v_res['npmi'], perplexity=v_res['ppl'], redundancy=v_res['redundancy'])\n mx.nd.waitall()\n return sc_obj, v_res\n\n \n def fit(self, X: sp.csr.csr_matrix, y: np.ndarray = None) -> 'BaseBowEstimator':\n \"\"\"\n Fit VAE model according to the given training data X with optional co-variates y.\n \n Parameters:\n X: representing input data\n y: representing covariate/labels associated with data elements\n\n Returns:\n self\n \"\"\"\n self.fit_with_validation(X, y, None, None)\n return self\n\n\nclass BowEstimator(BaseBowEstimator):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n return super().from_config(*args, **kwargs)\n\n @classmethod\n def from_saved(cls, *args, **kwargs):\n return super().from_saved(*args, **kwargs)\n \n def npmi(self, X, k=10):\n return self._npmi(X, k=k)\n\n def perplexity(self, X: sp.csr.csr_matrix) -> float:\n \"\"\"\n Calculate approximate perplexity for data X and y\n\n Parameters:\n X: Document word matrix of shape [n_samples, vocab_size]\n\n Returns:\n Perplexity score.\n \"\"\"\n return super().perplexity(X, None)\n\n def _forward(self, model: BowVAEModel, data: mx.nd.NDArray, labels: mx.nd.NDArray):\n \"\"\"\n Forward pass of BowVAE model given the supplied data\n\n Parameters:\n model: Core VAE model for bag-of-words topic model\n data: Document word matrix of shape (n_train_samples, vocab_size)\n labels: Ignored\n\n Returns:\n Tuple of:\n elbo, kl_loss, rec_loss, coherence_loss, redundancy_loss, reconstruction\n \"\"\"\n return model(data, labels)\n\n\n def initialize_with_pretrained(self):\n assert(self.pretrained_param_file is not None)\n self.model = self._get_model()\n self.model.load_parameters(self.pretrained_param_file, allow_missing=False)\n\n\n def _get_model(self):\n \"\"\"\n Initializes embedding weights and returns a `BowVAEModel` with hyperparameters provided.\n\n Returns:\n (:class:`BowVAEModel`) initialized using provided hyperparameters\n \"\"\"\n #vocab, emb_size = self._initialize_embedding_layer(self.embedding_source, self.embedding_size)\n if self.embedding_source != 'random' and self.vocabulary.embedding is None:\n e_type, e_name = tuple(self.embedding_source.split(':'))\n pt_embedding = nlp.embedding.create(e_type, source=e_name)\n self.vocabulary.set_embedding(pt_embedding)\n emb_size = len(self.vocabulary.embedding.idx_to_vec[0])\n for word in self.vocabulary.embedding._idx_to_token:\n if (self.vocabulary.embedding[word] == mx.nd.zeros(emb_size)).sum() == emb_size:\n self.vocabulary.embedding[word] = mx.nd.random.normal(0, 0.1, emb_size)\n else:\n emb_size = self.embedding_size\n model = \\\n BowVAEModel(self.enc_hidden_dim, emb_size, n_encoding_layers=self.n_encoding_layers,\n enc_dr=self.enc_dr, fixed_embedding=self.fixed_embedding,\n classifier_dropout=self.classifier_dropout,\n n_labels = self.n_labels,\n gamma = self.gamma,\n multilabel = self.multilabel,\n vocabulary=self.vocabulary, \n latent_distribution=self.latent_distribution, \n coherence_reg_penalty=self.coherence_reg_penalty, redundancy_reg_penalty=self.redundancy_reg_penalty,\n batch_size=self.batch_size, \n n_covars=0, ctx=self.ctx)\n if self.pretrained_param_file is not None:\n model.load_parameters(self.pretrained_param_file, allow_missing=False)\n return model\n \n\n def 
get_topic_vectors(self) -> mx.nd.NDArray:\n \"\"\"\n Get topic vectors of the fitted model.\n\n Returns:\n topic_distribution: topic_distribution[i, j] represents word j in topic i. shape=(n_latent, vocab_size)\n \"\"\"\n\n return self.model.get_topic_vectors() \n\n def transform(self, X: sp.csr.csr_matrix) -> mx.nd.NDArray:\n \"\"\"\n Transform data X according to the fitted model.\n\n Parameters:\n X: Document word matrix of shape {n_samples, n_features}\n\n Returns:\n topic_distribution: shape=(n_samples, n_latent) Document topic distribution for X\n \"\"\"\n mx_array = mx.nd.array(X,dtype='float32')\n return self.model.encode_data(mx_array).asnumpy()\n\n\nclass CovariateBowEstimator(BaseBowEstimator):\n\n def __init__(self, n_covars=0, *args, **kwargs):\n \n super().__init__(*args, **kwargs)\n\n self.covar_net_layers = 1 ### XXX - temp hardcoded\n self.n_covars = n_covars\n\n\n @classmethod\n def from_config(cls, n_covars, *args, **kwargs):\n est = super().from_config(*args, **kwargs)\n est.n_covars = n_covars\n return est\n \n def _get_model(self):\n \"\"\"\n Returns\n MXNet model initialized using provided hyperparameters\n \"\"\"\n if self.embedding_source != 'random' and self.vocabulary.embedding is None:\n e_type, e_name = tuple(self.embedding_source.split(':'))\n pt_embedding = nlp.embedding.create(e_type, source=e_name)\n self.vocabulary.set_embedding(pt_embedding)\n emb_size = len(self.vocabulary.embedding.idx_to_vec[0])\n for word in self.vocabulary.embedding._idx_to_token:\n if (self.vocabulary.embedding[word] == mx.nd.zeros(emb_size)).sum() == emb_size:\n self.vocabulary.embedding[word] = mx.nd.random.normal(0, 0.1, emb_size)\n else:\n emb_size = self.embedding_size\n model = \\\n CovariateBowVAEModel(n_covars=self.n_covars,\n vocabulary=self.vocabulary, enc_dim=self.enc_hidden_dim, embedding_size=emb_size,\n fixed_embedding=self.fixed_embedding, latent_distribution=self.latent_distribution,\n coherence_reg_penalty=self.coherence_reg_penalty, redundancy_reg_penalty=self.redundancy_reg_penalty,\n batch_size=self.batch_size, n_encoding_layers=self.n_encoding_layers, enc_dr=self.enc_dr,\n ctx=self.ctx)\n return model\n\n\n def _get_config(self):\n config = super()._get_config()\n config['n_covars'] = self.n_covars\n return config\n \n \n def _forward(self, model, data, labels):\n \"\"\"\n Forward pass of BowVAE model given the supplied data\n\n Parameters:\n model (MXNet model): Model that returns elbo, kl_loss, rec_loss, l1_pen, coherence_loss, redundancy_loss, reconstruction\n data ({array-like, sparse matrix}): Document word matrix of shape (n_train_samples, vocab_size) \n labels ({array-like, sparse matrix}): Covariate matrix of shape (n_train_samples, n_covars)\n\n Returns:\n (tuple): Tuple of: \n elbo, kl_loss, rec_loss, l1_pen, coherence_loss, redundancy_loss, reconstruction\n \"\"\"\n self.train_data = data\n self.train_labels = labels\n return model(data, labels)\n\n\n def _npmi_per_covariate(self, X, y, k=10):\n \"\"\"\n Calculate NPMI(Normalized Pointwise Mutual Information) for each covariate for data X\n\n Parameters:\n X (array-like or sparse matrix): Document word matrix. shape [n_samples, vocab_size]\n y (array-like or sparse matrix): Covariate matrix. shape [n_samples, n_covars]\n k (int): Threshold at which to compute npmi. 
optional (default=10)\n\n Returns:\n (dict): Dictionary of npmi scores for each covariate.\n \"\"\"\n X_train = X.toarray()\n y_train = y\n covars = np.unique(y_train, axis=0)\n covar_npmi = {}\n npmi_total = 0\n for covar in covars:\n mask = (y_train == covar).all(axis=1)\n X_covar, y_covar = mx.nd.array(X_train[mask], dtype=np.float32), mx.nd.array(y_train[mask], dtype=np.float32)\n sorted_ids = self.model.get_ordered_terms_with_covar_at_data(X_covar, k, y_covar)\n top_k_words_per_topic = [[int(i) for i in list(sorted_ids[:k, t].asnumpy())] for t in range(self.n_latent)]\n npmi_eval = EvaluateNPMI(top_k_words_per_topic)\n npmi = npmi_eval.evaluate_csr_mat(X_covar)\n\n #if(self.label_map):\n # covar_key = covar[0]\n #else:\n # covar_key = np.where(covar)[0][0]\n covar_key = covar[0]\n covar_npmi[covar_key] = npmi\n npmi_total += npmi\n return npmi_total / len(covars)\n\n def _npmi(self, X, k=10):\n return super()._npmi(X, k=k)\n #return self._npmi_per_covariate(X, y, k)\n\n def _get_objective_from_validation_result(self, v_res):\n return v_res['npmi']\n\n def validate(self, X, y):\n npmi, redundancy = self._npmi(X)\n return {'npmi': npmi, 'redundancy': redundancy, 'ppl': 0.0}\n\n def get_topic_vectors(self) -> mx.nd.NDArray:\n \"\"\"\n Get topic vectors of the fitted model.\n\n Returns:\n topic_vectors: Topic word distribution. topic_distribution[i, j] represents word j in topic i. \n shape=(n_latent, vocab_size)\n \"\"\"\n\n return self.model.get_topic_vectors(self.train_data, self.train_labels)\n\n def transform(self, X: sp.csr.csr_matrix, y: np.ndarray):\n \"\"\"\n Transform data X and y according to the fitted model.\n\n Parameters:\n X: Document word matrix of shape (n_samples, n_features)\n y: Covariate matrix of shape (n_train_samples, n_covars)\n\n Returns:\n Document topic distribution for X and y of shape=(n_samples, n_latent)\n \"\"\"\n x_mxnet, y_mxnet = mx.nd.array(X, dtype=np.float32), mx.nd.array(y, dtype=np.float32)\n return self.model.encode_data_with_covariates(x_mxnet, y_mxnet).asnumpy()\n \n\nclass SeqBowEstimator(BaseEstimator):\n\n def __init__(self, bert_base, *args,\n bert_model_name = 'bert_12_768_12',\n bert_data_name = 'book_corpus_wiki_en_uncased',\n bow_vocab = None,\n n_labels = 0,\n log_interval=5,\n warmup_ratio=0.1,\n gamma=1.0,\n multilabel=False,\n decoder_lr = 0.01,\n checkpoint_dir = None,\n optimizer = 'bertadam',\n classifier_dropout = 0.0,\n pure_classifier_objective = False,\n **kwargs):\n super(SeqBowEstimator, self).__init__(*args, optimizer=optimizer, **kwargs)\n self.pure_classifier_objective = pure_classifier_objective\n self.minimum_lr = 1e-9\n self.checkpoint_dir = checkpoint_dir\n self.bert_base = bert_base\n self.bert_model_name = bert_model_name\n self.bert_data_name = bert_data_name\n self.has_classifier = n_labels >= 2\n self.classifier_dropout = classifier_dropout\n self.multilabel = multilabel\n self.n_labels = n_labels\n self.metric = get_composite_p_and_r_metric() if multilabel else mx.metric.Accuracy()\n self.warmup_ratio = warmup_ratio\n self.log_interval = log_interval\n self.loss_function = gluon.loss.SigmoidBCELoss() if multilabel else gluon.loss.SoftmaxCELoss(sparse_label=False)\n self.gamma = gamma\n self.decoder_lr = decoder_lr\n self._bow_matrix = None\n self.bow_vocab = bow_vocab\n\n\n @classmethod\n def from_config(cls,\n config: Union[str, ag.space.Dict],\n bert_base: nlp.model.bert.BERTModel,\n bow_vocab: nlp.Vocab,\n n_labels: int = 0,\n reporter: Optional[object] = None,\n log_interval: int = 1,\n 
pretrained_param_file: Optional[str] = None,\n ctx: mx.context.Context = mx.cpu()) -> 'SeqBowEstimator':\n \"\"\"\n Instantiate an object of this class using the provided `config`\n\n Parameters:\n config: String to configuration path (in json format) or an autogluon dictionary representing the config\n bert_base: GluonNLP BERT model\n bow_vocab: Bag-of-words vocabulary used for decoding reconstruction target\n n_labels: Number of labels for (semi-)supervised modeling\n repoter: Autogluon reporter object with callbacks for logging model selection\n log_interval: Logging frequency (default = 1)\n pretrained_param_file: Parameter file\n ctx: MXNet context\n \n Returns:\n An object of this class\n \"\"\"\n if isinstance(config, str):\n try:\n with open(config, 'r') as f:\n config_dict = json.load(f)\n except:\n logging.error(\"File {} does not appear to be a valid config instance\".format(config))\n raise Exception(\"Invalid Json Configuration File\")\n config = ag.space.Dict(**config_dict)\n ldist_def = config.latent_distribution\n kappa = 0.0\n alpha = 1.0\n latent_distrib = ldist_def.dist_type\n n_latent = int(config.n_latent)\n if latent_distrib == 'logistic_gaussian':\n alpha = ldist_def.alpha\n latent_distribution = LogisticGaussianDistribution(n_latent, ctx=ctx, alpha=alpha)\n elif latent_distrib == 'vmf':\n kappa = ldist_def.kappa\n latent_distribution = HyperSphericalDistribution(n_latent, ctx=ctx, kappa=kappa)\n else:\n latent_distribution = GaussianDistribution(n_latent, ctx=ctx)\n model = cls(bert_base,\n bert_model_name = config.bert_model_name,\n bert_data_name = config.bert_data_name,\n bow_vocab = bow_vocab, \n n_labels = n_labels,\n latent_distribution = latent_distribution,\n batch_size = int(config.batch_size),\n redundancy_reg_penalty = 0.0,\n warmup_ratio = config.warmup_ratio,\n optimizer = config.optimizer,\n classifier_dropout = config.classifier_dropout,\n epochs = int(config.epochs),\n gamma = config.gamma,\n lr = config.lr,\n decoder_lr = config.decoder_lr,\n pretrained_param_file = pretrained_param_file,\n warm_start = (pretrained_param_file is not None),\n reporter=reporter,\n ctx=ctx,\n log_interval=log_interval)\n return model\n \n\n def initialize_with_pretrained(self):\n assert(self.pretrained_param_file is not None)\n self.model = self._get_model()\n self.model.load_parameters(self.pretrained_param_file, allow_missing=False)\n\n\n def _get_model_bias_initialize(self, train_data):\n model = self._get_model()\n tr_bow_counts = self._get_bow_wd_counts(train_data)\n model.initialize_bias_terms(tr_bow_counts)\n return model\n \n \n def _get_model(self):\n latent_dist = HyperSphericalDistribution(self.n_latent, kappa=64.0, ctx=self.ctx)\n model = SeqBowVED(self.bert_base, latent_dist, num_classes=self.n_labels, n_latent=self.n_latent,\n bow_vocab_size = len(self.bow_vocab), dropout=self.classifier_dropout)\n model.decoder.initialize(init=mx.init.Xavier(), ctx=self.ctx)\n model.latent_dist.initialize(init=mx.init.Xavier(), ctx=self.ctx)\n model.latent_dist.post_init(self.ctx)\n if model.has_classifier:\n model.classifier.initialize(init=mx.init.Normal(0.02), ctx=self.ctx)\n return model\n\n def _get_config(self):\n config = {}\n config['lr'] = self.lr\n config['decoder_lr'] = self.decoder_lr\n config['optimizer'] = self.optimizer\n config['n_latent'] = self.n_latent\n config['n_labels'] = self.n_labels\n config['batch_size'] = self.batch_size\n if isinstance(self.latent_distribution, HyperSphericalDistribution):\n config['latent_distribution'] = {'dist_type':'vmf', 
'kappa': self.latent_distribution.kappa}\n elif isinstance(self.latent_distribution, LogisticGaussianDistribution):\n config['latent_distribution'] = {'dist_type':'logistic_gaussian', 'alpha':self.latent_distribution.alpha}\n else:\n config['latent_distribution'] = {'dist_type':'gaussian'}\n config['epochs'] = self.epochs\n #config['embedding_source'] = self.embedding_source\n config['gamma'] = self.gamma\n config['redundancy_reg_penalty'] = self.redundancy_reg_penalty\n config['warmup_ratio'] = self.warmup_ratio\n config['bert_model_name'] = self.bert_model_name\n config['bert_data_name'] = self.bert_data_name\n config['classifier_dropout'] = self.classifier_dropout\n return config\n\n def write_model(self, model_dir: str, suffix: str ='') -> None:\n \"\"\"\n Writes the model within this estimator to disk.\n\n Parameters:\n model_dir: Output directory for model parameters, config and vocabulary\n suffix: Suffix to use for model (e.g. at different checkpoints)\n \"\"\"\n pfile = os.path.join(model_dir, ('model.params' + suffix))\n conf_file = os.path.join(model_dir, ('model.config' + suffix))\n vocab_file = os.path.join(model_dir, ('vocab.json' + suffix))\n self.model.save_parameters(pfile)\n config = self._get_config()\n specs = json.dumps(config, sort_keys=True, indent=4)\n with open(conf_file, 'w') as f:\n f.write(specs)\n with open(vocab_file, 'w') as f:\n f.write(self.bow_vocab.to_json())\n\n\n def log_train(self, batch_id, batch_num, metric, step_loss, rec_loss, red_loss, class_loss,\n log_interval, epoch_id, learning_rate):\n \"\"\"Generate and print out the log message for training. \"\"\"\n if self.has_classifier:\n metric_nm, metric_val = metric.get()\n if not isinstance(metric_nm, list):\n metric_nm, metric_val = [metric_nm], [metric_val]\n self._output_status(\"Epoch {} Batch {}/{} loss={}, (rec_loss = {}), (red_loss = {}), (class_loss = {}) lr={:.10f}, metrics[{}]: {}\"\n .format(epoch_id+1, batch_id+1, batch_num, step_loss/log_interval, rec_loss/log_interval, red_loss/log_interval,\n class_loss/log_interval, learning_rate, metric_nm, metric_val))\n else:\n self._output_status(\"Epoch {} Batch {}/{} loss={}, (rec_loss = {}), (red_loss = {}), (class_loss = {}) lr={:.10f}\"\n .format(epoch_id+1, batch_id+1, batch_num, step_loss/log_interval, rec_loss/log_interval, red_loss/log_interval,\n class_loss/log_interval, learning_rate))\n\n def log_eval(self, batch_id, batch_num, metric, step_loss, rec_loss, log_interval):\n metric_nm, metric_val = metric.get()\n if not isinstance(metric_nm, list):\n metric_nm, metric_val = [metric_nm], [metric_val]\n self._output_status(\"Batch {}/{} loss={} (rec_loss = {}), metrics: {:.10f}\"\n .format(batch_id+1, batch_num, step_loss/log_interval, rec_loss/log_interval, *metric_val))\n\n def _get_bow_matrix(self, dataloader, cache=False):\n bow_matrix = []\n max_rows = 2000000000 / len(self.bow_vocab)\n logging.info(\"Maximum rows for BOW matrix = {}\".format(max_rows))\n rows = 0\n for i, data_batch in enumerate(dataloader):\n seqs = data_batch[0]\n bow_batch = list(seqs[3].squeeze(axis=1))\n rows += len(bow_batch)\n if i >= max_rows:\n break\n bow_matrix.extend(bow_batch)\n bow_matrix = mx.nd.stack(*bow_matrix)\n if cache:\n self._bow_matrix = bow_matrix\n return bow_matrix\n\n def _get_bow_wd_counts(self, dataloader):\n sums = mx.nd.zeros(len(self.bow_vocab))\n for i, data_batch in enumerate(dataloader):\n seqs = data_batch[0]\n bow_batch = seqs[3].squeeze(axis=1)\n sums += bow_batch.sum(axis=0)\n return sums\n\n def 
_get_objective_from_validation_result(self, val_result):\n npmi = val_result['npmi']\n ppl = val_result['ppl']\n redundancy = val_result['redundancy']\n obj = (npmi - redundancy) - ( ( ppl / 1000 ) / self.coherence_coefficient )\n b_obj = max(min(obj, 100.0), -100.0)\n sc_obj = 1.0 / (1.0 + math.exp(-b_obj))\n if self.has_classifier and self.gamma >= 0.0:\n orig_obj = sc_obj\n sc_obj = val_result['accuracy'] if self.pure_classifier_objective else (sc_obj + self.gamma * val_result['accuracy']) / (1.0 + self.gamma)\n logging.info(\"Objective via classifier: {} based on accuracy = {} and topic objective = {}\"\n .format(sc_obj, val_result['accuracy'], orig_obj))\n return sc_obj\n\n def _get_losses(self, model, batch_data):\n input_ids, valid_length, type_ids, bow, label = batch_data\n elbo_ls, rec_ls, kl_ls, red_ls, out = model(\n input_ids.as_in_context(self.ctx), type_ids.as_in_context(self.ctx),\n valid_length.astype('float32').as_in_context(self.ctx), bow.as_in_context(self.ctx))\n if self.has_classifier:\n label = label.as_in_context(self.ctx)\n label_ls = self.loss_function(out, label)\n label_ls = label_ls.mean()\n total_ls = (self.gamma * label_ls) + elbo_ls.mean()\n ## update label metric (e.g. accuracy)\n if not self.multilabel:\n label_ind = label.argmax(axis=1)\n self.metric.update(labels=[label_ind], preds=[out])\n else:\n self.metric.update(labels=[label], preds=[out])\n else:\n total_ls = elbo_ls.mean()\n label_ls = mx.nd.zeros(total_ls.shape) \n return elbo_ls, rec_ls, kl_ls, red_ls, label_ls, total_ls\n\n def _get_unlabeled_losses(self, model, batch_data):\n inputs, vl, tt, bow, _ = batch_data\n elbo_ls, rec_ls, kl_ls, red_ls, out = model(\n inputs.as_in_context(self.ctx), tt.as_in_context(self.ctx),\n vl.astype('float32').as_in_context(self.ctx), bow.as_in_context(self.ctx))\n total_ls = elbo_ls.mean() / self.gamma\n return elbo_ls, rec_ls, kl_ls, red_ls, total_ls\n \n\n def fit_with_validation(self,\n train_data: gluon.data.DataLoader,\n dev_data: gluon.data.DataLoader,\n num_train_examples: int,\n aux_data: bool=True):\n \"\"\"\n Training function.\n\n Parameters:\n train_data: Gluon dataloader with training data.\n dev_data: Gluon dataloader with dev/validation data.\n num_train_examples: Number of training samples\n aux_data: Flag for whether auxilliary data is provided\n \"\"\"\n if self.model is None or not self.warm_start:\n model = self._get_model_bias_initialize(train_data)\n self.model = model\n \n accumulate = False\n\n all_model_params = model.collect_params()\n optimizer_params = {'learning_rate': self.lr, 'epsilon': 1e-6, 'wd': 0.02}\n non_decoder_params = {**model.bert.collect_params()}\n decoder_params = {**model.decoder.collect_params(), **model.latent_dist.collect_params()}\n if self.has_classifier:\n decoder_params.update(model.classifier.collect_params())\n\n trainer = gluon.Trainer(non_decoder_params, self.optimizer,\n optimizer_params, update_on_kvstore=False)\n dec_trainer = gluon.Trainer(decoder_params, 'adam', {'learning_rate': self.decoder_lr, 'epsilon': 1e-6, 'wd': 0.00001})\n #if args.dtype == 'float16':\n # amp.init_trainer(trainer)\n\n num_effective_samples = num_train_examples\n\n #step_size = self.batch_size * accumulate if accumulate else self.batch_size\n #num_train_steps = int((num_effective_samples / step_size) * self.epochs) + 1\n num_train_steps = len(train_data)\n if accumulate:\n num_train_steps /= accumulate\n \n warmup_ratio = self.warmup_ratio\n num_warmup_steps = int(num_train_steps * warmup_ratio)\n logging.info(\"Number of warmup 
steps = {}, num total train steps = {}, train examples = {}, batch_size = {}, epochs = {}\"\n .format(num_warmup_steps, num_train_steps, num_effective_samples, self.batch_size, self.epochs))\n step_num = 0\n\n # Do not apply weight decay on LayerNorm and bias terms\n for _, v in model.collect_params('.*beta|.*gamma|.*bias').items():\n v.wd_mult = 0.0\n # Collect differentiable parameters\n params = [p for p in all_model_params.values() if p.grad_req != 'null']\n clipped_params = []\n for p in non_decoder_params.values():\n if p.grad_req != 'null':\n clipped_params.append(p)\n \n # Set grad_req if gradient accumulation is required\n if (accumulate and accumulate > 1) or aux_data:\n for p in params:\n p.grad_req = 'add'\n\n loss_details = { 'step_loss': 0.0, 'elbo_loss': 0.0, 'red_loss': 0.0, 'class_loss': 0.0 }\n def update_loss_details(total_ls, elbo_ls, red_ls, class_ls):\n loss_details['step_loss'] += total_ls.mean().asscalar()\n loss_details['elbo_loss'] += elbo_ls.mean().asscalar()\n loss_details['red_loss'] += red_ls.mean().asscalar()\n if class_ls is not None:\n loss_details['class_loss'] += class_ls.mean().asscalar()\n \n for epoch_id in range(self.epochs):\n self.metric.reset()\n all_model_params.zero_grad()\n \n for (batch_id, data_batch) in enumerate(train_data):\n # data_batch is either a 2-tuple of: (labeled, unlabeled)\n # OR a 1-tuple of (labeled,)\n if len(data_batch) == 2:\n seqs, aux_seqs = data_batch\n else:\n seqs, aux_seqs = data_batch[0], None\n # learning rate schedule \n if step_num < num_warmup_steps:\n new_lr = self.lr * (step_num+1) / num_warmup_steps\n else:\n non_warmup_steps = step_num - num_warmup_steps\n offset = non_warmup_steps / (num_train_steps - num_warmup_steps)\n new_lr = self.lr - offset * self.lr\n new_lr = max(new_lr, self.minimum_lr)\n trainer.set_learning_rate(new_lr)\n # forward and backward with optional auxilliary data\n with mx.autograd.record():\n elbo_ls, rec_ls, kl_ls, red_ls, label_ls, total_ls = self._get_losses(model, seqs)\n total_ls.backward()\n if aux_seqs is not None:\n with mx.autograd.record():\n elbo_ls_2, rec_ls_2, kl_ls_2, red_ls_2, total_ls_2 = self._get_unlabeled_losses(model, aux_seqs)\n total_ls_2.backward()\n\n update_loss_details(total_ls, elbo_ls, red_ls, label_ls)\n if aux_seqs is not None:\n update_loss_details(total_ls_2, elbo_ls_2, red_ls_2, None)\n\n # update\n if not accumulate or (batch_id + 1) % accumulate == 0:\n trainer.allreduce_grads()\n dec_trainer.allreduce_grads()\n nlp.utils.clip_grad_global_norm(clipped_params, 1.0, check_isfinite=True)\n trainer.update(accumulate if accumulate else 1)\n dec_trainer.update(accumulate if accumulate else 1)\n step_num += 1\n if (accumulate and accumulate > 1) or aux_data:\n # set grad to zero for gradient accumulation\n all_model_params.zero_grad()\n if (batch_id + 1) % (self.log_interval) == 0:\n self.log_train(batch_id, num_train_steps, self.metric, loss_details['step_loss'],\n loss_details['elbo_loss'], loss_details['red_loss'], loss_details['class_loss'], self.log_interval,\n epoch_id, trainer.learning_rate)\n ## reset loss details\n for d in loss_details:\n loss_details[d] = 0.0\n mx.nd.waitall()\n\n # inference on dev data\n if dev_data is not None:\n sc_obj, v_res = self._perform_validation(model, dev_data, epoch_id)\n else:\n sc_obj, v_res = None, None\n if self.checkpoint_dir:\n self.write_model(self.checkpoint_dir, suffix=str(epoch_id))\n return sc_obj, v_res\n\n\n def _compute_coherence(self, model, k, test_data, log_terms=False):\n num_topics = model.n_latent\n 
sorted_ids = model.get_top_k_terms(k, ctx=self.ctx)\n num_topics = min(num_topics, sorted_ids.shape[-1])\n top_k_words_per_topic = [[ int(i) for i in list(sorted_ids[:k, t])] for t in range(num_topics)]\n npmi_eval = EvaluateNPMI(top_k_words_per_topic)\n npmi = npmi_eval.evaluate_csr_mat(test_data)\n unique_term_ids = set()\n unique_limit = 5 ## only consider the top 5 terms for each topic when looking at degree of redundancy\n for i in range(num_topics):\n topic_ids = list(top_k_words_per_topic[i][:unique_limit])\n for j in range(len(topic_ids)):\n unique_term_ids.add(topic_ids[j])\n redundancy = (1.0 - (float(len(unique_term_ids)) / num_topics / unique_limit)) ** 2.0\n logging.info(\"Test Coherence: {}\".format(npmi))\n #if log_terms:\n # top_k_tokens = [list(map(lambda x: self.vocabulary.idx_to_token[x], list(li))) for li in top_k_words_per_topic]\n # for i in range(num_topics):\n # logging.info(\"Topic {}: {}\".format(i, top_k_tokens[i]))\n return npmi, redundancy\n \n\n def _perform_validation(self, model, dev_data, epoch_id):\n v_res, metric_nm, metric_val = self.validate(model, dev_data)\n sc_obj = self._get_objective_from_validation_result(v_res)\n if 'accuracy' in v_res:\n self._output_status(\"Epoch [{}]. Objective = {} ==> PPL = {}. NPMI ={}. Redundancy = {}. Accuracy = {}.\"\n .format(epoch_id, sc_obj, v_res['ppl'], v_res['npmi'], v_res['redundancy'], v_res['accuracy']))\n else:\n self._output_status(\"Epoch [{}]. Objective = {} ==> PPL = {}. NPMI ={}. Redundancy = {}.\"\n .format(epoch_id, sc_obj, v_res['ppl'], v_res['npmi'], v_res['redundancy']))\n if self.reporter:\n if 'accuracy' in v_res:\n self.reporter(epoch=epoch_id+1, objective=sc_obj, time_step=time.time(), coherence=v_res['npmi'],\n perplexity=v_res['ppl'], redundancy=v_res['redundancy'], accuracy=v_res['accuracy'])\n else:\n self.reporter(epoch=epoch_id+1, objective=sc_obj, time_step=time.time(), coherence=v_res['npmi'],\n perplexity=v_res['ppl'], redundancy=v_res['redundancy'])\n return sc_obj, v_res\n \n \n def validate(self, model, dataloader):\n bow_matrix = self._bow_matrix if self._bow_matrix is not None else self._get_bow_matrix(dataloader, cache=True)\n num_words = bow_matrix.sum().asscalar()\n npmi, redundancy = self._compute_coherence(model, 10, bow_matrix, log_terms=True)\n self.metric.reset()\n step_loss = 0\n elbo_loss = 0\n total_rec_loss = 0.0\n total_kl_loss = 0.0\n for batch_id, data_batch in enumerate(dataloader):\n seqs = data_batch[0]\n elbo_ls, rec_ls, kl_ls, red_ls, label_ls, total_ls = self._get_losses(model, seqs)\n total_rec_loss += rec_ls.sum().asscalar()\n total_kl_loss += kl_ls.sum().asscalar()\n step_loss += total_ls.mean().asscalar()\n elbo_loss += elbo_ls.mean().asscalar()\n if (batch_id + 1) % (self.log_interval) == 0:\n logging.debug('All loss terms: {}, {}, {}, {}, {}, {}'.format(elbo_ls, rec_ls, kl_ls, red_ls, label_ls, total_ls))\n self.log_eval(batch_id, len(dataloader), self.metric, step_loss, elbo_loss, self.log_interval)\n step_loss = 0\n elbo_loss = 0\n likelihood = (total_rec_loss + total_kl_loss) / num_words\n if likelihood < 709.0:\n perplexity = math.exp(likelihood)\n else:\n perplexity = 1e300\n v_res = {'ppl':perplexity, 'npmi': npmi, 'redundancy': redundancy}\n metric_nm = 0.0\n metric_val = 0.0\n if self.has_classifier:\n metric_nm, metric_val = self.metric.get()\n if not isinstance(metric_nm, list):\n metric_nm, metric_val = [metric_nm], [metric_val]\n self._output_status(\"Validation metric: {:.6}\".format(metric_val[0]))\n v_res['accuracy'] = metric_val[0]\n return 
v_res, metric_nm, metric_val\n\n\nclass SeqBowMetricEstimator(SeqBowEstimator):\n\n def __init__(self, *args, sdml_smoothing_factor=0.3, fixed_data=None, fixed_test_data=None, plot_dir=None, **kwargs):\n super(SeqBowMetricEstimator, self).__init__(*args, **kwargs)\n self.loss_function = GeneralizedSDMLLoss(smoothing_parameter=sdml_smoothing_factor)\n self.fixed_batch = None\n self.fixed_test_batch = None\n self.plot_dir = plot_dir\n if fixed_data:\n self.fixed_batch = next(enumerate(fixed_data))[1] # take the first batch and fix\n if fixed_test_data:\n self.fixed_test_batch = next(enumerate(fixed_test_data))[1]\n\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n est = super().from_config(*args, **kwargs)\n return est\n \n def _get_model(self, bow_size=-1):\n bow_size = bow_size if bow_size > 1 else len(self.bow_vocab)\n latent_dist = HyperSphericalDistribution(self.n_latent, kappa=64.0, ctx=self.ctx)\n model = MetricSeqBowVED(self.bert_base, latent_dist, n_latent=self.n_latent,\n bow_vocab_size = len(self.bow_vocab), dropout=self.classifier_dropout)\n model.decoder.initialize(init=mx.init.Xavier(), ctx=self.ctx)\n model.latent_dist.initialize(init=mx.init.Xavier(), ctx=self.ctx)\n model.latent_dist.post_init(self.ctx)\n if self.pretrained_param_file is not None:\n model.load_parameters(self.pretrained_param_file, allow_missing=False)\n return model\n\n def _get_model_bias_initialize(self, train_data):\n model = self._get_model()\n tr_bow_matrix = self._get_bow_matrix(train_data)\n model.initialize_bias_terms(tr_bow_matrix.sum(axis=0))\n return model\n \n\n def _get_bow_matrix(self, dataloader, cache=False):\n bow_matrix = []\n for _, data_batch in enumerate(dataloader):\n seqs = data_batch[0]\n if self.fixed_batch:\n batch_1 = seqs\n else:\n batch_1, batch_2 = seqs \n bow_matrix.extend(list(batch_2[3].squeeze(axis=1)))\n bow_matrix.extend(list(batch_1[3].squeeze(axis=1)))\n if self.fixed_batch:\n bow_matrix.extend(list(self.fixed_batch[3].squeeze(axis=1)))\n bow_matrix = mx.nd.stack(*bow_matrix)\n if cache:\n self._bow_matrix = bow_matrix\n return bow_matrix\n\n def _ff_batch(self, model, batch_data, on_test=False):\n if on_test:\n if self.fixed_test_batch:\n batch1 = batch_data\n batch2 = self.fixed_test_batch\n else:\n batch1, batch2 = batch_data\n else:\n if self.fixed_batch:\n batch1 = batch_data\n batch2 = self.fixed_batch\n else:\n batch1, batch2 = batch_data\n in1, vl1, tt1, bow1, label1 = batch1\n in2, vl2, tt2, bow2, label2 = batch2\n elbo_ls, rec_ls, kl_ls, red_ls, z_mu1, z_mu2 = model(\n in1.as_in_context(self.ctx), tt1.as_in_context(self.ctx),\n vl1.astype('float32').as_in_context(self.ctx), bow1.as_in_context(self.ctx),\n in2.as_in_context(self.ctx), tt2.as_in_context(self.ctx),\n vl2.astype('float32').as_in_context(self.ctx), bow2.as_in_context(self.ctx))\n return elbo_ls, rec_ls, kl_ls, red_ls, z_mu1, z_mu2, label1, label2\n\n def _get_losses(self, model, batch_data):\n elbo_ls, rec_ls, kl_ls, red_ls, z_mu1, z_mu2, label1, label2 = self._ff_batch(model, batch_data)\n ## convert back to label indices rather than 1-hot vecs\n label1_ind = label1.argmax(axis=1)\n label2_ind = label2.argmax(axis=1)\n label1 = label1_ind.as_in_context(self.ctx)\n label2 = label2_ind.as_in_context(self.ctx)\n label_ls = self.loss_function(z_mu1, label1, z_mu2, label2)\n label_ls = label_ls.mean()\n total_ls = (self.gamma * label_ls) + elbo_ls.mean()\n return elbo_ls, rec_ls, kl_ls, red_ls, label_ls, total_ls\n\n def _get_unlabeled_losses(self, model, batch_data):\n in1, vl1, tt1, bow1, 
label1 = batch_data\n elbo_ls, rec_ls, kl_ls, red_ls = model.unpaired_input_forward(\n in1.as_in_context(self.ctx), tt1.as_in_context(self.ctx),\n vl1.astype('float32').as_in_context(self.ctx), bow1.as_in_context(self.ctx))\n total_ls = elbo_ls / self.gamma\n return elbo_ls, rec_ls, kl_ls, red_ls, total_ls\n \n def classifier_validate(self, model, dataloader, epoch_id):\n posteriors = []\n ground_truth = []\n ground_truth_idx = []\n emb2 = None\n emb1 = []\n for batch_id, data_batch in enumerate(dataloader):\n seqs = data_batch[0]\n elbo_ls, rec_ls, kl_ls, red_ls, z_mu1, z_mu2, label1, label2 = self._ff_batch(model, seqs, on_test=True)\n label_mat = self.loss_function._compute_labels(mx.ndarray, label1, label2)\n dists = self.loss_function._compute_distances(z_mu1, z_mu2)\n probs = mx.nd.softmax(-dists, axis=1).asnumpy()\n posteriors += list(probs)\n label1 = np.array(label1.squeeze().asnumpy(), dtype='int')\n ground_truth_idx += list(label1) ## index values for labels\n gt = np.zeros((label1.shape[0], int(mx.nd.max(label2).asscalar())+1))\n gt[np.arange(label1.shape[0]), label1] = 1\n ground_truth += list(gt)\n if emb2 is None:\n emb2 = z_mu2.asnumpy()\n emb1 += list(z_mu1.asnumpy())\n posteriors = np.array(posteriors)\n ground_truth = np.array(ground_truth)\n ground_truth_idx = np.array(ground_truth_idx)\n if not np.any(np.isnan(posteriors)):\n avg_prec = average_precision_score(ground_truth, posteriors, average='weighted')\n else:\n avg_prec = 0.0\n logging.info('EVALUTAION: Ground truth indices: {}'.format(list(ground_truth_idx)))\n try:\n auroc = roc_auc_score(ground_truth, posteriors, average='weighted')\n except:\n auroc = 0.0\n logging.error('ROC computation failed')\n ndcg = ndcg_score(ground_truth, posteriors)\n top_acc_1 = top_k_accuracy_score(ground_truth_idx, posteriors, k=1) \n top_acc_2 = top_k_accuracy_score(ground_truth_idx, posteriors, k=2)\n top_acc_3 = top_k_accuracy_score(ground_truth_idx, posteriors, k=3)\n top_acc_4 = top_k_accuracy_score(ground_truth_idx, posteriors, k=4)\n y = np.where(ground_truth > 0)[1]\n if self.plot_dir:\n ofile = self.plot_dir + '/' + 'plot_' + str(epoch_id) + '.png'\n umap_model = umap.UMAP(n_neighbors=4, min_dist=0.5, metric='euclidean')\n embeddings = umap_model.fit_transform(np.array(emb1))\n #mapper = umap_model.fit(np.array(emb1))\n plt.scatter(*embeddings.T, c=y, s=0.8, alpha=0.9, cmap='coolwarm')\n #umap.plot.points(mapper, labels=y)\n plt.savefig(ofile)\n plt.close(\"all\")\n return {'avg_prec': avg_prec, 'top_1': top_acc_1, 'top_2': top_acc_2, 'top_3': top_acc_3, 'top_4': top_acc_4,\n 'au_roc': auroc, 'ndcg': ndcg}\n\n \n def _perform_validation(self, model, dev_data, epoch_id):\n v_res = self.classifier_validate(model, dev_data, epoch_id)\n self._output_status(\"Epoch [{}]. Objective = {} ==> Avg. 
Precision = {}, AuROC = {}, NDCG = {} [acc@1= {}, acc@2={}, acc@3={}, acc@4={}]\"\n .format(epoch_id, v_res['avg_prec'], v_res['avg_prec'], v_res['au_roc'], v_res['ndcg'],\n v_res['top_1'], v_res['top_2'], v_res['top_3'], v_res['top_4']))\n if self.reporter:\n self.reporter(epoch=epoch_id+1, objective=v_res['avg_prec'], time_step=time.time(), coherence=0.0,\n perplexity=0.0, redundancy=0.0)\n return v_res['avg_prec'], v_res\n\n\nclass DeepAveragingBowEstimator(BaseEstimator):\n\n def __init__(self, vocabulary, n_labels, gamma, emb_dim, emb_dr, seq_length, *args, **kwargs):\n super(DeepAveragingBowEstimator, self).__init__(*args, **kwargs)\n self.vocabulary = vocabulary\n self.n_labels = n_labels\n self.gamma = gamma\n self.emb_in_dim = len(vocabulary)\n self.emb_out_dim = emb_dim\n self.emb_dr = emb_dr\n self.seq_length = seq_length\n self.validate_each_epoch = False\n\n def _get_model(self):\n model = DeepAveragingVAEModel(self.n_labels, self.gamma, self.emb_in_dim, self.emb_out_dim , self.emb_dr, self.seq_length,\n vocabulary=self.vocabulary, n_latent=self.n_latent, latent_distrib=self.latent_distrib,\n batch_size=self.batch_size, wd_freqs=self.wd_freqs, ctx=self.ctx)\n return model\n \n\n def _forward(self, model, ids, lens, bow, labels, l_mask):\n return model(ids, lens, bow, labels, l_mask)\n\n\n def fit_with_validation(self, X, y, val_X, val_y):\n seq_train, bow_train = X\n model = self._get_model()\n self.model = model\n\n dataloader = mx.gluon.data.DataLoader(seq_train, batch_size=self.batch_size,\n shuffle=True, last_batch='rollover')\n if val_X is not None:\n seq_val, bow_val = val_X\n dataloader_val = mx.gluon.data.DataLoader(seq_val, batch_size=self.batch_size, last_batch='rollover',\n shuffle=False)\n\n trainer = gluon.Trainer(self.model.collect_params(), self.optimizer, {'learning_rate': self.lr})\n sc_obj, npmi, ppl, redundancy = 0.0, 0.0, 0.0, 0.0\n \n for epoch in range(self.epochs):\n ts_epoch = time.time()\n elbo_losses = []\n lab_losses = []\n for i, seqs in enumerate(dataloader):\n ids, valid_len, output_bow, labels = seqs\n if labels is None:\n labels = mx.nd.expand_dims(mx.nd.zeros(), 1)\n mask = None\n else:\n if len(labels.shape) > 1:\n mask = labels.sum(axis=1) >= 0.0\n else:\n mask = labels >= 0.0\n mask = mask.as_in_context(self.ctx)\n ids = ids.as_in_context(self.ctx)\n labels = labels.as_in_context(self.ctx)\n valid_len = valid_len.as_in_context(self.ctx)\n output_bow = output_bow.as_in_context(self.ctx)\n\n with autograd.record():\n elbo, kl_loss, rec_loss, coherence_loss, redundancy_loss, lab_loss = \\\n self._forward(self.model, ids, valid_len, output_bow, labels, mask)\n elbo_mean = elbo.mean()\n elbo_mean.backward()\n trainer.step(1)\n if not self.quiet:\n elbo_losses.append(float(elbo_mean.asscalar()))\n if lab_loss is not None:\n lab_losses.append(float(lab_loss.mean().asscalar()))\n if not self.quiet and not self.validate_each_epoch:\n elbo_mean = np.mean(elbo_losses) if len(elbo_losses) > 0 else 0.0\n lab_mean = np.mean(lab_losses) if len(lab_losses) > 0 else 0.0\n self._output_status(\"Epoch [{}] finished in {} seconds. 
[elbo = {}, label loss = {}]\"\n .format(epoch+1, (time.time()-ts_epoch), elbo_mean, lab_mean))\n if val_X is not None and (self.validate_each_epoch or epoch == self.epochs-1):\n _, val_X_sp = val_X\n npmi, redundancy = self._npmi(val_X_sp)\n self._output_status(\"NPMI ==> {}\".format(npmi))\n #ppl, npmi, redundancy = self.validate(val_X, val_y)\n #if self.reporter:\n # obj = (npmi - redundancy) * self.coherence_coefficient - ( ppl / 1000 )\n # b_obj = max(min(obj, 100.0), -100.0)\n # sc_obj = 1.0 / (1.0 + math.exp(-b_obj))\n # self._output_status(\"Epoch [{}]. Objective = {} ==> PPL = {}. NPMI ={}. Redundancy = {}.\"\n # .format(epoch+1, sc_obj, ppl, npmi, redundancy))\n # self.reporter(epoch=epoch+1, objective=sc_obj, time_step=time.time(),\n # coherence=npmi, perplexity=ppl, redundancy=redundancy)\n \n return sc_obj, {'npmi': npmi, 'ppl': ppl, 'redundancy': redundancy}\n\n \n" ]
[ [ "numpy.array", "numpy.isnan", "numpy.zeros", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "numpy.mean", "sklearn.metrics.precision_recall_fscore_support", "numpy.where", "sklearn.metrics.top_k_accuracy_score", "sklearn.metrics.ndcg_score", "sklearn.metrics.average_precision_score", "numpy.arange", "matplotlib.pyplot.scatter", "sklearn.metrics.roc_auc_score", "numpy.unique" ] ]
chari8/MathematicalBiology_Murray
[ "c787636b389b3eb8280571d931ed1d59117909fd" ]
[ "sec3/bc.py" ]
[ "import os, sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns #not used\n\ndef main(step_number, lamb, a, m, T, g, init):\n print(\"step_number:%d, lamb:%f, a:%d, m:%d, T:%d, g:%d\\n\" % (step_number, lamb, a, m, T, g))\n c = np.zeros(step_number + 10) \n\n for i in range(T):\n c[i] = init[i]\n\n for t in range(step_number):\n if(t >= T-1):\n dev = (lamb * a**m * c[t-T+1]) / (a**m + c[t-T+1]**m) - g*c[t]\n c[t+1] = dev + c[t]\n return c[:step_number]\n\n\ndef plot_series(dataset):\n\n set_num = len(dataset)\n \n for i in range(set_num):\n data = dataset[i][0]\n T = dataset[i][1]\n\n plt.subplot(set_num,2, i*2 + 1)\n plt.plot(range(data.size), data)\n plt.title(\"c(t)-t\")\n plt.xlabel(\"day\")\n plt.ylabel(\"c(t)/a\")\n\n plt.subplot(set_num, 2, i*2 + 2)\n plt.plot(data[T:], data[:(data.size - T)])\n plt.title(\"c(t-T)-c(t)\")\n plt.xlabel(\"c(t)/a\")\n plt.ylabel(\"c(t-T)/a\")\n\n plt.tight_layout()\n plt.show()\n\ndef plot_multi(dataset):\n\n set_num = len(dataset)\n hight = int(np.ceil(np.sqrt(set_num)))\n width = int(np.ceil(np.sqrt(set_num)))\n \n cnt = 0\n for i in range(set_num):\n data = dataset[i][0]\n T = dataset[i][1]\n plt.subplot(hight, width,i+1)\n plt.plot(data[T:], data[:(data.size - T)])\n\n plt.tight_layout()\n plt.show()\n\n\ndef plot_by_c(data, T):\n x = data[T:]\n y = data[:(data.size - T)] \n\n plt.xlabel(\"c(t)/a\")\n plt.ylabel(\"c(t-T)/a\")\n plt.plot(x, y)\n plt.show()\n\n\ndef plot_by_time(data):\n x = range(data.size)\n y = data\n\n plt.xlabel(\"day\")\n plt.ylabel(\"c(t)/a\")\n plt.plot(x, y)\n plt.show()\n\n\nif __name__ == '__main__':\n ##### params #####\n step_number = 1500\n lamb = 0.2\n a = 1.0\n m = 10\n T = 2\n g = 0.1\n\n init = [a*np.random.rand() for i in range(1000)]\n#init = np.linspace(0.15, 1.15, 100)\n#init = [0.5, 0.8]\n\n#dataset = [ [main(step_number, lamb, a, float(m), 6, g, init[:6]), 6, m], [main(step_number, lamb, a, float(m), 20, g, init[:20]), 20, m]] \n dataset = [ [main(step_number, lamb, a, float(m), T, g, init[:T]), T, m] for T in range(1, 50, 5)] \n#dataset = [ [main(step_number, lamb, a, float(m), T, g, init[:T]), T]] \n\n for data in dataset:\n print(np.array_equal(dataset[0][0], data[0]))\n\n#plot_series(dataset)\n plot_multi(dataset)\n" ]
[ [ "numpy.random.rand", "numpy.array_equal", "numpy.zeros", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.ylabel", "numpy.sqrt", "matplotlib.pyplot.show", "matplotlib.pyplot.subplot" ] ]
zhy0/sig-tsc
[ "a9d01760233f0fbb25d53a73225e9ee7bf53e1b3" ]
[ "model.py" ]
[ "#!/usr/bin/env python3\n\nimport numpy as np\nfrom iisignature import sig, logsig, prepare\nfrom sklearn import preprocessing\n\n\nclass SigModel:\n \"\"\"\n Signature classification/regression model.\n\n Params:\n model: (class) sklearn classification/regression model to use\n level: (int) signature truncation level to use\n transform: (callable) path embedding function, e.g., SigModel.leadlag\n scale: (bool) whether to apply column-wise scaling of signature features\n using to sklearn.preprocessing.scale.\n \"\"\"\n\n def __init__(self, model, level=2, transform=lambda x: x, scale=True, **model_args):\n self.level = level\n self.model = model(**model_args)\n self.transform = transform\n self.scale = scale\n\n def preprocess(self, X):\n \"\"\"\n Preprocess training/testing data using signatures.\n \"\"\"\n data = [sig(self.transform(x), self.level) for x in X]\n if self.scale:\n data = preprocessing.scale(data)\n return data\n\n def train(self, X, Y):\n \"\"\"Train the signature model\"\"\"\n assert len(X) == len(Y)\n self.model.fit(np.array(self.preprocess(X)), Y)\n\n def predict(self, X):\n \"\"\"Predict using trained model\"\"\"\n return self.model.predict(self.preprocess(X))\n\n def score(self, X, Y):\n \"\"\"Output score of trained model, depends on used model\"\"\"\n return self.model.score(self.preprocess(X), Y)\n\n\n @staticmethod\n def time_indexed(X):\n \"\"\"\n Turn 1-dimensional list into 2-dimensional list of points by adding\n list index.\n\n Params:\n X: (list) 1-dimensional list of length N to be transformed\n\n Returns: (list) 2-dimensional list of shape (N, 2)\n \"\"\"\n if not np.shape(X) == (len(X),):\n raise ValueError(\"Input does not have correct shape\")\n\n return np.transpose([np.arange(len(X)), X])\n\n\n @staticmethod\n def lead_lag(X):\n \"\"\"\n Compute lead-lag transformation of 1-dimensional list of values.\n\n Params:\n X: (list) 1-dimensional list of length N to be transformed\n\n Returns: (list) 2-dimensional list of shape (N, 2)\n \"\"\"\n if not np.shape(X) == (len(X),):\n raise ValueError(\"Input does not have correct shape\")\n\n lead = np.transpose([X, X]).flatten()[1:]\n lag = np.transpose([X, X]).flatten()[0:-1]\n return np.transpose([lead, lag])\n\n\n @staticmethod\n def time_joined(X):\n \"\"\"\n Compute time-joined transformation of a path.\n\n Params:\n X: (list) a list of shape (N,2) or (N,) with N length of path; in\n the case of (N,2), the first component of the path must be the\n time index.\n\n Returns: (list) dimensional list of shape (N, 2)\n \"\"\"\n if np.shape(X) == (len(X),):\n # if there is no time index, we simply use the list index\n Y = np.array([np.arange(len(X)), X])\n elif np.shape(X) == (len(X), 2):\n Y = np.transpose(X)\n else:\n raise ValueError(\"Input does not have correct shape\")\n\n t = np.transpose([Y[0], Y[0]]).flatten()\n Z = np.insert(np.transpose([Y[1], Y[1]]).flatten()[0:-1], 0,0)\n return np.transpose([t,Z])\n\n\nclass LogSigModel(SigModel):\n \"\"\"\n Classification/regression model using log signature features.\n\n Params:\n model: (class) sklearn classification/regression model to use\n dim: dimension of transformed path, needed for iisignature\n level: (int) signature truncation level to use\n transform: (callable) path embedding function, e.g., SigModel.leadlag\n scale: (bool) whether to apply column-wise scaling of signature features\n using to sklearn.preprocessing.scale.\n \"\"\"\n\n def __init__(self, model, dim, level=2, transform=lambda x: x, **model_args):\n self.prepared = prepare(dim, level) # 
iisignature prepare log signature\n super().__init__(model, level, transform, **model_args)\n\n def preprocess(self, X):\n return [logsig(self.transform(x), self.prepared) for x in X]\n" ]
[ [ "sklearn.preprocessing.scale", "numpy.transpose", "numpy.shape" ] ]
feedbackward/mml
[ "d257e0508d75c86c63f01dd6cfe6b48b79e0d4d4" ]
[ "mml/data/protein/protein.py" ]
[ "'''H5 data prep'''\n\n## External modules.\nimport csv\nimport numpy as np\nimport os\nimport tables\n\n## Internal modules.\nfrom mml.config import dir_data_toread\nfrom mml.config import dir_data_towrite\nfrom mml.utils import makedir_safe\n\n\n###############################################################################\n\n\n## Clerical setup.\n\ndata_name = \"protein\"\n\ntoread = os.path.join(dir_data_toread, data_name, \"bio_train.dat\")\nnewdir = os.path.join(dir_data_towrite, data_name)\ntowrite = os.path.join(newdir, \"protein.h5\")\n\nn_all = 145751\nnum_features = 74\nnum_classes = 2\nnum_labels = 1\n\ntitle = data_name+\": Full dataset\"\ntitle_X = data_name+\": Features\"\ntitle_y = data_name+\": Labels\"\n\ndtype_X = np.float32\natom_X = tables.Float32Atom()\ndtype_y = np.uint8\natom_y = tables.UInt8Atom()\n\n\ndef raw_to_h5():\n '''\n Transform the raw dataset into one of HDF5 type.\n '''\n \n X_raw = np.zeros((n_all,num_features), dtype=dtype_X)\n y_raw = np.zeros((n_all,num_labels), dtype=dtype_y)\n \n print(\"Preparation: {}\".format(data_name))\n\n ## Read in the raw data.\n with open(toread, newline=\"\") as f_table:\n\n print(\"Read {}.\".format(toread))\n \n f_reader = csv.reader(f_table, delimiter=\"\\t\")\n \n ## Populate the placeholder numpy arrays.\n i = 0\n for line in f_reader:\n if len(line) > 0:\n X_raw[i,:] = np.array(line[3:],\n dtype=X_raw.dtype)\n y_raw[i,0] = np.array(line[2],\n dtype=y_raw.dtype)\n i += 1\n \n ## Create and populate the HDF5 file.\n makedir_safe(newdir)\n with tables.open_file(towrite, mode=\"w\", title=title) as myh5:\n myh5.create_array(where=myh5.root,\n name=\"X\",\n obj=X_raw,\n atom=atom_X,\n title=title_X)\n myh5.create_array(where=myh5.root,\n name=\"y\",\n obj=y_raw,\n atom=atom_y,\n title=title_y)\n print(myh5)\n\n print(\"Wrote {}.\".format(towrite))\n\n ## Exit all context managers before returning.\n print(\"Done ({}).\".format(data_name))\n return None\n\n\nif __name__ == \"__main__\":\n raw_to_h5()\n\n\n###############################################################################\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
BillyBai/Tensorflow_CNN_Models
[ "03ca57b1adcdb12d02c5ef128c47dd4fd8ba1251" ]
[ "test.py" ]
[ "import tensorflow as tf\r\nimport config\r\nimport numpy as np\r\nimport json\r\nimport time\r\nfrom models.model_factory import get_models\r\nfrom utils.data_utils import load_image, test_10_crop_iterator\r\nfrom utils.train_utils import cross_entropy_batch, l2_loss\r\nfrom utils.augment_utils import test_10_crop\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\n\r\nmodel_path = './result/weight/Inception_ResNetV2_Pretrain_Tricks.h5'\r\nimage_path = './data/MyData/TestData/01_Dry_Box/01_Dry_Box_14.jpg'\r\n\r\n\r\ndef test_one_image(model):\r\n # show\r\n img, _ = load_image(tf.constant(image_path), 0)\r\n prediction = model(np.array([img]), training=False)\r\n label = np.argmax(prediction)\r\n\r\n with open('data/label_to_content.json', 'r') as f:\r\n begin_time = time.time()\r\n label_to_content = f.readline()\r\n label_to_content = json.loads(label_to_content)\r\n\r\n print('-' * 100)\r\n print('Test one image:')\r\n print('image: {}\\nclassification: {}\\nconfidence: {:.4f}'.format(image_path,\r\n label_to_content[str(label)],\r\n prediction[0, label]))\r\n end_time = time.time()\r\n run_time = end_time - begin_time\r\n print('run time:', run_time)\r\n print(np.around(np.squeeze(prediction), decimals=4))\r\n print('-' * 100)\r\n\r\n\r\[email protected]\r\ndef test_step(model, images, labels):\r\n prediction = model(images, training=False)\r\n prediction = tf.reduce_mean(prediction, axis=0)\r\n cross_entropy = cross_entropy_batch([labels], [prediction])\r\n return cross_entropy, prediction\r\n\r\n\r\ndef test(model):\r\n test_data_iterator, test_num = test_10_crop_iterator()\r\n\r\n test_sum_cross_entropy = 0\r\n test_sum_correct_num = 0\r\n for i in range(test_num):\r\n images, labels = test_data_iterator.next()\r\n cross_entropy, prediction = test_step(model, images, labels)\r\n\r\n test_sum_cross_entropy += cross_entropy * config.batch_size\r\n if np.argmax(prediction) == np.argmax(labels):\r\n test_sum_correct_num += 1\r\n message = \"Test 10 crop: cross entropy loss: {:.5f}, \" \\\r\n \"l2 loss: {:.5f}, accuracy: {:.5f}\".format(test_sum_cross_entropy / test_num,\r\n l2_loss(model),\r\n test_sum_correct_num / test_num)\r\n print('-' * 100)\r\n print(message)\r\n print('-' * 100)\r\n return message\r\n\r\n\r\ndef test_all_image_10_crop(model):\r\n # show\r\n message = test(model)\r\n return message\r\n\r\n\r\ndef test_one_image_10_crop(model):\r\n # show\r\n img, _ = load_image(tf.constant(image_path), 0)\r\n img = test_10_crop(img)\r\n prediction = model(img, training=False)\r\n prediction = tf.reduce_mean(prediction, axis=0)\r\n label = np.argmax(prediction)\r\n\r\n with open('data/label_to_content.json', 'r') as f:\r\n begin_time = time.time()\r\n label_to_content = f.readline()\r\n label_to_content = json.loads(label_to_content)\r\n\r\n print('-' * 100)\r\n print('Test one image 10 crop:')\r\n print('image: {}\\nclassification: {}\\nconfidence: {:.4f}'.format(image_path,\r\n label_to_content[str(label)],\r\n prediction[label]))\r\n end_time = time.time()\r\n run_time = end_time - begin_time\r\n print('run time:', run_time)\r\n print(np.around(np.squeeze(prediction), decimals=4))\r\n print('-' * 100)\r\n\r\n\r\n\r\n\r\ndef main():\r\n # get model\r\n print(\"Loading model...\")\r\n model = get_models(\"Inception_ResNetV2_Pretrain\")\r\n model.build(input_shape=(None,) + config.input_shape)\r\n model.load_weights(model_path)\r\n test_one_image(model)\r\n test_one_image_10_crop(model)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "numpy.array", "tensorflow.constant", "numpy.argmax", "tensorflow.reduce_mean", "numpy.squeeze" ] ]
openclimatedata/pymagicc
[ "b6e9c1a34b85ff45b92be8fd03e43d601eaba06e" ]
[ "pymagicc/io/scen.py" ]
[ "import warnings\n\nimport pandas as pd\nfrom six import StringIO\n\nfrom pymagicc.definitions import (\n PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0,\n PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1,\n convert_magicc6_to_magicc7_variables,\n convert_magicc7_to_openscm_variables,\n convert_magicc_to_openscm_regions,\n convert_pint_to_fortran_safe_units,\n)\n\nfrom .base import _EmisInReader, _Writer\nfrom .utils import _strip_emis_variables, get_region_order\n\n\nclass _NonStandardEmisInReader(_EmisInReader):\n def _set_lines(self):\n with self._open_file() as f:\n self.lines = f.readlines()\n\n def read(self):\n self._set_lines()\n self._stream = self._get_stream()\n header_notes_lines = self._read_header()\n df, columns = self.read_data_block()\n header_notes_lines += self._read_notes()\n\n metadata = {\"header\": \"\".join(header_notes_lines)}\n metadata.update(self.process_header(metadata[\"header\"]))\n\n return metadata, df, columns\n\n def _get_stream(self):\n # Create a stream to work with, ignoring any blank lines\n stream = StringIO()\n cleaned_lines = [line.strip() for line in self.lines if line.strip()]\n stream.write(\"\\n\".join(cleaned_lines))\n stream.seek(0)\n\n return stream\n\n def _read_header(self):\n raise NotImplementedError()\n\n def read_data_block(self):\n raise NotImplementedError()\n\n def _read_notes(self):\n raise NotImplementedError()\n\n\ndef get_special_scen_code(regions, emissions):\n \"\"\"\n Get special code for MAGICC6 SCEN files.\n\n At the top of every MAGICC6 and MAGICC5 SCEN file there is a two digit\n number. The first digit, the 'scenfile_region_code' tells MAGICC how many regions\n data is being provided for. The second digit, the 'scenfile_emissions_code', tells\n MAGICC which gases are in the SCEN file.\n\n The variables which are part of ``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1`` are the\n emissions species which are expected when scenfile_emissions_code is 1. 
Similarly,\n ``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0`` defines the emissions species which are\n expected when scenfile_emissions_code is 0.\n\n Having these definitions allows Pymagicc to check that the right\n set of emissions has been provided before writing SCEN files.\n\n Parameters\n ----------\n region : list_like\n Regions to get code for.\n\n emissions : list-like\n Emissions to get code for.\n\n Raises\n ------\n ValueError\n If the special scen code cannot be determined.\n\n Returns\n -------\n int\n The special scen code for the regions-emissions combination provided.\n \"\"\"\n if sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0)) == sorted(set(emissions)):\n scenfile_emissions_code = 0\n elif sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1)) == sorted(set(emissions)):\n scenfile_emissions_code = 1\n else:\n msg = \"Could not determine scen special code for emissions {}\".format(emissions)\n raise ValueError(msg)\n\n if set(regions) == set([\"WORLD\"]):\n scenfile_region_code = 1\n elif set(regions) == set([\"WORLD\", \"OECD90\", \"REF\", \"ASIA\", \"ALM\"]):\n scenfile_region_code = 2\n elif set(regions) == set([\"WORLD\", \"R5OECD\", \"R5REF\", \"R5ASIA\", \"R5MAF\", \"R5LAM\"]):\n scenfile_region_code = 3\n elif set(regions) == set(\n [\"WORLD\", \"R5OECD\", \"R5REF\", \"R5ASIA\", \"R5MAF\", \"R5LAM\", \"BUNKERS\"]\n ):\n scenfile_region_code = 4\n try:\n return scenfile_region_code * 10 + scenfile_emissions_code\n except NameError:\n msg = \"Could not determine scen special code for regions {}\".format(regions)\n raise ValueError(msg)\n\n\nclass _ScenReader(_NonStandardEmisInReader):\n def read(self):\n metadata, df, columns = super().read()\n columns[\"scenario\"] = metadata.pop(\"scenario\")\n\n return metadata, df, columns\n\n def _read_header(self):\n # I don't know how to do this without these nasty while True statements\n header_notes_lines = []\n end_of_notes_key = \"WORLD\"\n while True:\n prev_pos = self._stream.tell()\n line = self._stream.readline()\n if not line:\n raise ValueError(\n \"Reached end of file without finding {} which should \"\n \"always be the first region in a SCEN file\".format(end_of_notes_key)\n )\n\n if line.startswith(end_of_notes_key):\n self._stream.seek(prev_pos)\n break\n\n header_notes_lines.append(line)\n\n return header_notes_lines\n\n def process_header(self, header):\n \"\"\"\n Parse the header for additional metadata.\n\n Parameters\n ----------\n header : str\n All the lines in the header.\n\n Returns\n -------\n dict\n The metadata in the header.\n \"\"\"\n metadata = {\"header\": []}\n for i, line in enumerate(header.split(\"\\n\")):\n line = line.strip()\n if i < 2:\n continue # top level keys, ignore\n if i == 2:\n metadata[\"scenario\"] = line.replace(\"name: \", \"\")\n elif i == 3:\n metadata[\"description\"] = line.replace(\"description: \", \"\")\n elif i == 4:\n metadata[\"notes\"] = line.replace(\"notes: \", \"\")\n else:\n if line:\n metadata[\"header\"].append(line)\n\n metadata[\"header\"] = \"\\n\".join(metadata[\"header\"])\n return metadata\n\n def read_data_block(self):\n number_years = int(self.lines[0].strip())\n\n # go through datablocks until there are none left\n while True:\n ch = {}\n pos_block = self._stream.tell()\n region = convert_magicc_to_openscm_regions(self._stream.readline().strip())\n\n try:\n variables = self._read_data_header_line(\n self._stream, [\"Years\", \"Year\", \"YEARS\", \"YEAR\"]\n )\n except IndexError: # tried to get variables from empty string\n break\n except AssertionError: 
# tried to get variables from a notes line\n break\n\n variables = convert_magicc6_to_magicc7_variables(variables)\n ch[\"variable\"] = convert_magicc7_to_openscm_variables(\n [v + \"_EMIS\" for v in variables]\n )\n\n ch[\"unit\"] = self._read_data_header_line(self._stream, [\"Yrs\", \"YEARS\"])\n\n ch = self._read_units(ch)\n ch[\"todo\"] = [\"SET\"] * len(variables)\n ch[\"region\"] = [region] * len(variables)\n\n region_block = StringIO()\n for i in range(number_years):\n region_block.write(self._stream.readline())\n region_block.seek(0)\n\n region_df = self._convert_data_block_to_df(region_block)\n\n try:\n df = pd.concat([region_df, df], axis=\"columns\")\n columns = {key: ch[key] + columns[key] for key in columns}\n except NameError:\n df = region_df\n columns = ch\n\n self._stream.seek(pos_block)\n\n try:\n return df, columns\n except NameError:\n error_msg = (\n \"This is unexpected, please raise an issue on \"\n \"https://github.com/openscm/pymagicc/issues\"\n )\n raise Exception(error_msg)\n\n def _read_notes(self):\n notes = []\n while True:\n line = self._stream.readline()\n if not line:\n break\n notes.append(line)\n\n return notes\n\n\nclass _ScenWriter(_Writer):\n SCEN_VARS_CODE_0 = convert_magicc7_to_openscm_variables(\n [v + \"_EMIS\" for v in PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0]\n )\n SCEN_VARS_CODE_1 = convert_magicc7_to_openscm_variables(\n [v + \"_EMIS\" for v in PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1]\n )\n\n def write(self, magicc_input, filepath):\n orig_length = len(magicc_input)\n orig_vars = magicc_input[\"variable\"]\n\n if not (set(self.SCEN_VARS_CODE_1) - set(orig_vars)):\n magicc_input.filter(variable=self.SCEN_VARS_CODE_1, inplace=True)\n elif not (set(self.SCEN_VARS_CODE_0) - set(orig_vars)):\n magicc_input.filter(variable=self.SCEN_VARS_CODE_0, inplace=True)\n if len(magicc_input) != orig_length:\n warnings.warn(\"Ignoring input data which is not required for .SCEN file\")\n\n super().write(magicc_input, filepath)\n\n def _write_header(self, output):\n header_lines = []\n header_lines.append(\"{}\".format(len(self.data_block)))\n\n variables = self._get_df_header_row(\"variable\")\n variables = convert_magicc7_to_openscm_variables(variables, inverse=True)\n variables = _strip_emis_variables(variables)\n\n regions = self._get_df_header_row(\"region\")\n regions = convert_magicc_to_openscm_regions(regions, inverse=True)\n regions = self._ensure_file_region_type_consistency(regions)\n\n special_scen_code = get_special_scen_code(regions=regions, emissions=variables)\n\n header_lines.append(\"{}\".format(special_scen_code))\n\n # for a scen file, the convention is (although all these lines are\n # actually ignored by source so could be anything):\n # - line 3 is name\n # - line 4 is description\n # - line 5 is notes (other notes lines go at the end)\n # - line 6 is empty\n header_lines.append(\"name: {}\".format(self.minput[\"scenario\"].unique()[0]))\n header_lines.append(\n \"description: {}\".format(\n self.minput.metadata.pop(\n \"description\", \"metadata['description'] is written here\"\n )\n )\n )\n header_lines.append(\n \"notes: {}\".format(\n self.minput.metadata.pop(\"notes\", \"metadata['notes'] is written here\")\n )\n )\n header_lines.append(\"\")\n\n try:\n header_lines.append(self.minput.metadata.pop(\"header\"))\n except KeyError:\n pass\n for k, v in self.minput.metadata.items():\n header_lines.append(\"{}: {}\".format(k, v))\n\n output.write(self._newline_char.join(header_lines))\n output.write(self._newline_char)\n\n return output\n\n 
def _write_namelist(self, output):\n # No namelist for SCEN files\n return output\n\n def _write_datablock(self, output):\n # for SCEN files, the data format is vitally important for the source code\n # we have to work out a better way of matching up all these conventions/testing them, tight coupling between pymagicc and MAGICC may solve it for us...\n lines = output.getvalue().split(self._newline_char)\n # notes are everything except the first 6 lines\n number_notes_lines = len(lines) - 6\n\n def _gip(lines, number_notes_lines):\n \"\"\"\n Get the point where we should insert the data block.\n \"\"\"\n return len(lines) - number_notes_lines\n\n region_order_db = get_region_order(\n self._get_df_header_row(\"region\"), scen7=self._scen_7\n )\n region_order_magicc = self._ensure_file_region_type_consistency(region_order_db)\n # format is vitally important for SCEN files as far as I can tell\n time_col_length = 11\n first_col_format_str = (\"{\" + \":{}d\".format(time_col_length) + \"}\").format\n other_col_format_str = \"{:10.4f}\".format\n\n # TODO: doing it this way, out of the loop, should ensure things\n # explode if your regions don't all have the same number of emissions\n # timeseries or does extra timeseries in there (that probably\n # shouldn't raise an error, another one for the future), although the\n # explosion will be cryptic so should add a test for good error\n # message at some point\n formatters = [other_col_format_str] * (\n int(len(self.data_block.columns) / len(region_order_db))\n + 1 # for the years column\n )\n formatters[0] = first_col_format_str\n\n variables = convert_magicc7_to_openscm_variables(\n self._get_df_header_row(\"variable\"), inverse=True\n )\n variables = _strip_emis_variables(variables)\n\n special_scen_code = get_special_scen_code(\n regions=region_order_magicc, emissions=variables\n )\n if special_scen_code % 10 == 0:\n variable_order = PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0\n else:\n variable_order = PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1\n\n for region_db, region_magicc in zip(region_order_db, region_order_magicc):\n region_block_region = convert_magicc_to_openscm_regions(region_db)\n region_block = self.data_block.xs(\n region_block_region, axis=1, level=\"region\", drop_level=False\n )\n region_block.columns = region_block.columns.droplevel(\"todo\")\n region_block.columns = region_block.columns.droplevel(\"region\")\n\n variables = region_block.columns.levels[0]\n variables = convert_magicc7_to_openscm_variables(variables, inverse=True)\n region_block.columns = region_block.columns.set_levels(\n levels=_strip_emis_variables(variables), level=\"variable\",\n )\n\n region_block = region_block.reindex(\n variable_order, axis=1, level=\"variable\"\n )\n\n variables = region_block.columns.get_level_values(\"variable\").tolist()\n variables = convert_magicc6_to_magicc7_variables(\n [v.replace(\"_EMIS\", \"\") for v in variables], inverse=True\n )\n\n units = convert_pint_to_fortran_safe_units(\n region_block.columns.get_level_values(\"unit\").tolist()\n )\n # column widths don't work with expressive units\n units = [u.replace(\"_\", \"\").replace(\"peryr\", \"\") for u in units]\n\n if not (region_block.columns.names == [\"variable\", \"unit\"]):\n raise AssertionError(\n \"Unexpected region block columns: \"\n \"{}\".format(region_block.columns.names)\n )\n\n region_block = region_block.rename(columns=str).reset_index()\n region_block.columns = [[\"YEARS\"] + variables, [\"Yrs\"] + units]\n\n region_block_str = region_magicc + self._newline_char\n 
region_block_str += region_block.to_string(\n index=False, formatters=formatters, sparsify=False\n )\n region_block_str += self._newline_char * 2\n\n lines.insert(_gip(lines, number_notes_lines), region_block_str)\n\n output.seek(0)\n output.write(self._newline_char.join(lines))\n return output\n\n def _ensure_file_region_type_consistency(self, regions):\n magicc7_regions_mapping = {\n r: r.replace(\"R5.2\", \"R5\")\n for r in [\"R5.2ASIA\", \"R5.2LAM\", \"R5.2REF\", \"R5.2MAF\", \"R5.2OECD\"]\n }\n\n if not any([r in regions for r in magicc7_regions_mapping]):\n return regions\n\n new_regions = [\n magicc7_regions_mapping[r] if r in magicc7_regions_mapping else r\n for r in regions\n ]\n warn_msg = (\n \"MAGICC7 RCP region naming (R5.2*) is not compatible with \"\n \"MAGICC6, automatically renaming to MAGICC6 compatible regions \"\n \"(R5*)\"\n )\n warnings.warn(warn_msg)\n\n return new_regions\n" ]
[ [ "pandas.concat" ] ]
noe/sparsely_factored_nmt
[ "b4cf93f80a15f22d7bc45830120e6aba8d5b2459" ]
[ "src/morphodropout/dataset.py" ]
[ "import torch\nfrom itertools import accumulate\nfrom fairseq.data import (\n data_utils,\n FairseqDataset,\n Dictionary,\n IdDataset,\n NestedDictionaryDataset,\n NumelDataset,\n NumSamplesDataset,\n)\nfrom functools import lru_cache\nimport numpy as np\nfrom seqp.hdf5 import Hdf5RecordReader\nfrom typing import List\n\nfrom morphodropout.binarize import (\n SRC_SUBWORD_KEY,\n SRC_SUBWORD_LENGTHS_KEY,\n SRC_MORPH_KEY,\n SRC_MORPH_LENGTHS_KEY,\n SRC_LEMMA_KEY,\n)\n\n\nclass MorphoDataset(FairseqDataset):\n \"\"\"A dataset that provides helpers for batching.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n data_files: List[str],\n morpho_dropout: float = 0.5):\n self.dictionary = dictionary\n self.pad_idx = dictionary.pad_index\n self.data_files = data_files\n self.reader = Hdf5RecordReader(data_files)\n self.morpho_dropout = morpho_dropout\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # Don't pickle the reader (see https://github.com/h5py/h5py/issues/1092)\n del state[\"reader\"]\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n # Add reader back since it doesn't exist in the pickle\n self.reader = Hdf5RecordReader(self.data_files)\n\n @lru_cache(maxsize=8)\n def __getitem__(self, index):\n record = self.reader.retrieve(index)\n lemmas = record[SRC_LEMMA_KEY]\n subwords = record[SRC_SUBWORD_KEY]\n morphos = record[SRC_MORPH_KEY]\n morpho_lengths = record[SRC_MORPH_LENGTHS_KEY].tolist()\n sw_lengths = record[SRC_SUBWORD_LENGTHS_KEY].tolist()\n\n use_subwords = np.random.binomial(size=len(lemmas), n=1, p=self.morpho_dropout).astype(bool)\n EOS_POS = -1\n sw_pos = list(accumulate(sw_lengths))\n morpho_pos = list(accumulate(morpho_lengths))\n start_end = zip([0] + sw_pos[:EOS_POS - 1],\n sw_pos[:EOS_POS],\n [0] + morpho_pos[:-1],\n morpho_pos)\n\n pad = [self.dictionary.pad_index]\n eos = [self.dictionary.eos_index]\n\n result: List[np.ndarray] = list()\n max_depth = 0\n for k, (sw_start, sw_end, morpho_start, morpho_end) in enumerate(start_end):\n if use_subwords[k] or lemmas[k] in [self.dictionary.unk_index, self.dictionary.pad_index]:\n word_subwords = subwords[sw_start:sw_end]\n result.extend([np.array([sw], dtype=np.int64) for sw in word_subwords])\n max_depth = max(max_depth, 1)\n else:\n clean_morphos = [m if m != self.dictionary.unk_index else self.dictionary.pad_index\n for m in morphos[morpho_start:morpho_end]]\n word_linguistic_info = [lemmas[k]] + clean_morphos\n result.append(np.array(word_linguistic_info, dtype=np.int64))\n max_depth = max(max_depth, len(word_linguistic_info))\n result.append(np.array(eos, dtype=np.int64))\n\n # Add padding and convert to tensors\n result = [torch.LongTensor(np.concatenate((r, pad * (max_depth - len(r))))) for r in result]\n\n # Combine padded tensors into a single 2d tensor\n result = torch.stack(result)\n\n return result\n\n def __len__(self):\n return self.reader.num_records()\n\n def num_tokens(self, index):\n return self.reader.length(index)\n\n def size(self, index):\n return self.reader.length(index)\n\n def ordered_indices(self):\n return [idx for idx, length in self.reader.indexes_and_lengths()]\n\n @property\n def sizes(self):\n return [length for idx, length in self.reader.indexes_and_lengths()]\n\n def get_dummy_batch(self, num_tokens, max_positions):\n return self.dictionary.dummy_sentence(num_tokens).unsqueeze(dim=2)\n\n @property\n def supports_prefetch(self):\n return False\n\n def prefetch(self, indices):\n raise NotImplementedError\n\n def collater(self, samples):\n size = 
max(v.size(0) for v in samples)\n depth = max(v.size(1) for v in samples)\n res = samples[0].new(len(samples), size, depth).fill_(self.pad_idx)\n\n def copy_tensor(src, dst):\n assert dst.numel() == src.numel()\n dst.copy_(src)\n\n for i, v in enumerate(samples):\n copy_tensor(v, res[i, :v.size(0), :v.size(1)])\n return res\n\n\nclass MonolingualDataset(FairseqDataset):\n \"\"\"A dataset that provides helpers for batching.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n data_files: List[str],\n left_pad=False,\n move_eos_to_beginning=False):\n self.dictionary = dictionary\n self.data_files = data_files\n self.reader = Hdf5RecordReader(data_files)\n self.left_pad = left_pad\n self.move_eos_to_beginning = move_eos_to_beginning\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # Don't pickle the reader (see https://github.com/h5py/h5py/issues/1092)\n del state[\"reader\"]\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n # Add reader back since it doesn't exist in the pickle\n self.reader = Hdf5RecordReader(self.data_files)\n\n def __getitem__(self, index):\n elem = self.reader.retrieve(index)\n elem = torch.LongTensor(elem.astype(np.int64))\n return elem\n\n def __len__(self):\n return self.reader.num_records()\n\n def collater(self, samples):\n tokens = data_utils.collate_tokens(\n [s for s in samples],\n self.dictionary.pad_index,\n self.dictionary.eos_index,\n self.left_pad,\n move_eos_to_beginning=self.move_eos_to_beginning)\n return tokens\n\n def get_dummy_batch(self, num_tokens, max_positions):\n return self.dictionary.dummy_sentence(num_tokens)\n\n def num_tokens(self, index):\n return self.reader.length(index)\n\n def size(self, index):\n return self.reader.length(index)\n\n def ordered_indices(self):\n return [idx for idx, length in self.reader.indexes_and_lengths()]\n\n @property\n def sizes(self):\n return [length for idx, length in self.reader.indexes_and_lengths()]\n\n @property\n def supports_prefetch(self):\n return False\n\n def prefetch(self, indices):\n raise NotImplementedError\n\n\ndef build_combined_dataset(\n src_dictionary: Dictionary,\n src_data_files: List[str],\n src_morpho_dropout: float,\n tgt_dictionary: Dictionary,\n tgt_hdf5_files: List[str],\n seed: int,\n epoch: int,\n ) -> FairseqDataset:\n\n src = MorphoDataset(src_dictionary,\n src_data_files,\n src_morpho_dropout)\n\n target = MonolingualDataset(tgt_dictionary,\n tgt_hdf5_files,\n move_eos_to_beginning=False)\n\n prev_output_tokens = MonolingualDataset(tgt_dictionary,\n tgt_hdf5_files,\n move_eos_to_beginning=True)\n\n return NestedDictionaryDataset(\n {\n 'id': IdDataset(),\n 'net_input': {\n 'src_tokens': src,\n 'src_lengths': NumelDataset(src, reduce=False),\n 'prev_output_tokens': prev_output_tokens,\n },\n 'target': target,\n 'nsentences': NumSamplesDataset(),\n 'ntokens': NumelDataset(src, reduce=True),\n },\n sizes=[src.sizes],\n )\n" ]
[ [ "numpy.array", "torch.stack" ] ]
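The collater in the MorphoDataset record above right-pads a list of per-sentence (words x depth) tensors into one 3-D batch. A minimal, self-contained sketch of that padding step on toy tensors; the pad index 1 is an assumption (fairseq's Dictionary defaults to it), not something read from this record:

```python
import torch

pad_idx = 1  # assumed pad index; fairseq's Dictionary uses 1 by default
# Toy per-sentence tensors of shape (num_words, depth), as __getitem__ returns.
samples = [torch.zeros(3, 2, dtype=torch.long),
           torch.zeros(5, 1, dtype=torch.long)]

size = max(v.size(0) for v in samples)   # longest sentence in the batch
depth = max(v.size(1) for v in samples)  # deepest lemma+morphology column
res = samples[0].new(len(samples), size, depth).fill_(pad_idx)

for i, v in enumerate(samples):
    res[i, :v.size(0), :v.size(1)].copy_(v)

print(res.shape)  # torch.Size([2, 5, 2])
```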
Dapid/contact-vis
[ "60ad8afc739aba8abe8f7857c932b1902645d97f" ]
[ "contactvis/parsing/parse_pdb.py" ]
[ "import sys\nimport operator\nimport numpy as np\nfrom collections import defaultdict\n\n\ndef parse_atm_record(line):\n\n record = defaultdict()\n record['name'] = line[0:6].strip()\n record['atm_no'] = int(line[6:11])\n record['atm_name'] = line[12:16].strip()\n record['res_name'] = line[17:20].strip()\n record['chain'] = line[21]\n record['res_no'] = int(line[22:26])\n record['insert'] = line[26].strip()\n record['x'] = float(line[30:38])\n record['y'] = float(line[38:46])\n record['z'] = float(line[46:54])\n record['occ'] = float(line[54:60])\n record['B'] = float(line[60:66])\n \n return record\n\n\ndef read(pdbfile):\n\n header = ''\n res_lst = []\n atm_lst = []\n tail = ''\n\n seen_atoms = False\n curr_resi = 0\n prev_resi = 0\n \n for line in pdbfile:\n if not line.startswith('ATOM') and not seen_atoms:\n header += line\n elif not line.startswith('ATOM') and seen_atoms:\n tail += line\n else:\n atm_record = parse_atm_record(line)\n if not seen_atoms:\n curr_resi = atm_record['res_no']\n prev_resi = curr_resi\n seen_atoms = True\n curr_resi = atm_record['res_no']\n if curr_resi == prev_resi:\n atm_lst.append(line)\n else:\n #atm_lst.append(line)\n res_lst.append(atm_lst)\n atm_lst = [line]\n prev_resi = curr_resi\n res_lst.append(atm_lst)\n \n pdbfile.close()\n pdb_lst = [header, res_lst, tail]\n return pdb_lst\n\n\ndef write(pdb_lst, outfile):\n\n outfile.write(pdb_lst[0])\n\n for res in pdb_lst[1]:\n for atm in res:\n outfile.write(atm)\n \n outfile.write(pdb_lst[2])\n outfile.close()\n\n\ndef get_coordinates(pdbfile, chain):\n\n res_dict = defaultdict(list)\n\n if not chain:\n chain = get_first_chain(pdbfile)\n pdbfile.seek(0)\n\n for line in pdbfile:\n if not line.startswith('ATOM'):\n continue\n atm_record = parse_atm_record(line)\n if atm_record['chain'] != ' ' and atm_record['chain'] != chain:\n continue\n\n res_i = atm_record['res_no']\n atm = [atm_record['x'], atm_record['y'], atm_record['z']]\n\n \"\"\"\n line_arr = line.split()\n #print line_arr\n\n if line_arr[2].startswith('H'):\n continue\n\n if len(line_arr[2]) > 4:\n if line_arr[3] != chain:\n continue\n try:\n res_i = int(line_arr[4])\n except ValueError as exc:\n continue\n try:\n atm = map(float, line_arr[5:8])\n except ValueError as exc:\n atm = [float('inf'), float('inf'), float('inf')]\n else:\n if line_arr[4] != chain:\n continue\n try:\n res_i = int(line_arr[5])\n except ValueError as exc:\n continue\n try:\n atm = map(float, line_arr[6:9])\n except ValueError as exc:\n atm = [float('inf'), float('inf'), float('inf')]\n \"\"\"\n\n res_dict[res_i].append(np.array(atm))\n \n pdbfile.close()\n return sorted(res_dict.iteritems(), key=operator.itemgetter(0))\n\n\ndef get_cb_coordinates(pdbfile, chain):\n\n cb_lst = []\n res_dict = defaultdict(list)\n\n if not chain:\n chain = get_first_chain(pdbfile)\n pdbfile.seek(0)\n\n for line in pdbfile:\n if not line.startswith('ATOM'):\n continue\n atm_record = parse_atm_record(line)\n\n if atm_record['chain'] != ' ' and atm_record['chain'] != chain:\n continue\n\n res_i = atm_record['res_no']\n \n atm = [float('inf'), float('inf'), float('inf')]\n\n if atm_record['atm_name'] == 'CA':\n atm = [atm_record['x'], atm_record['y'], atm_record['z']]\n res_dict[res_i].append(np.array(atm)) \n elif atm_record['atm_name'] == 'CB':\n atm = [atm_record['x'], atm_record['y'], atm_record['z']]\n res_dict[res_i].append(np.array(atm)) \n \n cb_lst = []\n num_res = len(res_dict)+2\n tmp_i = 0\n for i in res_dict.keys():\n if len(res_dict[i]) > 1:\n tmp_i += 1\n 
cb_lst.append(res_dict[i][-1])\n elif len(res_dict[i]) == 1:\n tmp_i += 1\n cb_lst.append(res_dict[i][0])\n #print atm_count \n pdbfile.close()\n return cb_lst\n\n\ndef get_atom_seq(pdbfile, chain):\n\n three_to_one = {'ARG':'R', 'HIS':'H', 'LYS':'K', 'ASP':'D', 'GLU':'E', 'SER':'S', 'THR':'T', 'ASN':'N', 'GLN':'Q', 'CYS':'C', 'GLY':'G', 'PRO':'P', 'ALA':'A', 'ILE':'I', 'LEU':'L', 'MET':'M', 'PHE':'F', 'TRP':'W', 'TYR':'Y', 'VAL':'V', 'UNK': 'X'}\n res_dict = {}\n \n if not chain:\n chain = get_first_chain(pdbfile)\n pdbfile.seek(0)\n\n res_name = ''\n for line in pdbfile:\n if not line.startswith('ATOM'):\n continue\n atm_record = parse_atm_record(line)\n if atm_record['chain'] != ' ' and atm_record['chain'] != chain:\n continue\n\n res_i = atm_record['res_no']\n \n if atm_record['insert'] == 'X':\n res_i = res_i * 0.001\n \n #print atm_record['res_name']\n if atm_record['res_name'] in three_to_one:\n #res_name = three_to_one[atm_record['res_name']]\n #print res_name\n res_name = three_to_one[atm_record['res_name']]\n #else:\n #res_name = ''\n #continue\n\n res_dict[res_i] = res_name\n\n res_lst = sorted(res_dict.iteritems(), key=operator.itemgetter(0))\n atom_seq = ''\n\n for res in res_lst:\n atom_seq += res[1]\n\n pdbfile.close()\n return atom_seq\n\n\ndef get_first_chain(pdbfile):\n\n for line in pdbfile:\n if not line.startswith('ATOM'):\n continue\n atm_record = parse_atm_record(line)\n break\n\n return atm_record['chain']\n \n\n\n\nif __name__ == '__main__':\n\n pdbfile = open(sys.argv[1], 'r')\n chain = sys.argv[2]\n #print get_atom_seq(pdbfile, chain)\n pdbfile.close()\n #pdbfile = open(sys.argv[1], 'r')\n #print get_coordinates(pdbfile)\n #pdbfile.close()\n" ]
[ [ "numpy.array" ] ]
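get_cb_coordinates in the parse_pdb.py record returns one xyz array per residue (CB, with CA as fallback), which is the usual input for a residue contact map. A sketch of the downstream distance computation with plain numpy; the 8 Angstrom cutoff is a common convention assumed here, not something this file defines:

```python
import numpy as np

# Stand-in for get_cb_coordinates(...) output: one xyz vector per residue.
cb_lst = [np.array([0.0, 0.0, 0.0]),
          np.array([3.8, 0.0, 0.0]),
          np.array([20.0, 5.0, 1.0])]

coords = np.stack(cb_lst)                       # (num_res, 3)
diff = coords[:, None, :] - coords[None, :, :]  # pairwise difference vectors
dist = np.sqrt((diff ** 2).sum(axis=-1))        # (num_res, num_res) distances

contacts = dist < 8.0  # assumed contact threshold in Angstroms
print(contacts.astype(int))
```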
crisostomi/augmented-graphs-evaluator
[ "2ed3d9c0fcb7d30d6f45c048df21aee965a1fa73" ]
[ "src/age/modules/baseline.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.data import Batch\nfrom torch_geometric.nn import global_mean_pool\nfrom torch_geometric.nn.conv.gat_conv import GATConv\n\nfrom age.modules.mlp import MLP\n\n\nclass GraphClassifier(nn.Module):\n def __init__(\n self,\n feature_dim,\n hidden_dim,\n output_dim,\n num_classes,\n num_mlp_layers,\n ):\n super().__init__()\n\n self.feature_dim = feature_dim\n self.hidden_dim = hidden_dim\n self.num_classes = num_classes\n self.output_dim = output_dim\n\n self.conv1 = GATConv(in_channels=self.feature_dim, out_channels=self.hidden_dim)\n self.conv2 = GATConv(in_channels=self.hidden_dim, out_channels=self.hidden_dim)\n\n self.mlp = MLP(\n num_layers=num_mlp_layers,\n input_dim=self.hidden_dim,\n output_dim=self.hidden_dim,\n hidden_dim=self.hidden_dim,\n )\n\n self.hidden_to_logits = MLP(\n num_layers=num_mlp_layers,\n input_dim=self.hidden_dim,\n output_dim=self.num_classes,\n hidden_dim=self.hidden_dim,\n )\n\n def _get_logits(self, batch: Batch):\n\n # X ~ (num_nodes_in_batch, feature_dim)\n # edge_index ~ (2, num_edges_in_batch)\n X, edge_index = batch.x, batch.edge_index\n\n # h1 ~ (num_nodes_in_batch, hidden_dim)\n h1 = self.conv1(X, edge_index)\n h1 = F.relu(h1)\n\n # h2 ~ (num_nodes_in_batch, hidden_dim)\n h2 = self.conv2(h1, edge_index)\n h2 = F.relu(h2)\n\n # out ~ (num_nodes_in_batch, output_dim)\n node_out_features = self.mlp(h2)\n\n # pooled_out ~ (num_samples_in_batch, output_dim)\n pooled_out = global_mean_pool(node_out_features, batch.batch)\n\n logits = self.hidden_to_logits(pooled_out)\n\n return logits\n\n def forward(self, batch):\n \"\"\"\n :param supports:\n :return:\n \"\"\"\n\n logits = self._get_logits(batch)\n\n return logits\n" ]
[ [ "torch.nn.functional.relu" ] ]
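In GraphClassifier._get_logits, global_mean_pool reduces (num_nodes_in_batch, output_dim) node features to one row per graph using the batch.batch assignment vector. A dependency-free sketch of that reduction, so the shape change is concrete:

```python
import torch

# Node features plus a batch vector mapping each node to its graph id.
node_out_features = torch.arange(12.0).view(6, 2)
batch = torch.tensor([0, 0, 0, 1, 1, 1])

num_graphs = int(batch.max()) + 1
sums = torch.zeros(num_graphs, node_out_features.size(1))
sums.index_add_(0, batch, node_out_features)         # per-graph feature sums
counts = torch.bincount(batch).unsqueeze(1).float()  # nodes per graph
pooled_out = sums / counts                           # (num_graphs, output_dim)

print(pooled_out)  # equivalent to global_mean_pool(node_out_features, batch)
```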
amirbiran/openrec
[ "69a1c57a7a1eec49720b776279b9120b80630ba2" ]
[ "openrec/legacy/utils/evaluators/auc.py" ]
[ "import numpy as np\nfrom openrec.legacy.utils.evaluators import Evaluator\n\nclass AUC(Evaluator):\n\n def __init__(self, name='AUC'):\n \n super(AUC, self).__init__(etype='rank', name=name)\n\n def compute(self, rank_above, negative_num):\n\n return np.mean((negative_num - rank_above) / negative_num)" ]
[ [ "numpy.mean" ] ]
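AUC.compute treats AUC as the mean fraction of negatives ranked below each positive item. Worked on toy arrays:

```python
import numpy as np

# rank_above[i]: how many of the negative_num[i] negatives outrank positive i.
rank_above = np.array([0.0, 2.0, 5.0])
negative_num = np.array([10.0, 10.0, 10.0])

auc = np.mean((negative_num - rank_above) / negative_num)
print(auc)  # 0.7666...; 1.0 would mean every positive beats every negative
```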
Genpeng/order-sales-forecast
[ "c89269817a0d936900e5deb9bcc2f9a0d885382d" ]
[ "util/pandas_util.py" ]
[ "# _*_ coding: utf-8 _*_\n\n\"\"\"\nSome useful utility functions about pandas.\n\nAuthor: Genpeng Xu\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef row_normalization(df):\n \"\"\"Normalize each row of data.\"\"\"\n df_bak = df.copy()\n rnames = list(df_bak.index)\n scalers = dict()\n for rn in rnames:\n scaler = MinMaxScaler().fit(df_bak.loc[rn].values.reshape(-1, 1))\n df_bak.loc[rn] = scaler.transform(df_bak.loc[rn].values.reshape(-1, 1)).ravel()\n scalers[rn] = scaler\n return df_bak, scalers\n\n\ndef row_restore(df_normalized, scalers):\n \"\"\"Convert data back from normalized values.\"\"\"\n df_bak = df_normalized.copy()\n rnames = list(df_bak.index)\n for rn in rnames:\n scaler = scalers[rn]\n df_bak.loc[rn] = scaler.inverse_transform(df_bak.loc[rn].values.reshape(-1, 1)).ravel()\n return df_bak\n" ]
[ [ "sklearn.preprocessing.MinMaxScaler" ] ]
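row_normalization fits one MinMaxScaler per row and row_restore inverts it, so a normalize/restore round trip should reproduce the input. A usage sketch, assuming the module is importable under the util.pandas_util path this record lists:

```python
import numpy as np
import pandas as pd
from util.pandas_util import row_normalization, row_restore  # assumed import path

df = pd.DataFrame([[1.0, 2.0, 3.0],
                   [10.0, 20.0, 30.0]],
                  index=["item_a", "item_b"])

df_norm, scalers = row_normalization(df)  # each row scaled into [0, 1]
df_back = row_restore(df_norm, scalers)   # per-row inverse transform

assert np.allclose(df.values, df_back.values)
print(df_norm)
```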
husencd/DriverPostureClassification
[ "91dbcffdea82527a26fbc840961970b4671a2a85" ]
[ "utils.py" ]
[ "import csv\nimport torch\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass Logger(object):\n \"\"\"Outputs log files\"\"\"\n def __init__(self, path, header):\n self.log_file = open(path, 'w')\n self.logger = csv.writer(self.log_file, delimiter='\\t')\n self.logger.writerow(header)\n self.header = header\n\n def __del__(self):\n self.log_file.close()\n\n def log(self, values):\n write_values = []\n for col in self.header:\n assert col in values\n write_values.append(values[col])\n\n self.logger.writerow(write_values)\n self.log_file.flush()\n\n\ndef calculate_accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, dim=1, largest=True, sorted=True) # batch_size x maxk\n pred = pred.t() # transpose, maxk x batch_size\n # target.view(1, -1): convert (batch_size,) to 1 x batch_size\n # expand_as: convert 1 x batch_size to maxk x batch_size\n correct = pred.eq(target.view(1, -1).expand_as(pred)) # maxk x batch_size\n\n res = []\n for k in topk:\n # correct[:k] converts \"maxk x batch_size\" to \"k x batch_size\"\n # view(-1) converts \"k x batch_size\" to \"(k x batch_size,)\"\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n" ]
[ [ "torch.no_grad" ] ]
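calculate_accuracy is the standard precision@k recipe: topk, transpose, then compare against a broadcast target. Its core, isolated on a two-sample batch; .reshape(-1) is used below because the comparison tensor built from a transposed view may be non-contiguous:

```python
import torch

output = torch.tensor([[0.1, 0.7, 0.2],   # top-1 prediction: class 1
                       [0.8, 0.1, 0.1]])  # top-1 prediction: class 0
target = torch.tensor([2, 0])

_, pred = output.topk(2, dim=1, largest=True, sorted=True)  # (batch, k)
pred = pred.t()                                             # (k, batch)
correct = pred.eq(target.view(1, -1).expand_as(pred))

top1 = correct[:1].reshape(-1).float().sum() * 100.0 / target.size(0)
top2 = correct[:2].reshape(-1).float().sum() * 100.0 / target.size(0)
print(top1.item(), top2.item())  # 50.0 100.0
```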
daniagudelos/contact_tracing
[ "dd8e54b518393d87cc6aa7020c13ed11ea94c33b" ]
[ "optimizer/optimizer.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 16 10:48:11 2021\n\n@author: saitel\n\"\"\"\nfrom parameters.parameters import TestParameters1\nfrom parameters.parameters import TestParameters2\nfrom periodic.reproduction_number_case1 import ReproductionNumberCalculator as rn_case1\nfrom periodic.reproduction_number_case2 import ReproductionNumberCalculator as rn_case2\nfrom scipy.optimize import minimize # differential_evolution\nfrom scipy.optimize import NonlinearConstraint, Bounds\nfrom helper.exporter import Exporter\nfrom multiprocessing import Pool\nimport logging\nimport numpy as np\n\n\ndef min_case1():\n logger = logging.getLogger('min_case1')\n formatter = logging.Formatter('%(asctime)s %(message)s',\n '%m/%d/%Y %I:%M:%S %p')\n fh = logging.FileHandler(\n '/home/saitel/TUM/Thesis/Code/min_case1_SLSQP.log')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # period\n T = 7\n # initial beta\n beta0 = np.array([1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8,\n 1e-8, 1e-8, 1e-8, 461, 1e-8, 1e-8, 1e-8, 1e-8,\n 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8,\n 1e-8, 1e-8, 1e-8, 1e-8])\n # initial parameters\n par = TestParameters1(beta0, p=1/3, h=0.25, period_time=T)\n\n # Calculators\n rnc_nct = rn_case1(logger, par, a_max=T, t_0_max=2 * T, trunc=1)\n rnc_ot_bct = rn_case1(logger, par, a_max=T, t_0_max=2 * T, trunc=1)\n\n R_0 = rnc_nct.calculateReproductionNumber\n R_p = rnc_ot_bct.calculateReproductionNumber\n\n # Ro constraint\n def constrain(beta):\n return R_0(beta, 0)\n\n cons = NonlinearConstraint(constrain, 3, 3)\n\n # Positive boundary\n bounds = Bounds([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,\n np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,\n np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,\n np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf])\n logger.info('START SLSQP')\n\n result = minimize(R_p, beta0, bounds=bounds, method='SLSQP',\n constraints=cons, args=(1,),\n options={'maxiter': 1000, 'ftol': 1.0e-8, 'eps': 1e-06})\n Exporter.save_variable(result, 'min_case1_result')\n logger.info('Success %s', str(result.success))\n logger.info('STOP SLSQP')\n\n return result\n\n\ndef max_case1():\n logger = logging.getLogger('max_case1')\n formatter = logging.Formatter('%(asctime)s %(message)s',\n '%m/%d/%Y %I:%M:%S %p')\n fh = logging.FileHandler(\n '/home/saitel/TUM/Thesis/Code/max_case1_SLSQP.log')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # period\n T = 7\n # initial beta\n beta0 = np.array([0.835, 0.835, 0.835, 0.835, 0.835, 0.835, 0.835, 0.835,\n 0.835, 0.835, 0.835, 0.835, 0.835, 0.835, 0.835, 0.835,\n 0.835, 0.835, 0.835, 0.835, 0.835, 0.835, 0.835, 0.835,\n 0.835, 0.835, 0.835, 0.835])\n\n # initial parameters\n par = TestParameters1(beta0, p=1/3, h=0.25, period_time=T)\n\n # Calculators\n rnc_nct = rn_case1(logger, par, a_max=T, t_0_max=2 * T, trunc=1)\n rnc_ot_bct = rn_case1(logger, par, a_max=T, t_0_max=2 * T, trunc=1)\n\n R_0 = rnc_nct.calculateReproductionNumber\n\n def R_p(beta, tracing_type):\n rp = -1 * rnc_ot_bct.calculateReproductionNumber(beta, tracing_type)\n logger.info('rp %s', str(rp))\n return rp\n\n # Ro constraint\n def constrain(beta):\n return R_0(beta, 0)\n\n cons = NonlinearConstraint(constrain, 3, 3)\n\n # Positive boundary\n bounds = Bounds([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 
0., 0., 0.],\n [20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,\n 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20])\n logger.info('START SLSQP')\n\n result = minimize(R_p, beta0, bounds=bounds, method='SLSQP',\n constraints=cons, args=(1,),\n options={'maxiter': 1000, 'ftol': 1.0e-8, 'eps': 1e-06})\n Exporter.save_variable(result, 'max_case1_result')\n logger.info('Success %s', str(result.success))\n logger.info('STOP SLSQP')\n\n return result\n\n\ndef min_case2():\n logger = logging.getLogger('min_case2')\n formatter = logging.Formatter('%(asctime)s %(message)s',\n '%m/%d/%Y %I:%M:%S %p')\n fh = logging.FileHandler(\n '/home/saitel/TUM/Thesis/Code/min_case2_SLSQP.log')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # period\n T = 7\n # initial beta\n beta0 = np.array([1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8,\n 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8,\n 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 1e-8, 78.5,\n 1e-8, 1e-8, 1e-8, 1e-8])\n\n # initial parameters\n par = TestParameters2(beta0, p=1/3, h=0.25, period_time=T)\n\n # Calculators\n rnc_nct = rn_case2(logger, par, a_max=T, t_0_max=2 * T, trunc=1)\n rnc_ot_bct = rn_case2(logger, par, a_max=T, t_0_max=2 * T, trunc=1)\n\n R_0 = rnc_nct.calculateReproductionNumber\n R_p = rnc_ot_bct.calculateReproductionNumber\n\n # Ro constraint\n def constrain(beta):\n return R_0(beta, 0)\n\n cons = NonlinearConstraint(constrain, 3, 3)\n\n # Positive boundary\n bounds = Bounds([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,\n np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,\n np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf,\n np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.inf])\n logger.info('START SLSQP')\n\n result = minimize(R_p, beta0, bounds=bounds, method='SLSQP',\n constraints=cons, args=(1,),\n options={'maxiter': 1000, 'ftol': 1.0e-8, 'eps': 1e-06})\n Exporter.save_variable(result, 'min_case2_result')\n logger.info('Success %s', str(result.success))\n logger.info('STOP SLSQP')\n\n return result\n\n\ndef max_case2():\n logger = logging.getLogger('max_case2')\n formatter = logging.Formatter('%(asctime)s %(message)s',\n '%m/%d/%Y %I:%M:%S %p')\n fh = logging.FileHandler(\n '/home/saitel/TUM/Thesis/Code/max_case2_SLSQP.log')\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # period\n T = 7\n # initial beta\n beta0 = np.array([0.52, 0.52, 0.52, 0.52, 0.52, 0.52, 0.52, 0.52,\n 0.52, 0.52, 0.52, 0.52, 0.52, 0.52, 0.52, 0.52,\n 0.52, 0.52, 0.52, 0.52, 0.52, 0.52, 0.52, 0.52,\n 0.52, 0.52, 0.52, 0.52])\n\n # initial parameters\n par = TestParameters2(beta0, p=1/3, h=0.25, period_time=T)\n\n # Calculators\n rnc_nct = rn_case2(logger, par, a_max=T, t_0_max=2 * T, trunc=1)\n rnc_ot_bct = rn_case2(logger, par, a_max=T, t_0_max=2 * T, trunc=1)\n\n R_0 = rnc_nct.calculateReproductionNumber\n\n def R_p(beta, tracing_type):\n rp = -1 * rnc_ot_bct.calculateReproductionNumber(beta, tracing_type)\n logger.info('rp %s', str(rp))\n return rp\n\n # Ro constraint\n def constrain(beta):\n return R_0(beta, 0)\n\n cons = NonlinearConstraint(constrain, 3, 3)\n\n # Positive boundary\n bounds = Bounds([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,\n 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20])\n logger.info('START SLSQP')\n\n result = minimize(R_p, beta0, 
bounds=bounds, method='SLSQP',\n constraints=cons, args=(1,),\n options={'maxiter': 1000, 'ftol': 1.0e-8, 'eps': 1e-06})\n Exporter.save_variable(result, 'max_case2_result')\n logger.info('Success %s', str(result.success))\n logger.info('STOP SLSQP')\n\n return result\n\n\ndef main():\n pool = Pool(4)\n result1 = pool.apply_async(min_case1)\n result2 = pool.apply_async(max_case1)\n result3 = pool.apply_async(min_case2)\n result4 = pool.apply_async(max_case2)\n\n pool.close()\n pool.join()\n\n return result1, result2, result3, result4\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\n result1, result2, result3, result4 = main()\n" ]
[ [ "numpy.array", "scipy.optimize.minimize", "scipy.optimize.Bounds", "scipy.optimize.NonlinearConstraint" ] ]
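All four optimizer cases in this record share one scipy pattern: minimize an objective under box Bounds plus an equality constraint written as a NonlinearConstraint with equal lower and upper limits (3, 3). The same interface on a toy problem:

```python
import numpy as np
from scipy.optimize import minimize, NonlinearConstraint, Bounds

def objective(x):
    return x[0] ** 2 + x[1] ** 2

# Equality constraint x0 + x1 = 1, encoded with equal bounds, mirroring
# NonlinearConstraint(constrain, 3, 3) above.
cons = NonlinearConstraint(lambda x: x[0] + x[1], 1.0, 1.0)
bounds = Bounds([0.0, 0.0], [np.inf, np.inf])

result = minimize(objective, np.array([0.8, 0.8]), method="SLSQP",
                  bounds=bounds, constraints=cons,
                  options={"maxiter": 1000, "ftol": 1e-8})
print(result.success, result.x)  # True, approximately [0.5, 0.5]
```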
DanIulian/minigrid_rl
[ "d7b59fd1d1e62fc99d5134c89f59c6ad16246cfa", "d7b59fd1d1e62fc99d5134c89f59c6ad16246cfa" ]
[ "torch_rl/torch_rl/algos/a2c.py", "agents/ppo_icm.py" ]
[ "\"\"\"\n Copyright (c) https://github.com/lcswillems/torch-rl\n\"\"\"\n\nimport numpy\nimport torch\nimport torch.nn.functional as F\n\nfrom torch_rl.algos.base import BaseAlgo\n\nclass A2CAlgo(BaseAlgo):\n \"\"\"The class for the Advantage Actor-Critic algorithm.\"\"\"\n\n def __init__(self, envs, acmodel, num_frames_per_proc=None, discount=0.99, lr=7e-4, gae_lambda=0.95,\n entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, recurrence=4,\n rmsprop_alpha=0.99, rmsprop_eps=1e-5, preprocess_obss=None, reshape_reward=None):\n num_frames_per_proc = num_frames_per_proc or 8\n\n super().__init__(envs, acmodel, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,\n value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward)\n\n self.optimizer = torch.optim.RMSprop(self.acmodel.parameters(), lr,\n alpha=rmsprop_alpha, eps=rmsprop_eps)\n\n def update_parameters(self):\n # Collect experiences\n\n exps, logs = self.collect_experiences()\n\n # Compute starting indexes\n\n inds = self._get_starting_indexes()\n\n # Initialize update values\n\n update_entropy = 0\n update_value = 0\n update_policy_loss = 0\n update_value_loss = 0\n update_loss = 0\n\n # Initialize memory\n\n if self.acmodel.recurrent:\n memory = exps.memory[inds]\n\n for i in range(self.recurrence):\n # Create a sub-batch of experience\n\n sb = exps[inds + i]\n\n # Compute loss\n\n if self.acmodel.recurrent:\n dist, value, memory = self.acmodel(sb.obs, memory * sb.mask)\n else:\n dist, value = self.acmodel(sb.obs)\n\n entropy = dist.entropy().mean()\n\n policy_loss = -(dist.log_prob(sb.action) * sb.advantage).mean()\n\n value_loss = (value - sb.returnn).pow(2).mean()\n\n loss = policy_loss - self.entropy_coef * entropy + self.value_loss_coef * value_loss\n\n # Update batch values\n\n update_entropy += entropy.item()\n update_value += value.mean().item()\n update_policy_loss += policy_loss.item()\n update_value_loss += value_loss.item()\n update_loss += loss\n\n # Update update values\n\n update_entropy /= self.recurrence\n update_value /= self.recurrence\n update_policy_loss /= self.recurrence\n update_value_loss /= self.recurrence\n update_loss /= self.recurrence\n\n # Update actor-critic\n\n self.optimizer.zero_grad()\n update_loss.backward()\n update_grad_norm = sum(p.grad.data.norm(2) ** 2 for p in self.acmodel.parameters()) ** 0.5\n torch.nn.utils.clip_grad_norm_(self.acmodel.parameters(), self.max_grad_norm)\n self.optimizer.step()\n\n # Log some values\n\n logs[\"entropy\"] = update_entropy\n logs[\"value\"] = update_value\n logs[\"policy_loss\"] = update_policy_loss\n logs[\"value_loss\"] = update_value_loss\n logs[\"grad_norm\"] = update_grad_norm\n\n return logs\n\n def _get_starting_indexes(self):\n \"\"\"Gives the indexes of the observations given to the model and the\n experiences used to compute the loss at first.\n\n The indexes are the integers from 0 to `self.num_frames` with a step of\n `self.recurrence`. 
If the model is not recurrent, they are all the\n integers from 0 to `self.num_frames`.\n\n Returns\n -------\n starting_indexes : list of int\n the indexes of the experiences to be used at first\n \"\"\"\n\n starting_indexes = numpy.arange(0, self.num_frames, self.recurrence)\n return starting_indexes\n", "\"\"\"\n DanM 2019\n inspired from https://github.com/lcswillems/torch-rl\n\"\"\"\nimport numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom argparse import Namespace\nfrom copy import deepcopy\n\nfrom agents.two_v_base_general import TwoValueHeadsBaseGeneral\nfrom torch_rl.utils import DictList\nfrom utils.utils import RunningMeanStd, RewardForwardFilter\nfrom utils.format import preprocess_images\n\n\nclass PPOIcm(TwoValueHeadsBaseGeneral):\n \"\"\"The class for the Proximal Policy Optimization algorithm\n ([Schulman et al., 2015](https://arxiv.org/abs/1707.06347)).\"\"\"\n\n def __init__(self, cfg, envs, acmodel, agent_data, **kwargs):\n num_frames_per_proc = getattr(cfg, \"num_frames_per_proc\", 128)\n discount = getattr(cfg, \"discount\", 0.99)\n gae_lambda = getattr(cfg, \"gae_lambda\", 0.95)\n entropy_coef = getattr(cfg, \"entropy_coef\", 0.01)\n value_loss_coef = getattr(cfg, \"value_loss_coef\", 0.5)\n max_grad_norm = getattr(cfg, \"max_grad_norm\", 0.5)\n recurrence = getattr(cfg, \"recurrence\", 4)\n clip_eps = getattr(cfg, \"clip_eps\", 0.)\n epochs = getattr(cfg, \"epochs\", 4)\n batch_size = getattr(cfg, \"batch_size\", 256)\n\n optimizer = getattr(cfg, \"optimizer\", \"Adam\")\n optimizer_args = getattr(cfg, \"optimizer_args\", {})\n\n exp_used_pred = getattr(cfg, \"exp_used_pred\", 0.25)\n preprocess_obss = kwargs.get(\"preprocess_obss\", None)\n reshape_reward = kwargs.get(\"reshape_reward\", None)\n eval_envs = kwargs.get(\"eval_envs\", [])\n\n self.recurrence_worlds = getattr(cfg, \"recurrence_worlds\", 16)\n self.running_norm_obs = getattr(cfg, \"running_norm_obs\", False)\n self.nminibatches = getattr(cfg, \"nminibatches\", 4)\n self.out_dir = getattr(cfg, \"out_dir\", None)\n self.pre_fill_memories = pre_fill_memories = getattr(cfg, \"pre_fill_memories\", 1)\n\n self.save_experience_batch = getattr(cfg, \"save_experience_batch\", 5)\n\n super().__init__(\n envs, acmodel, num_frames_per_proc, discount, gae_lambda, entropy_coef,\n value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward, exp_used_pred)\n\n self.clip_eps = clip_eps\n self.epochs = epochs\n self.batch_size = batch_size\n self.int_coeff = cfg.int_coeff\n self.ext_coeff = cfg.ext_coeff\n\n assert self.batch_size % self.recurrence == 0\n\n # -- Prepare intrinsic generators\n # self.acmodel.random_target.eval()\n self.predictor_rms = RunningMeanStd()\n self.predictor_rff = RewardForwardFilter(gamma=self.discount)\n\n # -- Prepare optimizers\n optimizer_args = vars(optimizer_args)\n\n self.optimizer_policy = getattr(torch.optim, optimizer)(\n self.acmodel.policy_model.parameters(), **optimizer_args)\n\n self.optimizer_agworld = getattr(torch.optim, optimizer)(\n self.acmodel.curiosity_model.parameters(), **optimizer_args)\n\n self.optimizer_evaluator = getattr(torch.optim, optimizer)(\n self.acmodel.evaluator_network.parameters(), **optimizer_args)\n\n self.optimizer_evaluator_base = getattr(torch.optim, optimizer)(\n self.acmodel.base_evaluator_network.parameters(), **optimizer_args)\n\n if \"optimizer_policy\" in agent_data:\n self.optimizer_policy.load_state_dict(agent_data[\"optimizer_policy\"])\n 
self.optimizer_agworld.load_state_dict(agent_data[\"optimizer_agworld\"])\n self.optimizer_evaluator.load_state_dict(agent_data[\"optimizer_evaluator\"])\n self.optimizer_evaluator_base.load_state_dict(agent_data[\"optimizer_evaluator_base\"])\n self.predictor_rms = agent_data[\"predictor_rms\"] # type: RunningMeanStd\n\n self.batch_num = 0\n self.updates_cnt = 0\n\n # get width and height of the observation space for position normalization\n self.env_width = envs[0][0].unwrapped.width\n self.env_height = envs[0][0].unwrapped.height\n\n if self.running_norm_obs:\n self.collect_random_statistics(50)\n\n # -- Previous batch of experiences last frame\n self.prev_frame_exps = None\n\n # -- Init evaluator envs\n self.eval_envs = None\n self.eval_memory = None\n self.eval_mask = None\n self.eval_icm_memory = None\n self.eval_dir = None\n\n if len(eval_envs) > 0:\n self.eval_envs = self.init_evaluator(eval_envs)\n self.eval_dir = os.path.join(self.out_dir, \"eval\")\n if not os.path.isdir(self.eval_dir):\n os.mkdir(self.eval_dir)\n\n # remember some log values from intrinsic rewards computation\n self.aux_logs = {}\n\n def init_evaluator(self, envs):\n from torch_rl.utils import ParallelEnv\n device = self.device\n acmodel = self.acmodel\n\n eval_envs = ParallelEnv(envs)\n obs = eval_envs.reset()\n\n if self.acmodel.recurrent:\n self.eval_memory = torch.zeros(len(obs), acmodel.memory_size, device=device)\n\n self.eval_agworld_memory = torch.zeros(len(obs), acmodel.curiosity_model.memory_size,\n device=device)\n self.eval_mask = torch.ones(len(obs), device=device)\n return eval_envs\n\n def augment_exp(self, exps):\n\n # from exp (P * T , ** ) -> (T, P, **)\n num_procs = self.num_procs\n num_frames_per_proc = self.num_frames_per_proc\n device = self.device\n env = self.env\n agworld_network = self.acmodel.curiosity_model\n\n shape = torch.Size([num_procs, num_frames_per_proc])\n frame_exp = Namespace()\n\n # ------------------------------------------------------------------------------------------\n # Redo in format T x P\n\n for k, v in exps.items():\n if k == \"obs\":\n continue\n setattr(frame_exp, k, v.view(shape + v.size()[1:]).transpose(0, 1).contiguous())\n\n def inverse_img(t, ii):\n return torch.transpose(torch.transpose(t, ii, ii+2), ii+1, ii+2).contiguous()\n\n frame_exp.obs_image = inverse_img(frame_exp.obs_image, 2)\n #frame_exp.states = inverse_img(frame_exp.states, 2)\n\n def gen_memory(ss):\n return torch.zeros(num_frames_per_proc, num_procs, ss, device=device)\n\n frame_exp.agworld_mems = gen_memory(agworld_network.memory_size)\n frame_exp.agworld_embs = gen_memory(agworld_network.embedding_size)\n\n frame_exp.actions_onehot = gen_memory(env.action_space.n)\n frame_exp.actions_onehot.scatter_(2, frame_exp.action.unsqueeze(2).long(), 1.)\n\n # ------------------------------------------------------------------------------------------\n # Save last frame exp\n\n last_frame_exp = Namespace()\n for k, v in frame_exp.__dict__.items():\n if k == \"obs\":\n continue\n setattr(last_frame_exp, k, v[-1].clone())\n\n prev_frame_exps = self.prev_frame_exps\n if self.prev_frame_exps is None:\n prev_frame_exps = deepcopy(last_frame_exp)\n for k, v in prev_frame_exps.__dict__.items():\n v.zero_()\n\n self.prev_frame_exps = last_frame_exp\n\n # ------------------------------------------------------------------------------------------\n # Fill memories with past\n\n frame_exp.agworld_mems[0] = prev_frame_exps.agworld_mems\n frame_exp.agworld_embs[0] = prev_frame_exps.agworld_embs\n\n return 
frame_exp, prev_frame_exps\n\n @staticmethod\n def flip_back_experience(exp):\n # for all tensors below, T x P -> P x T -> P * T\n for k, v in exp.__dict__.items():\n setattr(exp, k, v.transpose(0, 1).reshape(-1, *v.shape[2:]).contiguous())\n return exp\n\n def update_parameters(self):\n # Collect experiences\n\n exps, logs = self.collect_experiences()\n\n log_entropies = []\n log_values_ext = []\n log_values_int = []\n log_policy_losses = []\n log_value_ext_losses = []\n log_value_int_losses = []\n log_grad_norms = []\n log_ret_int = []\n log_rew_int = []\n batch_ret_int = 0\n batch_rew_int = 0\n\n for epoch_no in range(self.epochs):\n # Initialize log values\n\n # Loop for Policy\n\n for inds in self._get_batches_starting_indexes():\n # Initialize batch values\n\n batch_entropy = 0\n batch_ext_value = 0\n batch_int_value = 0\n batch_policy_loss = 0\n batch_value_ext_loss = 0\n batch_value_int_loss = 0\n batch_loss = 0\n\n # Initialize memory\n\n if self.acmodel.recurrent:\n memory = exps.memory[inds]\n\n for i in range(self.recurrence):\n # Create a sub-batch of experience\n\n sb = exps[inds + i]\n # Compute loss\n\n if self.acmodel.recurrent:\n dist, vvalue, memory = self.acmodel.policy_model(sb.obs, memory * sb.mask)\n else:\n dist, vvalue = self.acmodel.policy_model(sb.obs)\n\n entropy = dist.entropy().mean()\n\n ratio = torch.exp(dist.log_prob(sb.action) - sb.log_prob)\n adv = (self.int_coeff * sb.advantage_int + self.ext_coeff * sb.advantage_ext)\n surr1 = ratio * adv\n surr2 = torch.clamp(ratio, 1.0 - self.clip_eps, 1.0 + self.clip_eps) * adv\n policy_loss = -torch.min(surr1, surr2).mean()\n\n # Value losses\n value_ext, value_int = vvalue\n\n value_ext_clipped = sb.value_ext + torch.clamp(value_ext - sb.value_ext, -self.clip_eps, self.clip_eps)\n surr1 = (value_ext - sb.returnn_ext).pow(2)\n surr2 = (value_ext_clipped - sb.returnn_ext).pow(2)\n value_ext_loss = torch.max(surr1, surr2).mean()\n\n value_int_clipped = sb.value_int + torch.clamp(value_int - sb.value_int, -self.clip_eps, self.clip_eps)\n surr1 = (value_int - sb.returnn_int).pow(2)\n surr2 = (value_int_clipped - sb.returnn_int).pow(2)\n value_int_loss = torch.max(surr1, surr2).mean()\n\n loss = policy_loss - self.entropy_coef * entropy + \\\n (0.5 * self.value_loss_coef) * value_int_loss + \\\n (0.5 * self.value_loss_coef) * value_ext_loss\n\n # Update batch values\n\n batch_entropy += entropy.item()\n batch_ext_value += value_ext.mean().item()\n batch_int_value += value_int.mean().item()\n batch_policy_loss += policy_loss.item()\n batch_value_ext_loss += value_ext_loss.item()\n batch_value_int_loss += value_int_loss.item()\n batch_loss += loss\n batch_ret_int += sb.returnn_int.mean().item()\n batch_rew_int += sb.reward_int.mean().item()\n\n # Update memories for next epoch\n\n if self.acmodel.recurrent and i < self.recurrence - 1:\n exps.memory[inds + i + 1] = memory.detach()\n\n # Update batch values\n\n batch_entropy /= self.recurrence\n batch_ext_value /= self.recurrence\n batch_int_value /= self.recurrence\n batch_policy_loss /= self.recurrence\n batch_value_ext_loss /= self.recurrence\n batch_value_int_loss /= self.recurrence\n batch_loss /= self.recurrence\n batch_rew_int /= self.recurrence\n batch_ret_int /= self.recurrence\n\n # Update actor-critic\n self.optimizer_policy.zero_grad()\n batch_loss.backward()\n grad_norm = sum(\n p.grad.data.norm(2).item() ** 2 for p in self.acmodel.policy_model.parameters()\n if p.grad is not None\n ) ** 0.5\n 
torch.nn.utils.clip_grad_norm_(self.acmodel.policy_model.parameters(),\n self.max_grad_norm)\n self.optimizer_policy.step()\n\n # Update log values\n\n log_entropies.append(batch_entropy)\n log_values_ext.append(batch_ext_value)\n log_values_int.append(batch_int_value)\n log_policy_losses.append(batch_policy_loss)\n log_value_ext_losses.append(batch_value_ext_loss)\n log_value_int_losses.append(batch_value_int_loss)\n log_grad_norms.append(grad_norm)\n log_ret_int.append(batch_ret_int)\n log_rew_int.append(batch_rew_int)\n\n # Log some values\n\n logs[\"entropy\"] = np.mean(log_entropies)\n logs[\"value_ext\"] = np.mean(log_values_ext)\n logs[\"value_int\"] = np.mean(log_values_int)\n logs[\"value\"] = logs[\"value_ext\"] + logs[\"value_int\"]\n logs[\"policy_loss\"] = np.mean(log_policy_losses)\n logs[\"value_ext_loss\"] = np.mean(log_value_ext_losses)\n logs[\"value_int_loss\"] = np.mean(log_value_int_losses)\n logs[\"value_loss\"] = logs[\"value_int_loss\"] + logs[\"value_ext_loss\"]\n logs[\"grad_norm\"] = np.mean(log_grad_norms)\n logs[\"return_int\"] = np.mean(log_ret_int)\n logs[\"reward_int\"] = np.mean(log_rew_int)\n\n # add extra logs from intrinsic rewards\n for k in self.aux_logs:\n logs[k] = self.aux_logs[k]\n\n self.updates_cnt += 1\n return logs\n\n def _get_batches_starting_indexes(self, recurrence=None, padding=0):\n \"\"\"Gives, for each batch, the indexes of the observations given to\n the model and the experiences used to compute the loss at first.\n\n First, the indexes are the integers from 0 to `self.num_frames` with a step of\n `self.recurrence`, shifted by `self.recurrence//2` one time in two for having\n more diverse batches. Then, the indexes are splited into the different batches.\n\n Returns\n -------\n batches_starting_indexes : list of list of int\n the indexes of the experiences to be used at first for each batch\n\n \"\"\"\n num_frames_per_proc = self.num_frames_per_proc\n num_procs = self.num_procs\n\n if recurrence is None:\n recurrence = self.recurrence\n\n # Consider Num frames list ordered P * T\n if padding == 0:\n indexes = np.arange(0, self.num_frames, recurrence)\n else:\n # Consider Num frames list ordered P * T\n # Do not index step[:padding] and step[-padding:]\n frame_index = np.arange(padding, num_frames_per_proc-padding+1-recurrence, recurrence)\n indexes = np.resize(frame_index.reshape((1, -1)), (num_procs, len(frame_index)))\n indexes = indexes + np.arange(0, num_procs).reshape(-1, 1) * num_frames_per_proc\n indexes = indexes.reshape(-1)\n\n indexes = np.random.permutation(indexes)\n\n # Shift starting indexes by recurrence//2 half the time\n # TODO Check this ; Bad fix\n if recurrence is None:\n self.batch_num += 1\n\n num_indexes = self.batch_size // recurrence\n batches_starting_indexes = [\n indexes[i:i+num_indexes] for i in range(0, len(indexes), num_indexes)\n ]\n\n return batches_starting_indexes\n\n def get_save_data(self):\n return dict({\n \"optimizer_policy\": self.optimizer_policy.state_dict(),\n \"optimizer_agworld\": self.optimizer_agworld.state_dict(),\n \"optimizer_evaluator\": self.optimizer_evaluator.state_dict(),\n \"predictor_rms\": self.predictor_rms,\n })\n\n def collect_random_statistics(self, num_timesteps):\n # initialize observation normalization with data from random agent\n\n self.obs_rms = RunningMeanStd(shape=(1, 7, 7, 3))\n\n curr_obs = self.obs\n collected_obss = [None] * (self.num_frames_per_proc * num_timesteps)\n for i in range(self.num_frames_per_proc * num_timesteps):\n # Do one agent-environment 
interaction\n\n action = torch.randint(0, self.env.action_space.n, (self.num_procs,)) # sample uniform actions\n obs, reward, done, _ = self.env.step(action.cpu().numpy())\n\n # Update experiences values\n collected_obss[i] = curr_obs\n curr_obs = obs\n\n self.obs = curr_obs\n exps = DictList()\n exps.obs = [collected_obss[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc * num_timesteps)]\n\n images = [obs[\"image\"] for obs in exps.obs]\n images = np.array(images)\n images = torch.tensor(images, dtype=torch.float)\n\n self.obs_rms.update(images)\n\n def calculate_intrinsic_reward(self, exps: DictList, dst_intrinsic_r: torch.Tensor):\n\n # ------------------------------------------------------------------------------------------\n # Run worlds models & generate memories\n\n agworld_network = self.acmodel.curiosity_model\n evaluator_network = self.acmodel.evaluator_network\n base_eval_network = self.acmodel.base_evaluator_network\n\n num_procs = self.num_procs\n num_frames_per_proc = self.num_frames_per_proc\n device = self.device\n beta = 0.2 # loss factor for Forward and Dynamics Models\n\n # ------------------------------------------------------------------------------------------\n # Get observations and full states\n f, prev_frame_exps = self.augment_exp(exps)\n\n # Save state\n\n out_dir = self.eval_dir\n updates_cnt = self.updates_cnt\n save_experience_batch = self.save_experience_batch\n save = save_experience_batch > 0 and (updates_cnt + 1) % save_experience_batch == 0\n\n # ------------------------------------------------------------------------------------------\n if self.pre_fill_memories:\n prev_actions = prev_frame_exps.actions_onehot\n for i in range(num_frames_per_proc - 1):\n obs = f.obs_image[i]\n masks = f.mask[i]\n\n # Do one agent-environment interaction\n with torch.no_grad():\n _, f.agworld_mems[i + 1], f.agworld_embs[i] = \\\n agworld_network(obs, f.agworld_mems[i] * masks, prev_actions)\n prev_actions = f.actions_onehot[i]\n\n # ------------------------------------------------------------------------------------------\n # -- Compute Intrinsic rewards\n\n pred_next_state = torch.zeros(num_frames_per_proc + 1, num_procs,\n agworld_network.memory_size, device=device)\n next_state = torch.zeros(num_frames_per_proc + 1, num_procs,\n agworld_network.memory_size, device=device)\n\n prev_actions = prev_frame_exps.actions_onehot\n prev_memory = prev_frame_exps.agworld_mems\n for i in range(num_frames_per_proc):\n obs = f.obs_image[i]\n masks = f.mask[i]\n actions = f.actions_onehot[i]\n\n #Do one agent-environment interaction\n with torch.no_grad():\n _, next_state[i], embs = agworld_network(\n obs, prev_memory * masks, prev_actions)\n\n pred_next_state[i + 1] = agworld_network.forward_state(\n next_state[i], actions)\n\n prev_actions = actions\n prev_memory = next_state[i]\n\n\n # TODO fix the last intrinsic reward value as it is pred - 0\n dst_intrinsic_r = (pred_next_state[1:] - next_state[1:]).detach().pow(2).sum(2)\n\n # --Normalize intrinsic reward\n #self.predictor_rff.reset() # do you have to rest it every time ???\n int_rff = torch.zeros((self.num_frames_per_proc, self.num_procs), device=self.device)\n\n for i in reversed(range(self.num_frames_per_proc)):\n int_rff[i] = self.predictor_rff.update(dst_intrinsic_r[i])\n\n self.predictor_rms.update(int_rff.view(-1)) # running mean statisics\n # dst_intrinsic_r.sub_(self.predictor_rms.mean.to(dst_intrinsic_r.device))\n 
dst_intrinsic_r.div_(torch.sqrt(self.predictor_rms.var).to(dst_intrinsic_r.device))\n\n #if save:\n # f.dst_intrinsic = dst_intrinsic_r.clone()\n # torch.save(f, f\"{out_dir}/f_{updates_cnt}\")\n # delattr(f, \"dst_intrinsic\")\n\n\n # ------------------------------------------------------------------------------------------\n # -- Optimize ICM\n optimizer_evaluator = self.optimizer_evaluator\n optimizer_evaluator_base = self.optimizer_evaluator_base\n optimizer_agworld = self.optimizer_agworld\n recurrence_worlds = self.recurrence_worlds\n\n max_grad_norm = self.max_grad_norm\n\n # ------------------------------------------------------------------------------------------\n # _________ for all tensors below, T x P -> P x T -> P * T _______________________\n f = self.flip_back_experience(f)\n # ------------------------------------------------------------------------------------------\n\n loss_m_state = torch.nn.MSELoss()\n loss_m_act = torch.nn.CrossEntropyLoss()\n loss_m_eval = torch.nn.CrossEntropyLoss()\n loss_m_eval_base = torch.nn.CrossEntropyLoss()\n\n log_state_loss = []\n log_state_loss_same = []\n log_state_loss_diffs = []\n\n log_act_loss = []\n log_act_loss_same = []\n log_act_loss_diffs = []\n\n log_evaluator_loss = []\n log_base_evaluator_loss = []\n\n for inds in self._get_batches_starting_indexes(recurrence=recurrence_worlds, padding=1):\n\n agworld_mem = f.agworld_mems[inds].detach()\n new_agworld_emb = [None] * recurrence_worlds\n new_agworld_mem = [None] * recurrence_worlds\n\n state_batch_loss = torch.zeros(1, device=self.device)[0]\n\n state_batch_loss_same = torch.zeros(1, device=self.device)[0]\n state_batch_loss_diffs = torch.zeros(1, device=self.device)[0]\n\n act_batch_loss = torch.zeros(1, device=self.device)[0]\n act_batch_loss_same = torch.zeros(1, device=self.device)[0]\n act_batch_loss_diff = torch.zeros(1, device=self.device)[0]\n evaluator_batch_loss = torch.zeros(1, device=self.device)[0]\n evaluator_base_batch_loss = torch.zeros(1, device=self.device)[0]\n\n log_grad_agworld_norm = []\n log_grad_eval_norm = []\n\n # -- Agent world\n for i in range(recurrence_worlds):\n\n obs = f.obs_image[inds + i].detach()\n mask = f.mask[inds + i]\n prev_actions_one = f.actions_onehot[inds + i - 1].detach()\n pos = f.position[inds + i].detach()\n\n # Forward pass Agent Net for memory\n _, agworld_mem, new_agworld_emb[i] = agworld_network(\n obs, agworld_mem * mask, prev_actions_one)\n new_agworld_mem[i] = agworld_mem\n\n # Compute loss for evaluator using cross entropy loss\n pred_pos = evaluator_network(new_agworld_mem[i].detach())\n target_pos = (self.env_height * pos[:, 0] + pos[:, 1]).type(torch.long)\n\n evaluator_batch_loss += loss_m_eval(pred_pos, target_pos)\n\n # Compute loss for a random input using cross entropy to obtain a base\n # for evaluator\n\n random_input = torch.rand_like(new_agworld_mem[i].detach())\n pred_pos_base = base_eval_network(random_input)\n target_pos_base = (self.env_height * pos[:, 0] + pos[:, 1]).type(torch.long)\n evaluator_base_batch_loss += loss_m_eval_base(pred_pos_base, target_pos_base)\n\n\n # Go back and predict action(t) given state(t) & embedding (t+1)\n # and predict state(t + 1) given state(t) and action(t)\n for i in range(recurrence_worlds - 1):\n\n obs = f.obs_image[inds + i].detach()\n next_obs = f.obs_image[inds + i + 1].detach()\n\n # take masks and convert them to 1D tensor for indexing\n # use next masks because done gives you the new game obs\n next_mask = f.mask[inds + i + 1].long().detach()\n next_mask = 
next_mask.squeeze(1).type(torch.ByteTensor)\n\n crt_actions = f.action[inds + i].long().detach()\n crt_actions_one = f.actions_onehot[inds + i].detach()\n\n pred_act = agworld_network.forward_action(new_agworld_mem[i], new_agworld_emb[i+1])\n\n pred_state = agworld_network.forward_state(new_agworld_mem[i], crt_actions_one)\n\n act_batch_loss += loss_m_act(pred_act, crt_actions)\n state_batch_loss += loss_m_state(pred_state, new_agworld_mem[i + 1].detach())\n\n # if all episodes ends at once, can't compute same/diff losses\n if next_mask.sum() == 0:\n continue\n\n same = (obs[next_mask] == next_obs[next_mask]).all(1).all(1).all(1)\n\n s_pred_act = pred_act[next_mask]\n s_crt_act = crt_actions[next_mask]\n\n s_pred_state = pred_state[next_mask]\n s_crt_state = (new_agworld_mem[i + 1].detach())[next_mask]\n\n # if all are same/diff take care to empty tensors\n if same.sum() == same.shape[0]:\n act_batch_loss_same += loss_m_act(s_pred_act[same], s_crt_act[same])\n state_batch_loss_same += loss_m_state(s_pred_state[same], s_crt_state[same])\n\n elif same.sum() == 0:\n act_batch_loss_diff += loss_m_act(s_pred_act[~same], s_crt_act[~same])\n state_batch_loss_diffs += loss_m_state(s_pred_state[~same], s_crt_state[~same])\n\n else:\n act_batch_loss_same += loss_m_act(s_pred_act[same], s_crt_act[same])\n act_batch_loss_diff += loss_m_act(s_pred_act[~same], s_crt_act[~same])\n\n state_batch_loss_same += loss_m_state(s_pred_state[same], s_crt_state[same])\n state_batch_loss_diffs += loss_m_state(s_pred_state[~same], s_crt_state[~same])\n\n # -- Optimize models\n act_batch_loss /= (recurrence_worlds - 1)\n state_batch_loss /= (recurrence_worlds - 1)\n evaluator_batch_loss /= recurrence_worlds\n evaluator_base_batch_loss /= recurrence_worlds\n\n act_batch_loss_same /= (recurrence_worlds - 1)\n act_batch_loss_diff /= (recurrence_worlds - 1)\n\n state_batch_loss_same /= (recurrence_worlds - 1)\n state_batch_loss_diffs /= (recurrence_worlds - 1)\n\n ag_loss = (1 - beta) * act_batch_loss + beta * state_batch_loss\n\n optimizer_agworld.zero_grad()\n optimizer_evaluator.zero_grad()\n optimizer_evaluator_base.zero_grad()\n\n ag_loss.backward()\n evaluator_batch_loss.backward()\n evaluator_base_batch_loss.backward()\n\n grad_agworld_norm = sum(\n p.grad.data.norm(2).item() ** 2 for p in agworld_network.parameters()\n if p.grad is not None\n ) ** 0.5\n grad_eval_norm = sum(\n p.grad.data.norm(2).item() ** 2 for p in evaluator_network.parameters()\n if p.grad is not None\n ) ** 0.5\n\n torch.nn.utils.clip_grad_norm_(evaluator_network.parameters(), max_grad_norm)\n torch.nn.utils.clip_grad_norm_(agworld_network.parameters(), max_grad_norm)\n torch.nn.utils.clip_grad_norm_(base_eval_network.parameters(), max_grad_norm)\n\n #log some shit\n\n log_act_loss.append(act_batch_loss.item())\n log_state_loss.append(state_batch_loss.item())\n log_evaluator_loss.append(evaluator_batch_loss.item())\n log_base_evaluator_loss.append(evaluator_base_batch_loss.item())\n\n log_grad_agworld_norm.append(grad_agworld_norm)\n log_grad_eval_norm.append(grad_eval_norm)\n\n log_act_loss_diffs.append(act_batch_loss_diff.item())\n log_act_loss_same.append(act_batch_loss_same.item())\n\n log_state_loss_same.append(state_batch_loss_same.item())\n log_state_loss_diffs.append(state_batch_loss_diffs.item())\n\n optimizer_evaluator.step()\n optimizer_agworld.step()\n optimizer_evaluator_base.step()\n\n # ------------------------------------------------------------------------------------------\n # Log some values\n 
self.aux_logs['next_state_loss'] = np.mean(log_state_loss)\n self.aux_logs['next_action_loss'] = np.mean(log_act_loss)\n self.aux_logs['position_loss'] = np.mean(log_evaluator_loss)\n self.aux_logs['base_positon_loss'] = np.mean(log_base_evaluator_loss)\n self.aux_logs['grad_norm_icm'] = np.mean(log_grad_agworld_norm)\n self.aux_logs['grad_norm_pos'] = np.mean(log_grad_eval_norm)\n\n self.aux_logs['next_state_loss_same'] = np.mean(log_state_loss_same)\n self.aux_logs['next_state_loss_diffs'] = np.mean(log_state_loss_diffs)\n\n self.aux_logs['next_act_loss_same'] = np.mean(log_act_loss_same)\n self.aux_logs['next_act_loss_diffs'] = np.mean(log_act_loss_diffs)\n\n return dst_intrinsic_r\n\n def add_extra_experience(self, exps: DictList):\n # Process\n full_positions = [self.obss[i][j][\"position\"]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n # Process\n full_states = [self.obss[i][j][\"state\"]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n\n exps.states = preprocess_images(full_states, device=self.device)\n max_pos_value = max(self.env_height, self.env_width)\n exps.position = preprocess_images(full_positions,\n device=self.device,\n max_image_value=max_pos_value,\n normalize=False)\n exps.obs_image = exps.obs.image\n\n def evaluate(self):\n # set networks in eval mode\n self.acmodel.eval()\n\n out_dir = self.eval_dir\n env = self.eval_envs\n preprocess_obss = self.preprocess_obss\n device = self.device\n recurrent = self.acmodel.recurrent\n acmodel = self.acmodel\n evaluator_network = acmodel.evaluator_network\n agworld_network = acmodel.curiosity_model\n updates_cnt = self.updates_cnt\n\n obs = env.reset()\n if recurrent:\n memory = self.eval_memory\n\n mask = self.eval_mask.fill_(1).unsqueeze(1)\n eval_ag_memory = self.eval_agworld_memory.zero_()\n\n prev_actions_a = torch.zeros((len(obs), env.action_space.n), device=device)\n crt_actions = torch.zeros((len(obs), env.action_space.n), device=device)\n pred_act = torch.zeros((len(obs), env.action_space.n), device=device)\n\n prev_agworld_mem = None\n obs_batch = None\n\n transitions = []\n steps = 200\n\n for i in range(steps):\n\n prev_obs = obs_batch\n preprocessed_obs = preprocess_obss(obs, device=device)\n obs_batch = torch.transpose(torch.transpose(preprocessed_obs.image, 1, 3), 2, 3)\n\n pos_batch = preprocess_images(\n [obs[i]['position'] for i in range(len(obs))],\n device=device,\n normalize=False\n )\n full_state_batch = preprocess_images(\n [obs[i]['state'] for i in range(len(obs))],\n device=device,\n )\n\n with torch.no_grad():\n if recurrent:\n dist, value, memory = acmodel(preprocessed_obs, memory * mask)\n else:\n dist, value = acmodel(preprocessed_obs)\n\n action = dist.sample()\n crt_actions.zero_()\n crt_actions.scatter_(1, action.long().unsqueeze(1), 1.) 
# transform to one_hot\n\n # Agent world\n _, eval_ag_memory, new_agworld_emb = agworld_network(obs_batch, eval_ag_memory * mask,\n prev_actions_a)\n\n if prev_agworld_mem is not None:\n pred_act = agworld_network.forward_action(prev_agworld_mem, new_agworld_emb)\n\n pred_state = agworld_network.forward_state(eval_ag_memory, crt_actions)\n\n # Evaluation network\n pred_position = F.softmax(\n evaluator_network(eval_ag_memory.detach()),\n dim=1)\n prev_agworld_mem = eval_ag_memory\n\n next_obs, reward, done, _ = env.step(action.cpu().numpy())\n\n mask = (1 - torch.tensor(done, device=device, dtype=torch.float)).unsqueeze(1)\n\n prev_actions_a.copy_(crt_actions)\n\n transitions.append((obs, action.cpu(), reward, done, next_obs, dist.probs.cpu(),\n pos_batch.cpu(), pred_state.cpu(), pred_position.cpu(),\n eval_ag_memory.cpu(), new_agworld_emb.cpu(), pred_act.cpu(),\n obs_batch.cpu(), full_state_batch.cpu()))\n obs = next_obs\n\n if out_dir is not None:\n np.save(f\"{out_dir}/eval_{updates_cnt}\",\n {\"transitions\": transitions,\n \"columns\": [\"obs\", \"action\", \"reward\", \"done\", \"next_obs\", \"probs\",\n \"pos_batch\", \"pred_emb_state\", \"pos_predict\",\"eval_ag_memory\",\n \"new_agworld_emb\", \"pred_act\", \"obs_batch\", \"full_state_batch\"]})\n\n self.acmodel.train()\n\n return None" ]
[ [ "numpy.arange" ], [ "torch.Size", "torch.zeros", "numpy.array", "torch.sqrt", "torch.nn.MSELoss", "torch.min", "torch.max", "numpy.random.permutation", "torch.no_grad", "numpy.mean", "numpy.save", "torch.clamp", "torch.randint", "torch.tensor", "numpy.arange", "torch.transpose", "torch.nn.CrossEntropyLoss" ] ]
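update_parameters in the ppo_icm.py record applies the standard PPO clipped surrogate to the policy (and the same clamp trick to both value heads). The policy part isolated on toy tensors; clip_eps = 0.2 is an assumed value, since the class reads it from cfg:

```python
import torch

clip_eps = 0.2  # assumed; PPOIcm reads "clip_eps" from cfg
log_prob_new = torch.tensor([-0.9, -1.1, -0.4])  # dist.log_prob(sb.action)
log_prob_old = torch.tensor([-1.0, -1.0, -1.0])  # sb.log_prob
adv = torch.tensor([1.0, -0.5, 2.0])             # mixed int/ext advantage

ratio = torch.exp(log_prob_new - log_prob_old)
surr1 = ratio * adv
surr2 = torch.clamp(ratio, 1.0 - clip_eps, 1.0 + clip_eps) * adv
policy_loss = -torch.min(surr1, surr2).mean()

print(policy_loss.item())
```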
BruceW91/cogdl
[ "1ad524375f5ba062103698a0432fc857572a6933" ]
[ "tests/tasks/test_graph_classification.py" ]
[ "import torch\nfrom cogdl import options\nfrom cogdl.tasks import build_task\nfrom cogdl.datasets import build_dataset\nfrom cogdl.models import build_model\nfrom cogdl.utils import build_args_from_dict\n\n\ndef get_default_args():\n cuda_available = torch.cuda.is_available()\n default_dict = {\n 'task': 'graph_classification',\n 'hidden_size': 64,\n 'dropout': 0.5,\n 'patience': 1,\n 'max_epoch': 2,\n 'cpu': not cuda_available,\n 'lr': 0.001,\n 'kfold': False,\n 'seed': [0],\n 'weight_decay': 5e-4,\n 'gamma': 0.5,\n 'train_ratio': 0.7,\n 'test_ratio': 0.1,\n 'device_id': [0 if cuda_available else 'cpu'],\n 'degree_feature': False}\n return build_args_from_dict(default_dict)\n\n\ndef add_diffpool_args(args):\n args.num_layers = 2\n args.num_pooling_layers = 1\n args.no_link_pred = False\n args.pooling_ratio = 0.15\n args.embedding_dim = 64\n args.hidden_size = 64\n args.batch_size = 20\n args.dropout = 0.1\n return args\n\n\ndef add_gin_args(args):\n args.epsilon = 0.\n args.hidden_size = 32\n args.num_layers = 5\n args.num_mlp_layers = 2\n args.train_epsilon = True\n args.pooling = 'sum'\n args.batch_size = 128\n return args\n\n\ndef add_dgcnn_args(args):\n args.hidden_size = 64\n args.batch_size = 20\n return args\n\n\ndef add_sortpool_args(args):\n args.hidden_size = 64\n args.batch_size = 20\n args.num_layers = 2\n args.out_channels = 32\n args.k = 30\n args.kernel_size = 5\n return args\n\n\ndef add_patchy_san_args(args):\n args.hidden_size = 64\n args.batch_size = 20\n args.sample = 10\n args.stride = 1\n args.neighbor = 10\n args.iteration = 2\n args.train_ratio = 0.7\n args.test_ratio = 0.1\n return args \n\ndef test_gin_mutag():\n args = get_default_args()\n args = add_gin_args(args)\n args.dataset = 'mutag'\n args.model = 'gin'\n args.batch_size = 32\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n\n\ndef test_gin_imdb_binary():\n args = get_default_args()\n args = add_gin_args(args)\n args.dataset = 'imdb-b'\n args.model = 'gin'\n args.degree_feature = True\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n\n\ndef test_gin_proteins():\n args = get_default_args()\n args = add_gin_args(args)\n args.dataset = 'proteins'\n args.model = 'gin'\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n\n\ndef test_diffpool_mutag():\n args = get_default_args()\n args = add_diffpool_args(args)\n args.dataset = 'mutag'\n args.model = 'diffpool'\n args.batch_size = 5\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n\n\ndef test_diffpool_proteins():\n args = get_default_args()\n args = add_diffpool_args(args)\n args.dataset = 'proteins'\n args.model = 'diffpool'\n args.batch_size = 20\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n\n\n# def test_dgcnn_modelnet10():\n# args = get_default_args()\n# args = add_dgcnn_args(args)\n# args.dataset = 'ModelNet10'\n# args.model = 'pyg_dgcnn'\n# task = build_task(args)\n# ret = task.train()\n# assert ret[\"Acc\"] > 0\n\n\ndef test_dgcnn_proteins():\n args = get_default_args()\n args = add_dgcnn_args(args)\n args.dataset = 'proteins'\n args.model = 'dgcnn'\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n\n\ndef test_dgcnn_imdb_binary():\n args = get_default_args()\n args = add_dgcnn_args(args)\n args.dataset = 'imdb-b'\n args.model = 'dgcnn'\n args.degree_feature = True\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n\ndef test_sortpool_mutag():\n args = get_default_args()\n args = 
add_sortpool_args(args)\n args.dataset = 'mutag'\n args.model = 'sortpool'\n args.batch_size = 20\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n\n\ndef test_sortpool_proteins():\n args = get_default_args()\n args = add_sortpool_args(args)\n args.dataset = 'proteins'\n args.model = 'sortpool'\n args.batch_size = 20\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n\n\ndef test_patchy_san_mutag():\n args = get_default_args()\n args = add_patchy_san_args(args)\n args.dataset = 'mutag'\n args.model = 'patchy_san'\n args.batch_size = 20\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0\n \n\ndef test_patchy_san_proteins():\n args = get_default_args()\n args = add_patchy_san_args(args)\n args.dataset = 'proteins'\n args.model = 'patchy_san'\n args.batch_size = 20\n task = build_task(args)\n ret = task.train()\n assert ret[\"Acc\"] > 0 \n\n\nif __name__ == \"__main__\":\n \n test_gin_imdb_binary()\n test_gin_mutag()\n test_gin_proteins()\n\n test_sortpool_mutag()\n test_sortpool_proteins()\n\n test_diffpool_mutag()\n test_diffpool_proteins()\n\n test_dgcnn_proteins()\n test_dgcnn_imdb_binary()\n # test_dgcnn_modelnet10()\n \n test_patchy_san_mutag()\n test_patchy_san_proteins()" ]
[ [ "torch.cuda.is_available" ] ]
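A generic, standalone stand-in for the args pattern the tests above rely on (a config dict turned into an attribute-style namespace). This sketch uses only the standard library and does not reproduce cogdl's build_args_from_dict, whose internals are not shown here.

from types import SimpleNamespace

def build_args(d):
    # minimal stand-in: turn a config dict into an attribute-style namespace
    return SimpleNamespace(**d)

args = build_args({'task': 'graph_classification', 'hidden_size': 64})
args.batch_size = 32  # per-test override, mirroring add_gin_args() above
print(args.task, args.hidden_size, args.batch_size)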
deitrr/vplanet
[ "7e1276cb5f02610ae289c899472459e665695529" ]
[ "tests/StellarEvol/test_StellarEvol.py" ]
[ "from benchmark import Benchmark, benchmark\nimport astropy.units as u\nimport pytest\nimport numpy as np\n\n\n@benchmark(\n {\n \"log.final.a.Luminosity\": {\"value\": 0.002445, \"unit\": u.LSUN},\n \"log.final.a.Temperature\": {\"value\": 2992.330343, \"unit\": u.K},\n \"log.final.a.Radius\": {\"value\": 0.184359, \"unit\": u.Rsun, \"rtol\": 1e-4,},\n \"log.final.a.LostAngMom\": {\n \"value\": 9.438647e+41,\n \"unit\": u.kg * u.m ** 2 / u.sec,\n \"rtol\": 1e-4,\n },\n \"log.final.a.RotPer\": {\"value\": 0.299254, \"unit\": u.days},\n \"log.final.b.Luminosity\": {\"value\": 0.689088, \"unit\": u.LSUN, \"rtol\": 1e-4,},\n \"log.final.b.Temperature\": {\"value\": 5539.190016, \"unit\": u.K},\n \"log.final.b.Radius\": {\"value\": 0.902638, \"unit\": u.Rsun, \"rtol\": 1e-4,},\n \"log.final.b.LostAngMom\": {\n \"value\": 5.085996e+43,\n \"unit\": u.kg * u.m ** 2 / u.sec,\n \"rtol\": 1e-4,\n },\n \"log.final.b.RotPer\": {\"value\": 3.075721, \"unit\": u.days, \"rtol\": 1e-4},\n \"log.final.b.Density\": {\"value\": 1916.956727, \"unit\": u.kg / u.m ** 3},\n }\n)\nclass TestStellarEvol(Benchmark):\n pass\n\n\ndef test_energy_conservation(vplanet_output):\n Einit = vplanet_output.log.initial.system.TotEnergy\n Efinal = vplanet_output.log.initial.system.TotEnergy\n assert np.isclose(Einit, Efinal, rtol=1e-3)\n\n\ndef test_angmom_conservation(vplanet_output):\n Linit = vplanet_output.log.initial.system.TotAngMom\n Lfinal = vplanet_output.log.final.system.TotAngMom\n assert np.isclose(Linit, Lfinal, rtol=1e-3)\n" ]
[ [ "numpy.isclose" ] ]
jhpark428/studio
[ "3bc547fdf85ae6be80c1b40916f9f5d31d2b3f75" ]
[ "function/python/brightics/function/io/read_excel.py" ]
[ "\"\"\"\n Copyright 2019 Samsung SDS\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport pandas as pd\n\n\ndef read_excel(path, sheet_index=0):\n table = pd.read_excel(path, sheet_name=sheet_index)\n return {\"table\": table}\n" ]
[ [ "pandas.read_excel" ] ]
Aakriti05/ORCA_Astar-warehouse-navigation
[ "8699b30c25cacf1a7be1f56dc34db90d5a757b36" ]
[ "layouts/count_1.py" ]
[ "import numpy as np\nimport random\nfrom numpy import savetxt\n\ndata = np.loadtxt('tight_2_64.cfg', skiprows=10)\nprint(data)\nprint(np.shape(data), type(data))\nprint(data[3][3], type(data[3][3]))\n\nl = np.shape(data)[0]\n#expanded_layout = np.zeros((2*l,2*l), dtype=int)\ncount = 0\ncount_zero = 0\nfor i in range(l):\n\tfor j in range(l):\n\t\tif(data[i][j] == 1):\n\t\t\tcount = count+1\n\t\telif(data[i][j] == 0):\n\t\t\tcount_zero = count_zero+1\n\nprint(count, \" \", count_zero, \" \", count+count_zero)\n" ]
[ [ "numpy.loadtxt", "numpy.shape" ] ]
alexanu/trading-with-python
[ "e6d6f6daea3089aebcd0f48c2e69c8bfe23c332c" ]
[ "lib/csvDatabase.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nintraday data handlers in csv format.\n\n@author: jev\n\"\"\"\n\n\n\nimport pandas as pd\nimport datetime as dt\nimport os\nimport numpy as np\nfrom .extra import ProgressBar\n\ndateFormat = \"%Y%m%d\" # date format for converting filenames to dates\ndateTimeFormat = \"%Y%m%d%H%M%S\"\n\ndef fileName2date(fName):\n '''convert filename to date'''\n name = os.path.splitext(fName)[0]\n try:\n return dt.datetime.strptime(name.split('_')[1],dateTimeFormat) \n except ValueError:\n return dt.datetime.strptime(name.split('_')[1],dateFormat) \n \ndef parseDateTime(dateTimeStr):\n return dt.datetime.strptime(dateTimeStr,dateTimeFormat)\n \ndef loadCsv(fName):\n ''' load DataFrame from csv file '''\n return pd.DataFrame.from_csv(fName)\n \n \nclass HistDataCsv(object):\n '''class for working with historic database in .csv format'''\n def __init__(self,symbol,dbDir,autoCreateDir=False):\n self.symbol = symbol\n self.dbDir = os.path.normpath(os.path.join(dbDir,symbol))\n \n if not os.path.exists(self.dbDir) and autoCreateDir:\n print('Creating data directory ', self.dbDir)\n os.mkdir(self.dbDir)\n \n self.dates = [] \n \n @property \n def files(self):\n \"\"\" a list of csv files present \"\"\"\n files = os.listdir(self.dbDir)\n files.sort()\n \n return files\n \n def loadAll(self):\n \"\"\" load all files from the database and return as DataFrame \"\"\"\n \n files = self.files\n \n data = [self._loadCsv(f) for f in files]\n data = pd.concat(data) \n \n data = data.groupby(data.index).first() # remove duplicate rows\n \n return data\n \n def to_hdf(self,fName):\n \"\"\" \n convert data to hdf5 file. If no fName is provided, the file is created in\n the database root directory \"\"\"\n \n df = self.loadAll()\n df.to_hdf(fName,self.symbol) \n \n @property \n def dateRange(self):\n \"\"\" get min and max values of the timestamps in database \"\"\"\n \n files = self.files\n if len(files) == 0:\n return (None, None)\n \n ts = [fileName2date(fName) for fName in files]\n \n # earliest\n t0 = self._loadCsv(files[np.argmin(ts)]).index[0]\n t1 = self._loadCsv(files[np.argmax(ts)]).index[-1] \n \n return (t0,t1)\n \n def _loadCsv(self,fName):\n \"\"\" convenience function, prepending right path \"\"\"\n return pd.DataFrame.from_csv(os.path.join(self.dbDir,fName))\n \n def saveData(self, df,lowerCaseColumns=True):\n ''' add data to database'''\n \n if lowerCaseColumns: # this should provide consistency to column names. All lowercase\n df.columns = [ c.lower() for c in df.columns]\n \n s = self.symbol+'_'+df.index[-1].strftime(dateTimeFormat)+'.csv' # file name\n dest = os.path.join(self.dbDir,s) # full path destination\n print('Saving data to: ', dest)\n df.to_csv(dest)\n \n \n \n def __repr__(self):\n rng = self.dateRange\n return '%s dataset %i files\\nrange: %s ... %s' % (self.symbol, len(self.files), rng[0], rng[1] )\n \nclass HistDatabase(object):\n ''' class working with multiple symbols at once '''\n def __init__(self, dataDir):\n \n # get symbols from directory names\n symbols = []\n for l in os.listdir(dataDir):\n if os.path.isdir(os.path.join(dataDir,l)):\n symbols.append(l)\n \n #build dataset\n self.csv = {} # dict of HistDataCsv halndlers\n\n for symbol in symbols:\n self.csv[symbol] = HistDataCsv(symbol,dataDir)\n \n \n def loadDates(self,dates=None):\n ''' \n get data for all symbols as wide panel\n provide a dates list. 
If no dates list is provided, common dates are used.\n '''\n if dates is None: dates=self.commonDates\n \n tmp = {}\n \n \n for k,v in self.csv.items():\n tmp[k] = v.loadDates(dates)\n \n return pd.WidePanel(tmp)\n \n def toHDF(self,dataFile,dates=None):\n ''' write wide panel data to a hdfstore file '''\n \n if dates is None: dates=self.commonDates\n store = pd.HDFStore(dataFile) \n wp = self.loadDates(dates)\n \n store['data'] = wp\n store.close()\n \n \n \n \n \n @property \n def commonDates(self):\n ''' return dates common for all symbols '''\n t = [v.dates for v in self.csv.values()] # get all dates in a list\n \n d = list(set(t[0]).intersection(*t[1:]))\n return sorted(d)\n \n \n def __repr__(self):\n s = '-----Hist CSV Database-----\\n'\n for k,v in self.csv.items():\n s+= (str(v)+'\\n')\n return s\n \n \n#--------------------\n\nif __name__=='__main__':\n\n dbDir =os.path.normpath('D:/data/30sec')\n vxx = HistDataCsv('VXX',dbDir)\n spy = HistDataCsv('SPY',dbDir)\n# \n date = dt.date(2012,8,31)\n print(date)\n# \n pair = pd.DataFrame({'SPY':spy.loadDate(date)['close'],'VXX':vxx.loadDate(date)['close']})\n \n print(pair.tail())" ]
[ [ "pandas.WidePanel", "pandas.DataFrame.from_csv", "numpy.argmin", "numpy.argmax", "pandas.concat", "pandas.HDFStore" ] ]
urinieto/meshuggarn
[ "51f115662ad863aa72c9feb86e9c6d15da52a6a6" ]
[ "src/generate.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nGenerates the audio file with the trained RNN\n\"\"\"\nimport time\nimport logging\nimport numpy as np\nimport librosa\nfrom tqdm import tqdm\nfrom scipy.spatial.distance import cdist\n\nfrom keras.models import model_from_json\n\n\nNORM_CQT_FRAMES = \"../data/norm_cqt_frames.npy\"\nAUDIO_FRAMES_NPY = \"../data/audio_frames.npy\"\nN_FRAMES = 100 # Must be the same as in train.py\nN_GEN_FRAMES = 1000 # Number of frames to generate\nSR = 22050\nHOP_LEN = 4096\nFRAME_LEN = HOP_LEN * 2 # 50% overlap\n\n\ndef get_closest_frames(in_frames, all_cqt_frames):\n out_frames = []\n for in_frame in tqdm(in_frames):\n dists = cdist(in_frame.reshape((1, -1)), all_cqt_frames).flatten()\n out_frames.append(np.argsort(dists)[0])\n return np.asarray(out_frames, dtype=np.int)\n\n\ndef process():\n logging.info(\"Loading model from disk\")\n with open('model.json', 'r') as f:\n model_json = f.read()\n model = model_from_json(model_json)\n model.load_weights(\"model.h5\")\n\n # Load normalized frames\n frames = np.load(NORM_CQT_FRAMES)\n\n # Generate frames\n logging.info(\"Predicting Frames\")\n # TODO: Randomize\n start_i = 5000\n seed_frames = frames[start_i:start_i + N_FRAMES]\n pred_frames = [frame for frame in seed_frames]\n for i in tqdm(range(N_GEN_FRAMES)):\n preds = model.predict(seed_frames.reshape((1, N_FRAMES, frames.shape[1])),\n verbose=0)\n\n # Update the seed_frames\n seed_frames = seed_frames[1:]\n seed_frames = np.concatenate((seed_frames, preds), axis=0)\n\n # Store the predicted frame\n pred_frames.append(preds.flatten())\n pred_frames = np.asarray(pred_frames)\n\n # Get closest frames to map to audio\n logging.info(\"Mapping predicted frames to GT frames\")\n audio_idxs = get_closest_frames(pred_frames, frames)\n print(audio_idxs)\n\n # Generate audio\n logging.info(\"Generating audio\")\n all_audio_frames = np.load(AUDIO_FRAMES_NPY)\n audio = np.zeros(len(audio_idxs) * HOP_LEN + FRAME_LEN)\n for i, audio_idx in enumerate(audio_idxs):\n audio_frame = np.hanning(FRAME_LEN) * all_audio_frames[audio_idx]\n audio[i * HOP_LEN: i * HOP_LEN + FRAME_LEN] += audio_frame\n librosa.output.write_wav(\"out.wav\", audio, sr=SR)\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n\n # Setup the logger\n logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',\n level=logging.INFO)\n process()\n logging.info(\"Done! Took %.2f seconds.\" % (time.time() - start_time))\n" ]
[ [ "numpy.concatenate", "numpy.asarray", "numpy.hanning", "numpy.load", "numpy.argsort" ] ]
cronin4392/trident
[ "1c1eb01bcde861496ce83e265ff071fc9bcb9db2" ]
[ "trident/layers/tensorflow_blocks.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport inspect\nimport itertools\nimport math\nfrom functools import reduce\nfrom functools import wraps\nfrom itertools import repeat\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.client import device_lib\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import image_ops\n\nfrom trident.backend.common import *\nfrom trident.layers.tensorflow_activations import get_activation, Identity\nfrom trident.layers.tensorflow_layers import *\nfrom trident.layers.tensorflow_normalizations import get_normalization,SpectralNorm\nfrom trident.layers.tensorflow_pooling import GlobalAvgPool2d\nfrom trident.backend.tensorflow_backend import *\nfrom trident.backend.tensorflow_ops import *\nfrom trident.layers.tensorflow_layers import *\n\n_tf_data_format = 'channels_last'\n\n__all__ = ['Conv1d_Block', 'Conv2d_Block', 'TransConv2d_Block', 'DepthwiseConv2d_Block', 'SeparableConv2d_Block', 'ShortCut2d', 'ConcateBlock', 'SqueezeExcite', 'For']\n\n_session = get_session()\n\n\ndef _ntuple(n):\n def parse(x):\n if isinstance(x, collections.Iterable):\n return x\n return tuple(repeat(x, n))\n\n return parse\n\n\n_single = _ntuple(1)\n_pair = _ntuple(2)\n_triple = _ntuple(3)\n_quadruple = _ntuple(4)\n\n\nclass FullConnect_Block(Layer):\n def __init__(self, num_filters=None,\n activation=None, normalization=None, use_spectral=False, use_bias=False,\n add_noise=False, noise_intensity=0.005, dropout_rate=0, name=None, depth_multiplier=None,\n keep_output=False, sequence_rank='cna', **kwargs):\n super(FullConnect_Block, self).__init__(name=name, keep_output=keep_output)\n\n if sequence_rank in ['fna', 'naf', 'afn']:\n self.sequence_rank = sequence_rank\n else:\n self.sequence_rank = 'fna'\n\n self.num_filters = num_filters\n\n\n\n self.use_bias = use_bias\n\n self.add_noise = add_noise\n self.noise_intensity = noise_intensity\n self.dropout_rate = dropout_rate\n self.droupout = None\n self.depth_multiplier = depth_multiplier\n self.keep_output = keep_output\n\n norm = get_normalization(normalization)\n fc = Dense(num_filters=self.num_filters, activation=None, use_bias=self.use_bias, depth_multiplier=self.depth_multiplier).to(self.device)\n self.use_spectral = use_spectral\n if isinstance(norm, SpectralNorm):\n self.use_spectral = True\n norm = None\n fc = SpectralNorm(module=fc)\n if (hasattr(self, 'sequence_rank') and self.sequence_rank == 'fna') or not hasattr(self, 'sequence_rank'):\n self.add_module('fc', fc)\n self.add_module('norm', norm)\n self.add_module('activation', get_activation(activation,only_layer=True))\n\n elif self.sequence_rank == 'naf':\n self.add_module('norm', norm)\n self.add_module('activation', get_activation(activation,only_layer=True))\n self.add_module('fc', fc)\n\n elif self.sequence_rank == 'afn':\n self.add_module('activation', get_activation(activation,only_layer=True))\n self.add_module('fc', fc)\n self.add_module('norm', norm)\n self._name = name\n\n def build(self, input_shape: TensorShape):\n if not self._built:\n # if self.norm is not None:\n # self.norm.input_shape = self.conv.output_shape\n self.to(self.device)\n self._built = True\n\n def forward(self, x, **kwargs):\n\n if not hasattr(self, 'sequence_rank'):\n setattr(self, 'sequence_rank', 'fna')\n if self.add_noise == True and self.training == True:\n noise = self.noise_intensity * torch.randn_like(x, dtype=x.dtype)\n x = 
x + noise\n for child in list(self.children())[:3]:\n if child is not None:\n x = child(x)\n if self.training and self.dropout_rate > 0:\n x = F.dropout(x, p=self.dropout_rate, training=self.training)\n return x\n\n def extra_repr(self):\n s = 'kernel_size={kernel_size}, {num_filters}, strides={strides}'\n if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n if inspect.isfunction(self.__dict__['activation']):\n s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n elif isinstance(self.__dict__['activation'], nn.Module):\n s += ', activation={0}'.format(self.__dict__['activation']).__repr__()\n return s.format(**self.__dict__)\n\n\nclass Conv1d_Block(Layer):\n def __init__(self, kernel_size=3, num_filters=None, strides=1, auto_pad=True, padding_mode='zero', activation=None,\n normalization=None, use_spectral=False, use_bias=False, dilation=1, groups=1, add_noise=False, noise_intensity=0.005,\n dropout_rate=0, name=None, depth_multiplier=None, keep_output=False, sequence_rank='cna', **kwargs):\n super(Conv1d_Block, self).__init__(name=name, keep_output=keep_output)\n if sequence_rank in ['cna', 'nac']:\n self.sequence_rank = sequence_rank\n else:\n self.sequence_rank = 'cna'\n self.kernel_size = kernel_size\n self.num_filters = num_filters\n self.strides = strides\n self.auto_pad = auto_pad\n self.padding = 0\n self.padding_mode = padding_mode\n\n # if self.auto_pad == False:\n # self.padding = 0\n # else:\n # self.padding= tuple([n-2 for n in list(self.kernel_size)]) if hasattr(self.kernel_size,'__len__') else\n # self.kernel_size-2\n\n self.use_bias = use_bias\n self.dilation = dilation\n self.groups = groups\n self.depth_multiplier = depth_multiplier\n self.add_noise = add_noise\n self.noise_intensity = noise_intensity\n self.dropout_rate = dropout_rate\n\n norm = get_normalization(normalization)\n conv = Conv1d(kernel_size=self.kernel_size, num_filters=self.num_filters, strides=self.strides,\n auto_pad=self.auto_pad, padding_mode=self.padding_mode, activation=None,\n use_bias=self.use_bias, dilation=self.dilation, groups=self.groups, name=self._name,\n depth_multiplier=self.depth_multiplier)\n self.use_spectral = use_spectral\n # if isinstance(norm, SpectralNorm):\n # self.use_spectral = True\n # norm = None\n # conv= nn.utils.spectral_norm(conv)\n if (hasattr(self, 'sequence_rank') and self.sequence_rank == 'cna') or not hasattr(self, 'sequence_rank'):\n self.conv = conv\n self.norm = norm\n self.activation = get_activation(activation)\n elif self.sequence_rank == 'nac':\n self.norm = norm\n self.activation = get_activation(activation)\n self.conv = conv\n\n def build(self, input_shape:TensorShape):\n if self._built == False:\n # if self.use_spectral:\n # self.conv = nn.utils.spectral_norm(self.conv)\n # if self.norm is SpectralNorm:\n # self.norm=None\n self._built = True\n\n\n def forward(self, x,**kwargs):\n if hasattr(self, 'sequence_rank'):\n setattr(self, 'sequence_rank', 'cna')\n if self.add_noise == True and self.training == True:\n noise = self.noise_intensity * random_normal_like(x, dtype=x.dtype)\n x = x + noise\n if self.sequence_rank == 'cna':\n x = self.conv(x)\n if self.norm is not None:\n x = self.norm(x)\n if self.activation is not None:\n x = self.activation(x)\n elif self.sequence_rank == 'nac':\n if self.norm is not None:\n x = self.norm(x)\n if self.activation is not None:\n x = self.activation(x)\n x = self.conv(x)\n if self.dropout_rate > 0 and self.training:\n x = tf.nn.dropout(x, rate=self.dropout_rate)\n return x\n\n 
def extra_repr(self):\n s = 'kernel_size={kernel_size}, {num_filters}, strides={strides}'\n if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n if inspect.isfunction(self.__dict__['activation']):\n s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n elif isinstance(self.__dict__['activation'], tf.Module):\n s += ', activation={0}'.format(self.__dict__['activation']).__repr__()\n return s.format(**self.__dict__)\n\n\nclass Conv2d_Block(Layer):\n def __init__(self, kernel_size=(3, 3), num_filters=None, strides=1, auto_pad=True, padding_mode='zero',\n activation=None, normalization=None, use_spectral=False, use_bias=False, dilation=1, groups=1,\n add_noise=False, noise_intensity=0.005, dropout_rate=0, name=None, depth_multiplier=None, keep_output=False, sequence_rank='cna', **kwargs):\n super(Conv2d_Block, self).__init__(name=name)\n if sequence_rank in ['cna', 'nac']:\n self.sequence_rank = sequence_rank\n self.kernel_size = kernel_size\n self.num_filters = num_filters\n self.strides = strides\n self.keep_output = keep_output\n padding = kwargs.get('padding', None)\n if 'padding' in kwargs:\n kwargs.pop('padding')\n if isinstance(padding, str) and auto_pad == False:\n auto_pad = (padding.lower() == 'same')\n elif isinstance(padding, int) and padding > 0:\n padding = _pair(padding)\n auto_pad = False\n elif isinstance(padding, tuple):\n auto_pad = False\n pass\n self.auto_pad = auto_pad\n self.padding = padding\n\n self.use_bias = use_bias\n self.dilation = dilation\n self.groups = groups\n\n self.add_noise = add_noise\n self.noise_intensity = noise_intensity\n self.dropout_rate = dropout_rate\n self.depth_multiplier = depth_multiplier\n self.use_spectral = use_spectral\n if not self.use_spectral:\n self.conv = Conv2d(kernel_size=self.kernel_size, num_filters=self.num_filters, strides=self.strides,\n auto_pad=self.auto_pad, activation=None,\n use_bias=self.use_bias, dilation=self.dilation, groups=self.groups,\n depth_multiplier=self.depth_multiplier, padding=self.padding, **kwargs)\n self.norm = get_normalization(normalization)\n self.activation = get_activation(activation)\n self.droupout = None\n\n def build(self, input_shape:TensorShape):\n if self._built == False:\n self.conv.input_shape = input_shape\n # if self.use_spectral:\n # conv=self._modules['conv']\n # self._modules['conv']=nn.utils.spectral_norm(conv)\n # self.norm=None\n self._built = True\n\n\n def forward(self, x, **kwargs):\n if self.training and self.add_noise == True:\n noise = self.noise_intensity * tf.random.normal(shape=x.shape, mean=0, stddev=1,dtype=x.dtype)\n x += noise\n x = self.conv(x)\n if self.norm is not None:\n x = self.norm(x)\n if self.activation is not None:\n x = self.activation(x)\n if self.training and self.dropout_rate > 0:\n x = tf.nn.dropout(x, rate=self.dropout_rate)\n return x\n\n def extra_repr(self):\n s = 'kernel_size={kernel_size}, {num_filters}, strides={strides}'\n if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n if inspect.isfunction(self.__dict__['activation']):\n s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n elif isinstance(self.__dict__['activation'], Layer):\n s += ', activation={0}'.format(self.__dict__['activation']).__repr__()\n return s.format(**self.__dict__)\n\n\nclass TransConv2d_Block(Layer):\n def __init__(self, kernel_size=(3, 3), num_filters=None, strides=1, auto_pad=True, padding_mode='zero',\n activation=None, normalization=None, use_spectral=False, use_bias=False, 
dilation=1, groups=1,\n add_noise=False, noise_intensity=0.005, dropout_rate=0, name=None, depth_multiplier=None, sequence_rank='cna', **kwargs):\n super(TransConv2d_Block, self).__init__(name=name)\n if sequence_rank in ['cna', 'nac']:\n self.sequence_rank = sequence_rank\n self.kernel_size = kernel_size\n self.num_filters = num_filters\n self.strides = strides\n self.auto_pad = auto_pad\n\n self.use_bias = use_bias\n self.dilation = dilation\n self.groups = groups\n\n self.add_noise = add_noise\n self.noise_intensity = noise_intensity\n self.dropout_rate = dropout_rate\n self.depth_multiplier = depth_multiplier\n self.use_spectral = use_spectral\n if not self.use_spectral:\n self.conv = TransConv2d(kernel_size=self.kernel_size, num_filters=self.num_filters, strides=self.strides,\n auto_pad=self.auto_pad, activation=None,\n use_bias=self.use_bias, dilation=self.dilation, groups=self.groups, name=self._name,\n depth_multiplier=self.depth_multiplier)\n self.norm = get_normalization(normalization)\n self.activation = get_activation(activation)\n self.droupout = None\n\n def build(self, input_shape:TensorShape):\n if self._built == False:\n conv = TransConv2d(kernel_size=self.kernel_size, num_filters=self.num_filters, strides=self.strides,\n auto_pad=self.auto_pad, activation=None,\n use_bias=self.use_bias, dilation=self.dilation, groups=self.groups, name=self._name,\n depth_multiplier=self.depth_multiplier)\n # conv.input_shape = input_shape\n\n if self.use_spectral:\n # self.conv = nn.utils.spectral_norm(conv)\n self.norm = None\n else:\n self.conv = conv\n self._built = True\n\n\n def forward(self, x, **kwargs):\n if self.training and self.add_noise == True:\n noise = self.noise_intensity * tf.random.normal(shape=x.shape, mean=0, stddev=1,dtype=x.dtype)\n x += noise\n x = self.conv(x)\n if self.norm is not None:\n x = self.norm(x)\n if self.activation is not None:\n x = self.activation(x)\n if self.training and self.dropout_rate > 0:\n x = tf.nn.dropout(x, rate=self.dropout_rate)\n return x\n\n def extra_repr(self):\n s = 'kernel_size={kernel_size}, {num_filters}, strides={strides}'\n if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n if inspect.isfunction(self.__dict__['activation']):\n s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n elif isinstance(self.__dict__['activation'], Layer):\n s += ', activation={0}'.format(self.__dict__['activation']).__repr__()\n return s.format(**self.__dict__)\n\n\nclass TransConv3d_Block(Layer):\n def __init__(self, kernel_size=(3, 3, 3), num_filters=32, strides=1, input_shape=None, auto_pad=True,\n activation='leaky_relu', normalization=None, use_bias=False, dilation=1, groups=1, add_noise=False,\n noise_intensity=0.001, dropout_rate=0, name=None, sequence_rank='cna', **kwargs):\n super(TransConv3d_Block, self).__init__(name=name)\n if sequence_rank in ['cna', 'nac']:\n self.sequence_rank = sequence_rank\n if add_noise:\n noise = tf.keras.layers.GaussianNoise(noise_intensity)\n self.add(noise)\n self._conv = TransConv3d(kernel_size=kernel_size, num_filters=num_filters, strides=strides,\n input_shape=input_shape, auto_pad=auto_pad, activation=None, use_bias=use_bias,\n dilation=dilation, groups=groups)\n self.add(self._conv)\n\n self.norm = get_normalization(normalization)\n if self.norm is not None:\n self.add(self.norm)\n\n self.activation = get_activation(snake2camel(activation))\n if self.activation is not None:\n self.add(self.activation)\n if dropout_rate > 0:\n self.drop = Dropout(dropout_rate)\n 
self.add(self.drop)\n\n    def forward(self, x, **kwargs):\n        # __init__ registers the noise/dropout sub-layers but never stores these\n        # attributes, so guard with getattr to keep forward callable\n        if self.training and getattr(self, 'add_noise', False):\n            noise = self.noise_intensity * tf.random.normal(shape=x.shape, mean=0, stddev=1,dtype=x.dtype)\n            x += noise\n        x = self._conv(x)\n        if self.norm is not None:\n            x = self.norm(x)\n        if self.activation is not None:\n            x = self.activation(x)\n        if self.training and getattr(self, 'dropout_rate', 0) > 0:\n            x = tf.nn.dropout(x, rate=self.dropout_rate)\n        return x\n\n    def extra_repr(self):\n        s = 'kernel_size={kernel_size}, num_filters={num_filters}, strides={strides}'\n        if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n            if inspect.isfunction(self.__dict__['activation']):\n                s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n            elif isinstance(self.__dict__['activation'], Layer):\n                s += ', activation={0}'.format(self.__dict__['activation']).__repr__()\n        return s.format(**self.__dict__)\n\n\nclass DepthwiseConv2d_Block1(Layer):\n    def __init__(self, kernel_size=(3, 3), depth_multiplier=None, strides=1, auto_pad=True, padding_mode='zero', activation=None, normalization=None, use_spectral=False,\n                 use_bias=False, dilation=1, groups=1, add_noise=False, noise_intensity=0.005, dropout_rate=0, name=None, sequence_rank='cna', **kwargs):\n        super(DepthwiseConv2d_Block1, self).__init__(name=name)\n        if sequence_rank in ['cna', 'nac']:\n            self.sequence_rank = sequence_rank\n        self.kernel_size = kernel_size\n        self.strides = strides\n        self.auto_pad = auto_pad\n\n        self.use_bias = use_bias\n        self.dilation = dilation\n        self.groups = groups\n\n        self.add_noise = add_noise\n        self.noise_intensity = noise_intensity\n        self.dropout_rate = dropout_rate\n        self.depth_multiplier = depth_multiplier\n        self.use_spectral = use_spectral\n        self.conv = None\n        if not self.use_spectral:\n            self.conv = DepthwiseConv2d(kernel_size=self.kernel_size, depth_multiplier=self.depth_multiplier, strides=self.strides,\n                                        auto_pad=self.auto_pad, activation=None,\n                                        use_bias=self.use_bias, dilation=self.dilation, groups=self.groups, name=self._name)\n        self.norm = get_normalization(normalization)\n        self.activation = get_activation(activation)\n        self.droupout = None\n\n    def build(self, input_shape:TensorShape):\n        if self._built == False:\n            conv = DepthwiseConv2d(kernel_size=self.kernel_size, depth_multiplier=self.depth_multiplier, strides=self.strides,\n                                   auto_pad=self.auto_pad, activation=None,\n                                   use_bias=self.use_bias, dilation=self.dilation, groups=self.groups, name=self._name)\n            # conv.input_shape = input_shape\n\n            if self.use_spectral:\n                # self.conv = nn.utils.spectral_norm(conv)\n                self.norm = None\n            else:\n                self.conv = conv\n            self._built = True\n\n    def forward(self, x, **kwargs):\n        if self.training and self.add_noise == True:\n            noise = self.noise_intensity * tf.random.normal(shape=x.shape, mean=0, stddev=1,dtype=x.dtype)\n            x += noise\n        x = self.conv(x)\n        if self.norm is not None:\n            x = self.norm(x)\n        if self.activation is not None:\n            x = self.activation(x)\n        if self.training and self.dropout_rate > 0:\n            x = tf.nn.dropout(x, rate=self.dropout_rate)\n        return x\n\n    def extra_repr(self):\n        s = 'kernel_size={kernel_size}, depth_multiplier={depth_multiplier}, strides={strides}'\n        if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n            if inspect.isfunction(self.__dict__['activation']):\n                s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n            elif isinstance(self.__dict__['activation'], Layer):\n                s += ', 
activation={0}'.format(self.__dict__['activation']).__repr__()\n return s.format(**self.__dict__)\n\n\nclass DepthwiseConv2d_Block(Layer):\n def __init__(self, kernel_size=(3, 3), depth_multiplier=1, strides=1, auto_pad=True, padding=None, padding_mode='zero',\n activation=None, normalization=None, use_spectral=False, use_bias=False, dilation=1, groups=1,\n add_noise=False, noise_intensity=0.005, dropout_rate=0, name=None, keep_output=False, sequence_rank='cna', **kwargs):\n super(DepthwiseConv2d_Block, self).__init__(name=name)\n if sequence_rank in ['cna', 'nac']:\n self.sequence_rank = sequence_rank\n self.kernel_size = kernel_size\n self.depth_multiplier = depth_multiplier\n\n self.strides = strides\n self.auto_pad = auto_pad\n self.padding = 0\n self.padding_mode = padding_mode\n # if self.auto_pad == False:\n # self.padding = 0\n # else:\n # self.padding= tuple([n-2 for n in list(self.kernel_size)]) if hasattr(self.kernel_size,'__len__') else\n # self.kernel_size-2\n\n self.use_bias = use_bias\n self.dilation = dilation\n\n self.add_noise = add_noise\n self.noise_intensity = noise_intensity\n self.dropout_rate = dropout_rate\n self.conv = None\n self.norm = get_normalization(normalization)\n self.use_spectral = use_spectral\n self.activation = get_activation(activation)\n self.droupout = None\n self.keep_output = keep_output\n self._name = name\n\n def build(self, input_shape:TensorShape):\n if self._built == False or self.conv is None:\n conv = DepthwiseConv2d(kernel_size=self.kernel_size, depth_multiplier=self.depth_multiplier,\n strides=self.strides, auto_pad=self.auto_pad, padding=self.padding, padding_mode=self.padding_mode,\n activation=None, use_bias=self.use_bias, dilation=self.dilation, name=self._name)\n conv.input_shape = input_shape\n if self.use_spectral:\n self.conv = spectral_norm(conv)\n else:\n self.conv = conv\n\n self._built = True\n\n\n def forward(self, x, **kwargs):\n if self.training and self.add_noise == True:\n noise = self.noise_intensity * tf.random.normal(shape=x.shape, mean=0, stddev=1,dtype=x.dtype)\n x += noise\n x = self.conv(x)\n if self.norm is not None:\n x = self.norm(x)\n if self.activation is not None:\n x = self.activation(x)\n if self.training and self.dropout_rate > 0:\n x = tf.nn.dropout(x, rate=self.dropout_rate)\n return x\n\n def extra_repr(self):\n s = 'kernel_size={kernel_size}, depth_multiplier={depth_multiplier}, strides={strides}'\n if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n if inspect.isfunction(self.__dict__['activation']):\n s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n elif isinstance(self.__dict__['activation'], Layer):\n s += ', activation={0}'.format(self.__dict__['activation']).__repr__()\n return s.format(**self.__dict__)\n\n\nclass SeparableConv2d_Block(Layer):\n def __init__(self, kernel_size=(3, 3), depth_multiplier=None, strides=1, auto_pad=True, padding_mode='zero',\n activation=None, normalization=None, use_spectral=False, use_bias=False, dilation=1, groups=1,\n add_noise=False, noise_intensity=0.005, dropout_rate=0, name=None, sequence_rank='cna', **kwargs):\n super(SeparableConv2d_Block, self).__init__()\n if sequence_rank in ['cna', 'nac']:\n self.sequence_rank = sequence_rank\n self.kernel_size = kernel_size\n self.strides = strides\n self.auto_pad = auto_pad\n\n self.use_bias = use_bias\n self.dilation = dilation\n self.groups = groups\n\n self.add_noise = add_noise\n self.noise_intensity = noise_intensity\n self.dropout_rate = dropout_rate\n 
self.depth_multiplier = depth_multiplier\n self.use_spectral = use_spectral\n if not self.use_spectral:\n self.conv = SeparableConv2d(kernel_size=self.kernel_size, depth_multiplier=self.depth_multiplier, strides=self.strides,\n auto_pad=self.auto_pad, activation=None,\n use_bias=self.use_bias, dilation=self.dilation, groups=self.groups, name=self._name)\n self.norm = get_normalization(normalization)\n self.activation = get_activation(activation)\n self.droupout = None\n\n def build(self, input_shape:TensorShape):\n if self._built == False:\n conv = SeparableConv2d(kernel_size=self.kernel_size, depth_multiplier=self.depth_multiplier, strides=self.strides,\n auto_pad=self.auto_pad, activation=None,\n use_bias=self.use_bias, dilation=self.dilation, groups=self.groups, name=self._name\n )\n # conv.input_shape = input_shape\n\n # if self.use_spectral:\n # self.conv = spectral_norm(self.conv)\n # if self.norm is SpectralNorm:\n # self.norm=None\n self._built = True\n\n\n def forward(self, x, **kwargs):\n if self.training and self.add_noise == True:\n noise = self.noise_intensity * tf.random.normal(shape=x.shape, mean=0, stddev=1,dtype=x.dtype)\n x += noise\n x = self.conv(x)\n if self.norm is not None:\n x = self.norm(x)\n if self.activation is not None:\n x = self.activation(x)\n if self.training and self.dropout_rate > 0:\n x = tf.nn.dropout(x, rate=self.dropout_rate)\n return x\n\n def extra_repr(self):\n s = 'kernel_size={kernel_size}, depth_multiplier={depth_multiplier}, strides={strides}'\n if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n if inspect.isfunction(self.__dict__['activation']):\n s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n elif isinstance(self.__dict__['activation'], Layer):\n s += ', activation={0}'.format(self.__dict__['activation']).__repr__()\n return s.format(**self.__dict__)\n\n\ndef For(what_range, constructor):\n \"\"\"\n For(what_range, constructor, name='')\n Layer factory function to create a composite through a pattern similar to Python's `for` statement.\n This layer factory loops over the given range and passes each value to the constructor function.\n It is equivalent to\n ``Sequential([constructor(i) for i in what_range])``.\n It is acceptable that ``constructor`` takes no argument.\n\n Args:\n what_range (range): a Python range to loop over\n constructor (Python function/lambda with 1 or 0 arguments): lambda that constructs a layer\n Returns:\n cntk.ops.functions.Function:\n A function that accepts one argument and applies the layers as constructed by ``constructor`` one after another.\n\n Examples:\n >>> # stack of 3 Dense relu layers\n >>> model = For(range(3), lambda: Dense(200, activation=relu))\n >>> # version of the above that has no activation for the last layer\n >>> model = For(range(3), lambda i: Dense(200, name='dense_{0}'.format(i+1)))\n >>> print(model[2].name)\n dense_3\n \"\"\"\n # Python 2.7 support requires us to use getargspec() instead of inspect\n takes_arg = len(inspect.getfullargspec(constructor).args) > 0\n\n # For Python 3, check if it is a python function/lambda\n if not callable(constructor):\n raise ValueError(\"constructor must be a Python function/lambda\")\n\n # helper to call the layer constructor\n def call(i):\n if takes_arg:\n return constructor(i) # takes an arg: pass it\n else:\n return constructor() # takes no arg: call without, that's fine too\n\n layers = [call(i) for i in what_range]\n return Sequential(layers)\n\n\nclass Classifer1d(Sequential):\n def 
__init__(self, num_classes=10, is_multilable=False, classifier_type=ClassfierType.dense, name=None, **kwargs):\n        super(Classifer1d, self).__init__(name=name)\n        self.classifier_type = classifier_type\n        self.num_classes = num_classes\n        self.is_multilable = is_multilable\n        if classifier_type == ClassfierType.dense:\n            self.add(Flatten)\n            self.add(Dense(num_classes, use_bias=False, activation='sigmoid'))\n            if not is_multilable:\n                self.add(SoftMax)\n        elif classifier_type == ClassfierType.global_avgpool:\n            self.add(Conv2d((1, 1), num_classes, strides=1, auto_pad=True, activation=None))\n            self.add(GlobalAvgPool2d)\n            if not is_multilable:\n                self.add(SoftMax)\n\n\nclass ShortCut2d(Layer):\n    def __init__(self, *args, axis=-1, branch_from=None, activation=None, mode='add', name=None, keep_output=False, **kwargs):\n        \"\"\"\n        Args\n            layer_defs : object\n        \"\"\"\n        super(ShortCut2d, self).__init__(name=name)\n        self.activation = get_activation(activation)\n        self.has_identity = False\n        self.mode = mode\n        self.axis = axis\n        self.branch_from = branch_from\n        self.branch_from_uuid = None\n        self.keep_output = keep_output\n\n        for i in range(len(args)):\n            arg = args[i]\n            if isinstance(arg, (Layer, tf.Tensor, list, dict)):\n                if isinstance(arg, list):\n                    arg = Sequential(*arg)\n                elif isinstance(arg, OrderedDict) and len(args) == 1:\n                    for k, v in arg.items():\n                        if isinstance(v, Identity):\n                            self.has_identity = True\n                            self.add_module('Identity', v)\n                        else:\n                            self.add_module(k, v)\n                elif isinstance(arg, dict) and len(args) == 1:\n                    keys = sorted(list(arg.keys()))\n                    for k in keys:\n                        v = arg[k]\n                        if isinstance(v, Identity):\n                            self.has_identity = True\n                            self.add_module('Identity', v)\n                        else:\n                            self.add_module(str(k), v)\n                elif isinstance(arg, (dict, OrderedDict)) and len(args) > 1:\n                    raise ValueError('more than one dict argument is not supported.')\n                elif isinstance(arg, Identity):\n                    self.has_identity = True\n                    self.add_module('Identity', arg)\n                elif isinstance(arg, Layer):\n                    if len(arg.name) > 0 and arg.name != arg._name:\n                        self.add_module(arg.name, arg)\n                    else:\n                        self.add_module('branch{0}'.format(i + 1), arg)\n            else:\n                raise ValueError('{0} is not supported.'.format(arg.__class__.__name__))\n        if len(self._modules) == 1 and self.has_identity == False and self.branch_from is None:\n            self.has_identity = True\n            self.add_module('Identity', Identity())\n\n    def build(self, input_shape:TensorShape):\n        if self._built == False:\n            if self.branch_from is not None:\n                for k, v in self.nodes.item_list:\n                    if v.name == self.branch_from:\n                        v.keep_output = True\n                        self.branch_from_uuid = k\n                        print('get {0} output info...'.format(self.branch_from))\n                        break\n                if self.branch_from_uuid is None:\n                    raise ValueError('Cannot find any layer named {0}'.format(self.branch_from))\n            self._built = True\n\n    def forward(self, x, **kwargs):\n        current = None\n        concate_list = []\n\n        for k, v in self._modules.items():\n            if k != 'activation':\n                new_item = v(x) if not isinstance(v, Identity) else x\n                if current is None:\n                    current = new_item\n                    concate_list.append(current)\n                else:\n                    if self.mode == 'add':\n                        current = current + new_item\n                    elif self.mode == 'dot':\n                        current = current * new_item\n                    elif self.mode == 'concate':\n                        concate_list.append(new_item)\n                    else:\n                        raise ValueError('Not valid shortcut mode')\n\n        if hasattr(self, 'branch_from_uuid') and self.branch_from_uuid is not None and self.branch_from_uuid in self.nodes:\n            new_item = self.nodes.get(self.branch_from_uuid)._output_tensor\n            if self.mode == 'add':\n                current = current + new_item\n            elif self.mode == 'dot':\n                current = current * new_item\n            elif self.mode == 'concate':\n                concate_list.append(new_item)\n\n        if self.mode == 'concate':\n            x = concate(concate_list, axis=self.axis)\n        else:\n            x = current\n        if self.activation is not None:\n            x = self.activation(x)\n        return x\n\n    def extra_repr(self):\n        s = ('mode={mode}, keep_output={keep_output},axis={axis}')\n        if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n            if inspect.isfunction(self.__dict__['activation']):\n                s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n            elif isinstance(self.__dict__['activation'], Layer):\n                s += ', activation={0}'.format(self.__dict__['activation']).__repr__()\n\n        if hasattr(self, 'branch_from') and self.branch_from is not None:\n            s += ', branch_from={0}, branch_from_uuid={1}'.format(self.branch_from, self.branch_from_uuid)\n        return s.format(**self.__dict__)\n\n\nclass ShortCut(Layer):\n    \"\"\"ShortCut2d Layer \"\"\"\n\n    def __init__(self, *args, axis=-1, branch_from=None, activation=None, mode='add', name=None, keep_output=False,\n                 **kwargs):\n        \"\"\"\n\n        Args:\n            *args ():\n            axis ():\n            branch_from ():\n            activation ():\n            mode (str): 'add' 'dot' 'concate'\n            name (str):\n            keep_output (bool):\n            **kwargs ():\n\n        \"\"\"\n        super(ShortCut, self).__init__(name=name, keep_output=keep_output)\n        valid_mode = ['add', 'subtract', 'concate', 'dot', 'maxout']\n        if mode in valid_mode:\n            self.mode = mode\n        else:\n            raise ValueError('{0} is not a valid mode. please use one of {1}'.format(mode, valid_mode))\n        self.activation = get_activation(activation)\n        self.has_identity = False\n\n        self.axis = axis\n        self.branch_from = branch_from\n        self.branch_from_uuid = None\n\n        self.keep_output = keep_output\n\n        for i in range(len(args)):\n            arg = args[i]\n            if isinstance(arg, (Layer, Tensor, list, dict)):\n                if isinstance(arg, list):\n                    arg = Sequential(*arg)\n                elif isinstance(arg, OrderedDict) and len(args) == 1:\n                    for k, v in arg.items():\n                        if isinstance(v, Identity):\n                            self.has_identity = True\n                            self.add_module('Identity', v)\n                        else:\n                            self.add_module(k, v)\n                elif isinstance(arg, dict) and len(args) == 1:\n                    keys = sorted(list(arg.keys()))\n                    for k in keys:\n                        v = arg[k]\n                        if isinstance(v, Identity):\n                            self.has_identity = True\n                            self.add_module('Identity', v)\n                        else:\n                            self.add_module(str(k), v)\n                elif isinstance(arg, (dict, OrderedDict)) and len(args) > 1:\n                    raise ValueError('more than one dict argument is not supported.')\n\n                elif isinstance(arg, Identity):\n                    self.has_identity = True\n                    self.add_module('Identity', arg)\n                elif isinstance(arg, Layer):\n                    if len(arg.name) > 0 and arg.name != arg.default_name:\n                        self.add_module(arg.name, arg)\n                    else:\n                        self.add_module('branch{0}'.format(i + 1), arg)\n            else:\n                raise ValueError('{0} is not supported.'.format(arg.__class__.__name__))\n        if len(self._modules) == 1 and self.has_identity == False and self.branch_from is None and mode != 'concate':\n            self.has_identity = True\n            self.add_module('Identity', Identity())\n        self.to(self.device)\n\n    def build(self, input_shape: TensorShape):\n        if self._built == False:\n            if self.branch_from is not None:\n                for k, v in self.nodes.item_list:\n                    if v.name == self.branch_from:\n                        v.keep_output = True\n                        self.branch_from_uuid = k\n                        self.register_buffer('branch_from_tensor', v._output_tensor)\n                        print('get {0} output info...'.format(self.branch_from))\n                        break\n                if self.branch_from_uuid is None:\n                    raise ValueError('Cannot find any layer named {0}'.format(self.branch_from))\n            self._built = True\n\n    def 
forward(self, x, **kwargs):\n\n current = None\n concate_list = []\n\n for k, v in self._modules.items():\n if k != 'activation':\n new_item = v(x) # if not isinstance(v, Identity) else x\n if current is None:\n current = new_item\n concate_list.append(current)\n else:\n if self.mode == 'add':\n current = current + new_item\n elif self.mode == 'subtract':\n current = current -new_item\n elif self.mode == 'dot':\n current = current * new_item\n elif self.mode == 'concate':\n concate_list.append(new_item)\n else:\n raise ValueError('Not valid shortcut mode')\n\n if hasattr(self,\n 'branch_from_uuid') and self.branch_from_uuid is not None and self.branch_from_uuid in self.nodes:\n self.branch_from_tensor = self.nodes.get(self.branch_from_uuid)._output_tensor\n\n if self.mode == 'add':\n current = current + self.branch_from_tensor\n elif self.mode == 'subtract':\n current = current - self.branch_from_tensor\n elif self.mode == 'dot':\n current = current * self.branch_from_tensor\n elif self.mode == 'concate':\n concate_list.append(self.branch_from_tensor)\n\n if self.mode == 'concate':\n x = concate(concate_list, axis=self.axis)\n else:\n x = current\n if self.activation is not None:\n x = self.activation(x)\n return x\n\n def extra_repr(self):\n s = ('mode={mode}, keep_output={keep_output},axis={axis}')\n if 'activation' in self.__dict__ and self.__dict__['activation'] is not None:\n if inspect.isfunction(self.__dict__['activation']):\n s += ', activation={0}'.format(self.__dict__['activation'].__name__)\n elif isinstance(self.__dict__['activation'], Layer):\n s += ', activation={0}'.format(self.__dict__['activation']).__repr__()\n if hasattr(self, 'branch_from') and self.branch_from is not None:\n s += (', branch_from={branch_from}, branch_from_uuid={branch_from_uuid}')\n return s.format(**self.__dict__)\n\n\nclass ConcateBlock(Layer):\n def __init__(self, *args, axis=1, activation='relu'):\n \"\"\"\n\n Parameters\n ----------\n layer_defs : object\n \"\"\"\n super(ConcateBlock, self).__init__()\n self.activation = get_activation(activation)\n self.axis = axis\n self.has_identity = False\n for i in range(len(args)):\n arg = args[i]\n if isinstance(arg, (Layer, list, dict)):\n if isinstance(arg, list):\n arg = Sequential(*arg)\n elif isinstance(arg, dict) and len(args) == 1:\n for k, v in arg.items():\n if isinstance(v, Identity):\n self.has_identity = True\n self.add_module('Identity', v)\n else:\n self.add_module(k, v)\n elif isinstance(arg, dict) and len(args) > 1:\n raise ValueError('more than one dict argument is not support.')\n elif isinstance(arg, Identity):\n self.has_identity = True\n self.add_module('Identity', arg)\n else:\n self.add_module('branch{0}'.format(i + 1), arg)\n if len(self._modules) == 1 and self.has_identity == False:\n self.add_module('Identity', Identity())\n\n def forward(self, x, **kwargs):\n outs = []\n if 'Identity' in self._modules:\n outs.append(x)\n for k, v in self._modules.items():\n if k != 'Identity':\n out = v(x)\n if len(outs) == 0 or int_shape(out)[1:-1] == int_shape(outs[0])[1:-1]:\n outs.append(out)\n else:\n raise ValueError(\n 'All branches in shortcut should have the same shape {0} {1}'.format(int_shape(out), int_shape(x)))\n outs = tf.concat(outs, axis=self.axis)\n if self.activation is not None:\n outs = self.activation(outs)\n return outs\n\n\nclass SqueezeExcite(Layer):\n def __init__(self, se_filters, num_filters, is_gather_excite=False, use_bias=False, name=''):\n super(SqueezeExcite, self).__init__(name=name)\n\n self.se_filters = 
se_filters\n self.num_filters = num_filters\n self.squeeze = None\n self.excite = None\n self.is_gather_excite = is_gather_excite\n self.activation = get_activation('swish')\n self.pool = GlobalAvgPool2d(keepdim=True)\n self.use_bias = use_bias\n\n def build(self, input_shape:TensorShape):\n if self._built == False:\n self.squeeze = Conv2d((1, 1), self.se_filters, strides=1, auto_pad=False, activation=None, use_bias=self.use_bias, name=self.name + '_squeeze')\n self.excite = Conv2d((1, 1), self.num_filters, strides=1, auto_pad=False, activation=None, use_bias=self.use_bias, name=self.name + '_excite')\n self._built = True\n\n def forward(self, x, **kwargs):\n s = self.pool(x)\n s = self.activation(self.squeeze(s))\n s = tf.sigmoid(self.excite(s))\n\n if self.is_gather_excite:\n s = image_ops.resize_images_v2(s, x.shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)\n x = s * x\n return x\n\n\n" ]
[ [ "tensorflow.concat", "tensorflow.random.normal", "tensorflow.keras.layers.GaussianNoise", "tensorflow.python.ops.image_ops.resize_images_v2", "tensorflow.nn.dropout" ] ]
rohitmusti/SQuAD-Context-Merging
[ "d055a1565b87399b1d611385097495431f5e250a" ]
[ "exp-x/exp3_train.py" ]
[ "\"\"\"Train a model on SQuAD.\n\nAuthor:\n Rohit Musti ([email protected])\n Chris Chute ([email protected])\n\"\"\"\n\nimport numpy as np\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as sched\nimport torch.utils.data as torchdata\nimport util\nimport config\nimport sys\n\nfrom collections import OrderedDict\nfrom json import dumps\nfrom models import BiDAF\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport ujson as json\nfrom util import collate_fn, SQuAD3\n\n\ndef main(data, flags):\n # Set up logging and devices\n log_dir = data.logging_dir\n log = util.get_logger(log_dir, \"toy\")\n tbx = SummaryWriter(data.logging_dir)\n device, data.gpu_ids = util.get_available_devices()\n log.info('Config: {}'.format(dumps(vars(data), indent=4, sort_keys=True)))\n data.batch_size *= max(1, len(data.gpu_ids))\n\n # Set random seed\n log.info('Using random seed {}...'.format(data.random_seed))\n random.seed(data.random_seed)\n np.random.seed(data.random_seed)\n torch.manual_seed(data.random_seed)\n torch.cuda.manual_seed_all(data.random_seed)\n\n if flags[1] == \"toy\":\n word_emb_file = data.toy_word_emb_file\n training_data = data.toy_record_file_exp3\n test_data = data.dev_record_file_exp3\n eval_file = data.toy_eval_exp3\n elif flags[1] == \"train\":\n word_emb_file = data.word_emb_file\n training_data = data.train_record_file_exp3\n test_data = data.dev_record_file_exp3\n eval_file = data.train_eval_exp3\n elif flags[1] == \"dev\":\n word_emb_file = data.word_emb_file\n training_data = data.dev_record_file_exp3\n test_data = data.toy_record_file_exp3\n eval_file = data.dev_eval_exp3\n\n # Get embeddings\n log.info('Loading embeddings...')\n word_vectors = util.torch_from_json(word_emb_file)\n\n # Get model\n log.info('Building model...')\n model = BiDAF(word_vectors=word_vectors,\n hidden_size=data.hidden_size,\n drop_prob=data.drop_prob)\n model = nn.DataParallel(model, data.gpu_ids)\n if data.load_path:\n log.info('Loading checkpoint from {}...'.format(data.load_path))\n model, step = util.load_model(model, data.load_path, data.gpu_ids)\n else:\n step = 0\n model = model.to(device)\n model.train()\n ema = util.EMA(model, data.ema_decay)\n\n # Get saver\n saver = util.CheckpointSaver(data.logging_dir,\n max_checkpoints=10,\n metric_name=data.metric_name,\n maximize_metric=data.maximize_metric,\n log=log)\n\n # Get optimizer and scheduler\n optimizer = optim.Adadelta(model.parameters(), data.learning_rate,\n weight_decay=data.learning_weight_decay)\n scheduler = sched.LambdaLR(optimizer, lambda s: 1.) 
# Constant LR\n\n    # Get data loader\n    log.info('Building dataset...')\n    train_dataset = SQuAD3(training_data, use_v2=True)\n    train_loader = torchdata.DataLoader(train_dataset,\n                                        batch_size=data.batch_size,\n                                        shuffle=True,\n                                        num_workers=data.num_workers,\n                                        collate_fn=collate_fn)\n\n    test_dataset = SQuAD3(test_data, use_v2=True)\n    test_loader = torchdata.DataLoader(test_dataset,\n                                       batch_size=data.batch_size,\n                                       shuffle=False,\n                                       num_workers=data.num_workers,\n                                       collate_fn=collate_fn)\n\n    # Train\n    log.info('Training...')\n    steps_till_eval = data.eval_steps\n    epoch = step // len(train_dataset)  # epochs are counted in training-set passes\n    while epoch != data.num_epochs:\n        epoch += 1\n        log.info('Starting epoch {}...'.format(epoch))\n        with torch.enable_grad(), \\\n                tqdm(total=len(train_loader.dataset)) as progress_bar:\n            for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in train_loader:\n                # Setup for forward\n                cw_idxs = cw_idxs.to(device)\n                qw_idxs = qw_idxs.to(device)\n                batch_size = cw_idxs.size(0)\n                optimizer.zero_grad()\n\n                # Forward\n                log.info(\"cw_idxs length: {}\".format(str(len(cw_idxs))))\n                log.info(\"qw_idxs length: {}\".format(str(len(qw_idxs))))\n                log.info(\"cw_idxs size: {}\".format(str(sys.getsizeof(cw_idxs))))\n                log.info(\"qw_idxs size: {}\".format(str(sys.getsizeof(qw_idxs))))\n                log.info(\"cw_idxs shape: {}\".format(str(cw_idxs.shape)))\n                log.info(\"qw_idxs shape: {}\".format(str(qw_idxs.shape)))\n\n                log_p1, log_p2 = model(cw_idxs, qw_idxs)\n                y1, y2 = y1.to(device), y2.to(device)\n                loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)\n                loss_val = loss.item()\n\n                # Backward\n                loss.backward()\n                nn.utils.clip_grad_norm_(model.parameters(), data.max_grad_norm)\n                optimizer.step()\n                scheduler.step(step // batch_size)\n                ema(model, step // batch_size)\n\n                # Log info\n                step += batch_size\n                progress_bar.update(batch_size)\n                progress_bar.set_postfix(epoch=epoch,\n                                         NLL=loss_val)\n                tbx.add_scalar('toy/NLL', loss_val, step)\n                tbx.add_scalar('toy/LR',\n                               optimizer.param_groups[0]['lr'],\n                               step)\n\n                steps_till_eval -= batch_size\n                if steps_till_eval <= 0:\n                    steps_till_eval = data.eval_steps\n\n                    # Evaluate and save checkpoint\n                    log.info('Evaluating at step {}...'.format(step))\n                    ema.assign(model)\n                    # evaluate() takes the gold file as eval_file (see its signature below)\n                    results, pred_dict = evaluate(model, test_loader, device,\n                                                  eval_file=eval_file,\n                                                  max_len=sys.maxsize,\n                                                  use_squad_v2=True)\n                    saver.save(step, model, results[data.metric_name], device)\n                    ema.resume(model)\n\n                    # Log to console\n                    results_str = ', '.join('{}: {:05.2f}'.format(k, v)\n                                            for k, v in results.items())\n                    log.info('Dev {}'.format(results_str))\n\n                    # Log to TensorBoard\n                    log.info('Visualizing in TensorBoard...')\n                    for k, v in results.items():\n                        tbx.add_scalar('dev/{}'.format(k), v, step)\n                    util.visualize(tbx,\n                                   pred_dict=pred_dict,\n                                   eval_path=eval_file,\n                                   step=step,\n                                   split='dev',\n                                   num_visuals=data.num_visuals)\n\n\ndef evaluate(model, data_loader, device, eval_file, max_len, use_squad_v2):\n    nll_meter = util.AverageMeter()\n\n    model.eval()\n    pred_dict = {}\n    with open(eval_file, 'r') as fh:\n        gold_dict = json.load(fh)\n    with torch.no_grad(), \\\n            tqdm(total=len(data_loader.dataset)) as progress_bar:\n        for cw_idxs, cc_idxs, qw_idxs, qc_idxs, y1, y2, ids in data_loader:\n            # Setup for forward\n            cw_idxs = cw_idxs.to(device)\n            qw_idxs = qw_idxs.to(device)\n            batch_size = cw_idxs.size(0)\n\n            # Forward\n            log_p1, log_p2 = model(cw_idxs, qw_idxs)\n            y1, y2 = y1.to(device), y2.to(device)\n            loss = F.nll_loss(log_p1, y1) + F.nll_loss(log_p2, y2)\n            nll_meter.update(loss.item(), batch_size)\n\n            # Get F1 and EM 
scores\n p1, p2 = log_p1.exp(), log_p2.exp()\n starts, ends = util.discretize(p1, p2, max_len, use_squad_v2)\n\n # Log info\n progress_bar.update(batch_size)\n progress_bar.set_postfix(NLL=nll_meter.avg)\n\n preds, _ = util.convert_tokens(gold_dict,\n ids.tolist(),\n starts.tolist(),\n ends.tolist(),\n use_squad_v2)\n pred_dict.update(preds)\n\n model.train()\n\n results = util.eval_dicts(gold_dict, pred_dict, use_squad_v2)\n results_list = [('NLL', nll_meter.avg),\n ('F1', results['F1']),\n ('EM', results['EM'])]\n if use_squad_v2:\n results_list.append(('AvNA', results['AvNA']))\n results = OrderedDict(results_list)\n\n return results, pred_dict\n\n\nif __name__ == '__main__':\n main(data=config.data(), flags=sys.argv)\n" ]
[ [ "torch.nn.functional.nll_loss", "torch.cuda.manual_seed_all", "numpy.random.seed", "torch.no_grad", "torch.enable_grad", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.optim.lr_scheduler.LambdaLR", "torch.nn.DataParallel" ] ]
shahrukh357/ga-learner-dsmp-repo
[ "84f479c5fbb111886a4e758c67fb9558c8cab374" ]
[ "-Melbourne---Housing-Again!/code.py" ]
[ "# --------------\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# path- variable storing file path\r\n\r\n#Code starts here\r\ndf = pd.read_csv(path)\r\nprint(df.columns[0:5])\r\nX=df.drop(['Price'],1)\r\ny=df['Price']\r\n\r\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 6)\r\ncorr=X_train.corr()\r\nprint(corr)\n\n\n# --------------\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import r2_score\r\n\r\n# Code starts here\r\nregressor = LinearRegression()\r\nregressor.fit(X_train,y_train)\r\ny_pred=regressor.predict(X_test)\r\nr2=r2_score(y_test,y_pred)\r\nprint(r2)\n\n\n# --------------\nfrom sklearn.linear_model import Lasso\r\n\r\n# Code starts here\r\nlasso = Lasso()\r\nlasso.fit(X_train,y_train)\r\nlasso_pred = lasso.predict(X_test)\r\n\r\nr2_lasso = r2_score(y_test,lasso_pred)\n\n\n# --------------\nfrom sklearn.linear_model import Ridge\r\n\r\n# Code starts here\r\n\r\n\r\nridge = Ridge()\r\nridge.fit(X_train,y_train)\r\nridge_pred = lasso.predict(X_test)\r\n\r\nr2_ridge = r2_score(y_test,ridge_pred)\r\nprint(len(ridge.coef_))\r\nprint(X_train.shape)\r\n\r\n# Code ends here\n\n\n# --------------\nfrom sklearn.model_selection import cross_val_score\r\n\r\n#Code starts here\r\nregressor = LinearRegression()\r\nscore = cross_val_score(regressor,X_train,y_train,cv=10)\r\nmean_score=np.mean(score)\r\nprint(mean_score)\n\n\n# --------------\nfrom sklearn.preprocessing import PolynomialFeatures\r\nfrom sklearn.pipeline import make_pipeline\r\n\r\n#Code starts here\r\nmodel=make_pipeline(PolynomialFeatures(2),LinearRegression())\r\nmodel.fit(X_train,y_train)\r\ny_pred = model.predict(X_test)\r\nr2_poly = r2_score(y_test,y_pred)\r\nprint(r2_poly)\n\n\n" ]
[ [ "sklearn.linear_model.Lasso", "sklearn.linear_model.LinearRegression", "sklearn.metrics.r2_score", "sklearn.preprocessing.PolynomialFeatures", "sklearn.linear_model.Ridge", "numpy.mean", "sklearn.model_selection.train_test_split", "pandas.read_csv", "sklearn.model_selection.cross_val_score" ] ]
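Since the cells in the record above fit four different regressors on one shared split, a compact comparison loop may help; this sketch assumes the `X_train`, `X_test`, `y_train`, `y_test` variables created in the first cell are in scope.

```python
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

# One estimator per cell above, evaluated on the shared hold-out split.
models = {
    'linear': LinearRegression(),
    'lasso': Lasso(),
    'ridge': Ridge(),
    'poly2': make_pipeline(PolynomialFeatures(2), LinearRegression()),
}
for name, est in models.items():
    est.fit(X_train, y_train)
    print(name, r2_score(y_test, est.predict(X_test)))
```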
philipperemy/tensorflow-cnn-time-series
[ "d975e822b824315dfc6fa11a5af0450e42c6b0bf" ]
[ "alexnet_data.py" ]
[ "import matplotlib\n\nmatplotlib.use('Agg')\n\nfrom random import shuffle\n\nimport errno\nimport os\nfrom glob import glob\nimport skimage.io\nimport skimage.transform\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nDATA_FOLDER = '/tmp/cnn-time-series/'\n\n\ndef load_image(path):\n try:\n img = skimage.io.imread(path).astype(float)\n # TODO http://scikit-image.org/docs/dev/api/skimage.color.html rgb2gray\n # TODO cropping.\n img = skimage.transform.resize(img, (224, 224), mode='constant')\n except:\n return None\n if img is None:\n return None\n if len(img.shape) < 2:\n return None\n if len(img.shape) == 4:\n return None\n if len(img.shape) == 2:\n img = np.tile(img[:, :, None], 3)\n if img.shape[2] == 4:\n img = img[:, :, :3]\n if img.shape[2] > 4:\n return None\n\n img /= 255.\n return img\n\n\ndef next_batch(x_y, index, batch_size):\n has_reset = False\n index *= batch_size\n updated_index = index % len(x_y)\n if updated_index + batch_size > len(x_y):\n updated_index = 0\n has_reset = True\n beg = updated_index\n end = updated_index + batch_size\n output = x_y[beg:end]\n x = np.array([e[0] for e in output])\n y = np.array([e[1] for e in output])\n return x, y, has_reset\n\n\ndef read_dataset(folder, max_num_training_images, max_num_testing_images, class_mapper):\n training_inputs = read_set(folder, 'train', max_num_training_images, class_mapper)\n testing_inputs = read_set(folder, 'test', max_num_testing_images, class_mapper)\n return training_inputs, testing_inputs\n\n\ndef read_set(folder, phase, max_num_of_images, class_mapper):\n images_folder = os.path.join(folder, phase)\n inputs = []\n list_images = glob(images_folder + '/**/*.png')\n shuffle(list_images)\n for i, image_name in enumerate(list_images):\n if len(inputs) >= max_num_of_images:\n break\n class_name = image_name.split('/')[-2]\n if i % 100 == 0:\n print(i)\n inputs.append([load_image(image_name), class_mapper[class_name]]) # TODO make them 256x256\n return inputs\n\n\ndef compute_mean_not_optimised(inputs):\n matrix_all_images = []\n for image, label in inputs:\n matrix_all_images.append(image)\n return np.mean(np.array(matrix_all_images), axis=0)\n\n\ndef compute_mean(inputs):\n image_mean = np.array(inputs[0][0])\n image_mean.fill(0)\n for i, (image, label) in enumerate(inputs):\n image_mean += image\n if i % 100 == 0:\n print(i)\n return image_mean / len(inputs)\n\n\ndef subtract_mean(inputs, mean_image):\n new_inputs = []\n for image, label in inputs:\n new_inputs.append([image - mean_image, label])\n return new_inputs\n\n\ndef mkdir_p(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef generate_time_series(arr, filename):\n generate_multi_time_series([arr], filename)\n\n\ndef generate_multi_time_series(arr_list, filename):\n fig = plt.figure()\n for arr in arr_list:\n plt.plot(arr)\n plt.savefig(filename)\n plt.close(fig)\n" ]
[ [ "matplotlib.use", "numpy.array", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.tile", "matplotlib.pyplot.close", "matplotlib.pyplot.figure" ] ]
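`next_batch` in the file above advances through shuffled `[image, label]` pairs and signals a wrap-around through `has_reset`. The sketch below drives one full pass on synthetic data; the 224x224x3 shape matches what `load_image` produces, but the dummy inputs and the batch size are made up.

```python
import numpy as np
# from alexnet_data import next_batch  # the module above, if on the path

dummy_inputs = [[np.zeros((224, 224, 3)), k % 2] for k in range(10)]

index = 0
while True:
    x, y, has_reset = next_batch(dummy_inputs, index, batch_size=4)
    print('batch', index, 'x:', x.shape, 'y:', y.shape, 'reset:', has_reset)
    if has_reset:  # wrapped past the end: one epoch done
        break
    index += 1
```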
anshul3899/Structured-Graph-Learning
[ "fabefa3d160baf8faefbffd7961a455348d6f068" ]
[ "main.py" ]
[ "import os\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_moons, make_blobs\nfrom sgl import LearnGraphTopolgy\n\nplots_dir = './plots'\nif not os.path.exists(plots_dir):\n os.makedirs(plots_dir)\n\n'''Visual results on two moon dataset \n\n'''\nnp.random.seed(0)\nn = 50 # number of nodes per cluster\nk = 2 # number of components\nX, y = make_moons(n_samples=n*k, noise=.05, shuffle=True)\n# X, y = make_blobs(n_samples=n*k, centers=k, n_features=2, random_state=0)\n# dict to store position of nodes\npos = {}\nfor i in range(n*k):\n pos[i] = X[i]\n# Visualization of original data\nfig = plt.figure()\nplt.scatter(X[:,0], X[:,1], c=y )\nplt.title(\"Two moon dataset\")\nplt.xlabel('x-coordinate')\nplt.ylabel('y-coordinate')\nfig.savefig('plots/two_moon_dataset.eps', format='eps')\nfig.savefig('plots/two_moon_dataset.png')\n\n# compute sample correlation matrix\nS = np.dot(X, X.T)\n\n# estimate underlying graph\nsgl = LearnGraphTopolgy(S, maxiter=1000, record_objective = True, record_weights = True)\ngraph = sgl.learn_k_component_graph(k=2, beta=0.1 )\n\nnll = graph['negloglike']\nprint('NLL: ', min(nll))\nobjective = graph['obj_fun']\nprint('Objective: ', min(objective))\n\n# build network\nA = graph['adjacency']\nG = nx.from_numpy_matrix(A)\nprint('Graph statistics:')\nprint('Nodes: ', G.number_of_nodes(), 'Edges: ', G.number_of_edges() )\n\n# normalize edge weights to plot edges strength\nall_weights = []\nfor (node1,node2,data) in G.edges(data=True):\n all_weights.append(data['weight'])\nmax_weight = max(all_weights)\nnorm_weights = [3* w / max_weight for w in all_weights]\nnorm_weights = norm_weights\n\n# plot graph\nfig = plt.figure(figsize=(15,15)) \nnx.draw_networkx(G,pos, width=norm_weights)\nplt.title(\"Learned graph for two moon dataset\")\nplt.suptitle('components k=2')\nplt.xlabel('x-coordinate')\nplt.ylabel('y-coordinate')\nfilename = 'plots/learned_graph_k='+ str(k) +'.eps'\npng_filename = 'plots/learned_graph_k='+ str(k) +'.png'\nfig.savefig(filename, format='eps')\nfig.savefig(png_filename,)" ]
[ [ "numpy.dot", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "sklearn.datasets.make_moons" ] ]
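If the learner recovers the intended k-component structure, the two moons should come out as two connected components of the estimated graph. A quick check on the adjacency matrix `A` from the script above (note, in passing, that the `S = np.dot(X, X.T)` it feeds to `LearnGraphTopolgy` is an uncentered inner-product similarity matrix rather than a true correlation matrix):

```python
import numpy as np
import networkx as nx

G = nx.from_numpy_matrix(A)  # A: learned adjacency from the script above
components = list(nx.connected_components(G))
labels = np.empty(A.shape[0], dtype=int)
for c, nodes in enumerate(components):
    for node in nodes:
        labels[node] = c
print('components found:', len(components))  # expect 2 for k=2
```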
marios-stam/crazyswarm
[ "24bda524bd5a6818d364e6be4617203a65a6f2d7" ]
[ "ros_ws/src/crazyswarm/scripts/pycrazyswarm/visualizer/visVispy.py" ]
[ "import os\nimport math\n\nimport ffmpeg\nimport numpy as np\nfrom vispy import scene, app, io, geometry\nfrom vispy.color import Color\nfrom vispy.visuals import transforms\nfrom vispy.scene.cameras import TurntableCamera\n\nfrom .. import util as util\n\n\nCF_MESH_PATH = os.path.join(os.path.dirname(__file__), \"crazyflie2.obj.gz\")\n# Convert millimeters to meters, but make twice as big so easier to see.\nMESHFILE_SCALE = 2.0 * 0.001\n# The matrix that rotates the coordinates of the .obj file to agree with the\n# Crazyflie's standard coordinate system. VisPy uses [row vector] * [matrix]\n# (like DirectX), so this is the transpose of what we would expect.\nUNROT_MESHFILE_TRANSPOSE = MESHFILE_SCALE * np.array([\n [-1, 0, 0],\n [ 0, 0, 1],\n [ 0, -1, 0],\n])\nELLIPSOID_COLOR_OK = Color(\"#11FF22\", alpha=0.1)\nELLIPSOID_COLOR_COLLISION = Color(\"#FF0000\", alpha=0.1)\n\n\nclass VisVispy:\n def __init__(self, show=True, resizable=True):\n self.canvas = scene.SceneCanvas(\n keys='interactive', size=(1024, 768), show=show, config=dict(samples=4), resizable=resizable\n )\n\n self.plane_color = 0.25 * np.ones((1, 3))\n self.bg_color = 0.9 * np.ones((1, 3))\n self.line_color = 0.7 * np.ones((1, 3))\n\n # Set up a viewbox to display the cube with interactive arcball\n self.view = self.canvas.central_widget.add_view()\n self.view.bgcolor = self.bg_color\n self.view.camera = TurntableCamera(\n fov=30.0, elevation=30.0, azimuth=90.0, center=(0.0, 0.0, 1.25)\n )\n self.cam_state = self.view.camera.get_state()\n\n # add a colored 3D axis for orientation\n axis = scene.visuals.XYZAxis(parent=self.view.scene)\n self.cfs = []\n self.led_color_cache = []\n\n ground = scene.visuals.Plane(\n 32.0, 32.0, direction=\"+z\", color=self.plane_color, parent=self.view.scene\n )\n\n # Lazy-constructed vispy objects and data for connectivity graph gfx.\n self.graph_edges = None\n self.graph_lines = None\n self.graph = None\n\n # Lazy-constructed vispy objects for collision ellipsoids.\n self.ellipsoids = None\n self.ellipsoid_radii = None\n\n def setGraph(self, edges):\n \"\"\"Set edges of graph visualization - sequence of (i,j) tuples.\"\"\"\n\n # Only allocate new memory if we need to.\n n_edges = len(edges)\n if self.graph_edges is None or n_edges != len(self.graph_edges):\n self.graph_lines = np.zeros((2 * n_edges, 3))\n self.graph_edges = edges\n\n # Lazily construct VisPy object for graph.\n if self.graph is None:\n self.graph = scene.visuals.Line(\n parent=self.view.scene,\n color=self.line_color,\n pos=self.graph_lines,\n connect=\"segments\",\n method=\"gl\",\n antialias=True,\n )\n\n def showEllipsoids(self, radii):\n self.ellipsoid_radii = np.array(radii)\n\n def update(self, t, crazyflies):\n if len(self.cfs) == 0:\n verts, faces, normals, nothin = io.read_mesh(CF_MESH_PATH)\n for i, cf in enumerate(crazyflies):\n color = cf.ledRGB\n mesh = scene.visuals.Mesh(\n parent=self.view.scene,\n vertices=verts,\n faces=faces,\n color=color,\n shading=\"smooth\",\n )\n mesh.light_dir = (0.1, 0.1, 1.0)\n mesh.shininess = 0.01\n mesh.ambient_light_color = [0.5] * 3\n mesh.transform = transforms.MatrixTransform()\n self.cfs.append(mesh)\n self.led_color_cache.append(color)\n\n if self.ellipsoid_radii is not None and self.ellipsoids is None:\n sphere_mesh = geometry.create_sphere(radius=1.0)\n self.ellipsoids = [\n scene.visuals.Mesh(\n parent=self.view.scene,\n meshdata=sphere_mesh,\n color=ELLIPSOID_COLOR_OK,\n shading=\"smooth\",\n )\n for _ in self.cfs\n ]\n for ell in self.ellipsoids:\n ell.light_dir = (0.1, 
0.1, 1.0)\n ell.shininess = 0.0\n ell.ambient_light_color = [0.5] * 3\n ell.transform = transforms.MatrixTransform()\n\n positions = np.stack([cf.position() for cf in crazyflies])\n\n for i in range(0, len(self.cfs)):\n R_state = crazyflies[i].rotBodyToWorld()\n # Recall VisPy uses [row vector] * [matrix]!!\n T = np.eye(4)\n T[:3, :3] = np.dot(UNROT_MESHFILE_TRANSPOSE, R_state.T)\n T[3, :3] = positions[i]\n self.cfs[i].transform = transforms.MatrixTransform(T)\n # vispy does not do this check\n color = crazyflies[i].ledRGB\n if color != self.led_color_cache[i]:\n self.led_color_cache[i] = color\n self.cfs[i].color = color # sets dirty flag\n\n # Update graph line segments to match new Crazyflie positions.\n if self.graph is not None:\n for k, (i, j) in enumerate(self.graph_edges):\n self.graph_lines[2 * k, :] = positions[i]\n self.graph_lines[2 * k + 1, :] = positions[j]\n self.graph.set_data(self.graph_lines)\n\n # Update collsiion ellipsoids.\n if self.ellipsoids is not None:\n colliding = util.check_ellipsoid_collisions(positions, self.ellipsoid_radii)\n for i, pos in enumerate(positions):\n ell = self.ellipsoids[i]\n tf = ell.transform\n tf.reset()\n tf.scale(self.ellipsoid_radii)\n tf.translate(pos)\n new_color = ELLIPSOID_COLOR_COLLISION if colliding[i] else ELLIPSOID_COLOR_OK\n if not (new_color == ell.color): # vispy Color lacks != override.\n ell.color = new_color\n\n self.canvas.app.process_events()\n\n def render(self):\n frame = self.canvas.render()\n # Do not render alpha channel - we always use rgb24 format.\n if frame.shape[2] == 4:\n frame = frame[:, :, :3]\n return frame\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.zeros", "numpy.ones", "numpy.eye" ] ]
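The comments in the file above stress VisPy's `[row vector] * [matrix]` convention, which is why `update` stores the rotation transposed and puts the translation in row 3 of `T`. A self-contained numpy check of that convention, with a made-up rotation and position:

```python
import numpy as np

R = np.array([[0.0, -1.0, 0.0],
              [1.0,  0.0, 0.0],
              [0.0,  0.0, 1.0]])   # 90-degree rotation about z
position = np.array([1.0, 2.0, 3.0])

T = np.eye(4)
T[:3, :3] = R.T       # store the transpose, as update() does
T[3, :3] = position   # translation lives in the last *row*

p = np.array([1.0, 0.0, 0.0, 1.0])  # homogeneous point as a row vector
world = p @ T
assert np.allclose(world[:3], R @ p[:3] + position)
print(world[:3])  # [1. 3. 3.]
```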
agnithamohan/CV_project
[ "0abe59233b83611d1e05492ef2a0cd3ea0b23372" ]
[ "probabilistic_unet.py" ]
[ "#This code is based on: https://github.com/SimonKohl/probabilistic_unet\n\nfrom unet_blocks import *\nfrom unet import Unet\nfrom utils import init_weights,init_weights_orthogonal_normal, l2_regularisation\nimport torch.nn.functional as F\nfrom torch.distributions import Normal, Independent, kl\nimport numpy as np\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nclass Encoder(nn.Module):\n \"\"\"\n A convolutional neural network, consisting of len(num_filters) times a block of no_convs_per_block convolutional layers,\n after each block a pooling operation is performed. And after each convolutional layer a non-linear (ReLU) activation function is applied.\n \"\"\"\n def __init__(self, input_channels, num_filters, no_convs_per_block, initializers, padding=True, posterior=False):\n super(Encoder, self).__init__()\n self.contracting_path = nn.ModuleList()\n self.input_channels = input_channels\n self.num_filters = num_filters\n\n if posterior:\n #To accomodate for the mask that is concatenated at the channel axis, we increase the input_channels.\n self.input_channels += self.input_channels\n\n layers = []\n for i in range(len(self.num_filters)):\n \"\"\"\n Determine input_dim and output_dim of conv layers in this block. The first layer is input x output,\n All the subsequent layers are output x output.\n \"\"\"\n input_dim = self.input_channels if i == 0 else output_dim\n output_dim = num_filters[i]\n \n if i != 0:\n layers.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True))\n \n layers.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, padding=int(padding)))\n layers.append(nn.ReLU(inplace=True))\n\n for _ in range(no_convs_per_block-1):\n layers.append(nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=int(padding)))\n layers.append(nn.ReLU(inplace=True))\n\n self.layers = nn.Sequential(*layers)\n self.layers.apply(init_weights)\n\t\n\n def forward(self, input):\n output = self.layers(input)\n return output\n\nclass AxisAlignedConvGaussian(nn.Module):\n \"\"\"\n A convolutional net that parametrizes a Gaussian distribution with axis aligned covariance matrix.\n \"\"\"\n def __init__(self, input_channels, num_filters, no_convs_per_block, latent_dim, initializers, posterior=False):\n super(AxisAlignedConvGaussian, self).__init__()\n self.input_channels = input_channels\n self.channel_axis = 1\n self.num_filters = num_filters\n self.no_convs_per_block = no_convs_per_block\n self.latent_dim = latent_dim\n self.posterior = posterior\n if self.posterior:\n self.name = 'Posterior'\n else:\n self.name = 'Prior'\n self.encoder = Encoder(self.input_channels, self.num_filters, self.no_convs_per_block, initializers, posterior=self.posterior)\n self.conv_layer = nn.Conv2d(num_filters[-1], 2 * self.latent_dim, (1,1), stride=1)\n self.show_img = 0\n self.show_seg = 0\n self.show_concat = 0\n self.show_enc = 0\n self.sum_input = 0\n\n nn.init.kaiming_normal_(self.conv_layer.weight, mode='fan_in', nonlinearity='relu')\n nn.init.normal_(self.conv_layer.bias)\n\n def forward(self, input, segm=None):\n #If segmentation is not none, concatenate the mask to the channel axis of the input\n if segm is not None:\n self.show_img = input\n self.show_seg = segm\n input = torch.cat((input, segm), dim=1)\n self.show_concat = input\n self.sum_input = torch.sum(input)\n\n encoding = self.encoder(input)\n self.show_enc = encoding\n #We only want the mean of the resulting hxw image\n encoding = torch.mean(encoding, dim=2, keepdim=True)\n encoding = 
torch.mean(encoding, dim=3, keepdim=True)\n\n #Convert encoding to 2 x latent dim and split up for mu and log_sigma\n mu_log_sigma = self.conv_layer(encoding)\n\n #We squeeze the second dimension twice, since otherwise it won't work when batch size is equal to 1\n mu_log_sigma = torch.squeeze(mu_log_sigma, dim=2)\n mu_log_sigma = torch.squeeze(mu_log_sigma, dim=2)\n mu = mu_log_sigma[:,:self.latent_dim]\n log_sigma = mu_log_sigma[:,self.latent_dim:]\n #This is a multivariate normal with diagonal covariance matrix sigma\n #https://github.com/pytorch/pytorch/pull/11178\n # dist = Independent(Normal(loc=mu, scale=torch.exp(log_sigma)),1)\n dist = Independent(Normal(loc=mu, scale=log_sigma), 1)\n return dist\n\nclass Fcomb(nn.Module):\n \"\"\"\n A function composed of no_convs_fcomb times a 1x1 convolution that combines the sample taken from the latent space,\n and output of the UNet (the feature map) by concatenating them along their channel axis.\n \"\"\"\n def __init__(self, num_filters, latent_dim, num_output_channels, num_classes, no_convs_fcomb, initializers, use_tile=True):\n super(Fcomb, self).__init__()\n self.num_channels = num_output_channels #output channels\n self.num_classes = num_classes\n self.channel_axis = 1\n self.spatial_axes = [2,3]\n self.num_filters = num_filters\n self.latent_dim = latent_dim\n self.use_tile = use_tile\n self.no_convs_fcomb = no_convs_fcomb \n self.name = 'Fcomb'\n\n if self.use_tile:\n layers = []\n\n #Decoder of N x a 1x1 convolution followed by a ReLU activation function except for the last layer\n layers.append(nn.Conv2d(self.num_filters[0]+self.latent_dim, self.num_filters[0], kernel_size=1))\n layers.append(nn.ReLU(inplace=True))\n\n for _ in range(no_convs_fcomb-2):\n layers.append(nn.Conv2d(self.num_filters[0], self.num_filters[0], kernel_size=1))\n layers.append(nn.ReLU(inplace=True))\n\n self.layers = nn.Sequential(*layers)\n\n self.last_layer = nn.Conv2d(self.num_filters[0], self.num_classes, kernel_size=1)\n\n if initializers['w'] == 'orthogonal':\n self.layers.apply(init_weights_orthogonal_normal)\n self.last_layer.apply(init_weights_orthogonal_normal)\n else:\n self.layers.apply(init_weights)\n self.last_layer.apply(init_weights)\n\n def tile(self, a, dim, n_tile):\n \"\"\"\n This function is taken form PyTorch forum and mimics the behavior of tf.tile.\n Source: https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853/3\n \"\"\"\n init_dim = a.size(dim)\n repeat_idx = [1] * a.dim()\n repeat_idx[dim] = n_tile\n a = a.repeat(*(repeat_idx))\n order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])).to(device)\n return torch.index_select(a, dim, order_index)\n\n def forward(self, feature_map, z):\n \"\"\"\n Z is batch_sizexlatent_dim and feature_map is batch_sizexno_channelsxHxW.\n So broadcast Z to batch_sizexlatent_dimxHxW. 
Behavior is exactly the same as tf.tile (verified)\n \"\"\"\n if self.use_tile:\n z = torch.unsqueeze(z,2)\n z = self.tile(z, 2, feature_map.shape[self.spatial_axes[0]])\n z = torch.unsqueeze(z,3)\n z = self.tile(z, 3, feature_map.shape[self.spatial_axes[1]])\n #Concatenate the feature map (output of the UNet) and the sample taken from the latent space\n feature_map = torch.cat((feature_map, z), dim=self.channel_axis)\n output = self.layers(feature_map)\n return self.last_layer(output)\n\n\nclass ProbabilisticUnet(nn.Module):\n \"\"\"\n A probabilistic UNet (https://arxiv.org/abs/1806.05034) implementation.\n input_channels: the number of channels in the image (1 for greyscale and 3 for RGB)\n num_classes: the number of classes to predict\n num_filters: is a list consisint of the amount of filters layer\n latent_dim: dimension of the latent space\n no_cons_per_block: no convs per block in the (convolutional) encoder of prior and posterior\n \"\"\"\n\n def __init__(self, input_channels=1, num_classes=1, num_filters=[32,64,128,192], latent_dim=8, no_convs_fcomb=4, beta=10.0, layer=''):\n super(ProbabilisticUnet, self).__init__()\n self.input_channels = input_channels\n self.num_classes = num_classes\n self.num_filters = num_filters\n self.latent_dim = latent_dim\n self.no_convs_per_block = 3\n self.no_convs_fcomb = no_convs_fcomb\n self.initializers = {'w':'he_normal', 'b':'normal'}\n self.beta = beta\n self.z_prior_sample = 0\n prior_posterior_layers = {\n 'fpn_res5_2_sum': [256, 512, 1024, 2048],\n 'fpn_res4_5_sum': [256, 512, 1024, 1024, 1024, 1024, 2048],\n 'fpn_res3_3_sum': [256, 512, 1024, 2048],\n 'fpn_res2_2_sum': [256, 512, 1024, 1024, 1024, 1024, 2048]\n }\n\n self.unet = Unet(self.input_channels, self.num_classes, self.num_filters, self.initializers, apply_last_layer=False, padding=True).to(device)\n self.prior = AxisAlignedConvGaussian(self.input_channels, prior_posterior_layers[layer], self.no_convs_per_block, self.latent_dim, self.initializers,).to(device)\n self.posterior = AxisAlignedConvGaussian(self.input_channels, prior_posterior_layers[layer], self.no_convs_per_block, self.latent_dim, self.initializers, posterior=True).to(device)\n self.fcomb = Fcomb(self.num_filters, self.latent_dim, self.input_channels, self.num_classes, self.no_convs_fcomb, {'w':'orthogonal', 'b':'normal'}, use_tile=True).to(device)\n\n def forward(self, patch, segm=None, training=True):\n \"\"\"\n Construct prior latent space for patch and run patch through UNet,\n in case training is True also construct posterior latent space\n \"\"\"\n if training:\n self.posterior_latent_space = self.posterior.forward(patch, segm)\n self.prior_latent_space = self.prior.forward(patch)\n self.unet_features = self.unet.forward(patch,False)\n\n def sample(self, testing=False):\n \"\"\"\n Sample a segmentation by reconstructing from a prior sample\n and combining this with UNet features\n \"\"\"\n if testing == False:\n z_prior = self.prior_latent_space.rsample()\n self.z_prior_sample = z_prior\n else:\n #You can choose whether you mean a sample or the mean here. 
For the GED it is important to take a sample.\n z_prior = self.prior_latent_space.base_dist.loc\n #z_prior = self.prior_latent_space.sample()\n self.z_prior_sample = z_prior\n return self.fcomb.forward(self.unet_features,z_prior)\n\n\n def reconstruct(self, use_posterior_mean=False, calculate_posterior=False, z_posterior=None):\n \"\"\"\n Reconstruct a segmentation from a posterior sample (decoding a posterior sample) and UNet feature map\n use_posterior_mean: use posterior_mean instead of sampling z_q\n calculate_posterior: use a provided sample or sample from posterior latent space\n \"\"\"\n if use_posterior_mean:\n z_posterior = self.posterior_latent_space.loc\n else:\n if calculate_posterior:\n z_posterior = self.posterior_latent_space.rsample()\n return self.fcomb.forward(self.unet_features, z_posterior)\n\n def kl_divergence(self, analytic=True, calculate_posterior=False, z_posterior=None):\n \"\"\"\n Calculate the KL divergence between the posterior and prior KL(Q||P)\n analytic: calculate KL analytically or via sampling from the posterior\n calculate_posterior: if we use samapling to approximate KL we can sample here or supply a sample\n \"\"\"\n if analytic:\n #Neeed to add this to torch source code, see: https://github.com/pytorch/pytorch/issues/13545\n kl_div = kl.kl_divergence(self.posterior_latent_space, self.prior_latent_space)\n else:\n if calculate_posterior:\n z_posterior = self.posterior_latent_space.rsample()\n log_posterior_prob = self.posterior_latent_space.log_prob(z_posterior)\n log_prior_prob = self.prior_latent_space.log_prob(z_posterior)\n kl_div = log_posterior_prob - log_prior_prob\n return kl_div\n\n def elbo(self, segm, analytic_kl=True, reconstruct_posterior_mean=False,training=True):\n \"\"\"\n Calculate the evidence lower bound of the log-likelihood of P(Y|X)\n \"\"\"\n criterion = torch.nn.L1Loss(reduction='none')\n if training:\n z_posterior = self.posterior_latent_space.rsample()\n self.reconstruction = self.reconstruct(use_posterior_mean=reconstruct_posterior_mean, calculate_posterior=False, z_posterior=z_posterior)\n mu2 = self.posterior_latent_space.base_dist.loc[0]\n mu1 = self.prior_latent_space.base_dist.loc[0]\n sigma2 = self.posterior_latent_space.base_dist.scale[0]\n sigma1 = self.prior_latent_space.base_dist.scale[0]\n logvar = sigma1 - sigma2\n mean = mu1 - mu1\n KLD = -0.5 * torch.sum(1 + logvar - mean.pow(2) - logvar.exp()) \n self.kl = KLD \n else:\n self.reconstruction = self.sample(testing=False)\n self.kl = torch.zeros(1,1)\n reconstruction_loss = criterion(input=self.reconstruction, target=segm)\n self.reconstruction_loss = torch.sum(reconstruction_loss)\n self.mean_reconstruction_loss = torch.mean(reconstruction_loss)\n return self.mean_reconstruction_loss, self.kl\n" ]
[ [ "torch.distributions.kl.kl_divergence", "torch.distributions.Normal", "numpy.arange" ] ]
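`kl_divergence(analytic=True)` in the record above delegates to `torch.distributions`, which knows a closed form for diagonal Gaussians wrapped in `Independent`. A standalone sketch with made-up statistics; it constructs the distributions with `scale=torch.exp(log_sigma)`, the variant left commented out in `AxisAlignedConvGaussian.forward`:

```python
import torch
from torch.distributions import Normal, Independent, kl

latent_dim = 8
mu_q = torch.zeros(1, latent_dim)
mu_p = torch.ones(1, latent_dim)
log_sigma = torch.zeros(1, latent_dim)  # unit variance on both sides

posterior = Independent(Normal(loc=mu_q, scale=torch.exp(log_sigma)), 1)
prior = Independent(Normal(loc=mu_p, scale=torch.exp(log_sigma)), 1)

kl_div = kl.kl_divergence(posterior, prior)  # one value per batch element
# With unit variances, KL reduces to 0.5 * ||mu_q - mu_p||^2 = 0.5 * 8.
print(kl_div)  # tensor([4.])
```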
neildhir/DCBO
[ "dc8a1df096cc83b37f45f9e546ed7f59ad693f33" ]
[ "src/utils/utilities.py" ]
[ "from copy import deepcopy\nfrom itertools import chain, combinations\nfrom typing import Tuple\nfrom networkx.classes.multidigraph import MultiDiGraph\nimport numpy as np\nfrom emukit.core import ContinuousParameter, ParameterSpace\nfrom numpy.core import hstack, vstack\nfrom .sequential_sampling import sequential_sample_from_true_SEM\nimport matplotlib.pyplot as plt\n\n\ndef standard_mean_function(x):\n return np.zeros_like(x)\n\n\ndef zero_variance_adjustment(x):\n return np.zeros_like(x)\n\n\ndef check_reshape_add_data(\n interventional_data_x, interventional_data_y, new_interventional_data_x, y_new, best_es, temporal_index,\n):\n if (\n interventional_data_x[temporal_index][best_es] is not None\n and interventional_data_y[temporal_index][best_es] is not None\n ):\n assert interventional_data_x[temporal_index][best_es].shape[1] == new_interventional_data_x.shape[1]\n\n # Update interventional data X\n interventional_data_x[temporal_index][best_es] = vstack(\n (interventional_data_x[temporal_index][best_es], new_interventional_data_x)\n )\n # Update interventional data Y\n interventional_data_y[temporal_index][best_es] = vstack(\n (interventional_data_y[temporal_index][best_es], make_column_shape_2D(y_new),)\n )\n else:\n # Assign new interventional data\n if len(new_interventional_data_x.shape) == 1 and len(best_es) == 1:\n reshaped_new_interventional_data_x = make_column_shape_2D(new_interventional_data_x)\n elif len(best_es) > 1 and len(new_interventional_data_x.shape) == 1:\n reshaped_new_interventional_data_x = new_interventional_data_x.reshape(1, -1)\n elif new_interventional_data_x.shape[0] == len(best_es): # ABO\n # TODO This might not be needed\n reshaped_new_interventional_data_x = np.transpose(new_interventional_data_x)\n else:\n reshaped_new_interventional_data_x = new_interventional_data_x\n\n # Assign X and Y\n interventional_data_x[temporal_index][best_es] = reshaped_new_interventional_data_x\n interventional_data_y[temporal_index][best_es] = make_column_shape_2D(y_new)\n\n assert (\n interventional_data_x[temporal_index][best_es].shape[0]\n == interventional_data_y[temporal_index][best_es].shape[0]\n )\n\n return (\n interventional_data_x[temporal_index][best_es],\n interventional_data_y[temporal_index][best_es],\n )\n\n\ndef get_monte_carlo_expectation(intervention_samples):\n assert isinstance(intervention_samples, dict)\n new = {k: None for k in intervention_samples.keys()}\n for es in new.keys():\n new[es] = intervention_samples[es].mean(axis=0)\n\n # Returns the expected value of the intervention via MC sampling\n return new\n\n\ndef create_intervention_exploration_domain(exploration_sets, interventional_variable_limits,) -> dict:\n intervention_exploration_domain = {es: None for es in exploration_sets}\n for es in exploration_sets:\n if len(es) == 1:\n assert es[0] in interventional_variable_limits.keys()\n LL = float(min(interventional_variable_limits[es[0]]))\n UL = float(max(interventional_variable_limits[es[0]]))\n else:\n LL, UL = [], [] # lower-limit and upper-limit\n for var in es:\n LL.append(float(min(interventional_variable_limits[var])))\n UL.append(float(max(interventional_variable_limits[var])))\n assert len(es) == len(UL) == len(LL)\n # Assign\n intervention_exploration_domain[es] = make_parameter_space_for_intervention_set(es, LL, UL)\n\n return intervention_exploration_domain\n\n\ndef make_parameter_space_for_intervention_set(exploration_set: tuple, lower_limit, upper_limit,) -> ParameterSpace:\n assert isinstance(exploration_set, tuple)\n if 
len(exploration_set) == 1:\n assert isinstance(lower_limit, float)\n assert isinstance(upper_limit, float)\n return ParameterSpace([ContinuousParameter(str(exploration_set), lower_limit, upper_limit)])\n else:\n multivariate_limits = []\n assert len(exploration_set) == len(lower_limit), exploration_set\n assert len(exploration_set) == len(upper_limit), exploration_set\n for i, var in enumerate(exploration_set):\n multivariate_limits.append(ContinuousParameter(str(var), lower_limit[i], upper_limit[i]))\n return ParameterSpace(multivariate_limits)\n\n\ndef convert_to_dict_of_temporal_lists(observational_samples: dict) -> dict:\n assert isinstance(observational_samples[list(observational_samples.keys())[0]], np.ndarray)\n assert len(observational_samples[list(observational_samples.keys())[0]].shape) == 2\n new = {k: None for k in observational_samples.keys()}\n for key in observational_samples.keys():\n new[key] = observational_samples[key].T.tolist()\n return new\n\n\ndef get_shuffled_dict_sample_subsets(samples, nr_interventions):\n assert isinstance(samples, dict), type(samples)\n for key in samples.keys():\n D = samples[key]\n # Means that all columns have the same number of samples\n assert isinstance(D, np.ndarray)\n # Rows and total timesteps\n N, _ = samples[list(samples.keys())[0]].shape\n shuffled_row_ids = np.random.permutation(N)\n assert nr_interventions <= N\n new = {key: None for key in samples.keys()}\n for key in samples.keys():\n new[key] = samples[key][shuffled_row_ids][:nr_interventions]\n return new\n\n\ndef initialise_interventional_objects(\n exploration_sets: list,\n D_I: dict, # Interventional data\n base_target: str,\n total_timesteps: int,\n task=\"min\",\n index_name: int = None,\n nr_interventions: int = None,\n) -> Tuple[list, list, list, dict, dict]:\n\n assert isinstance(D_I, dict)\n target_values = {t: {es: None for es in exploration_sets} for t in range(total_timesteps)}\n interventions = deepcopy(target_values)\n\n intervention_data_X = deepcopy(target_values)\n intervention_data_Y = deepcopy(target_values)\n temporal_index = 0\n for es in exploration_sets:\n if es not in D_I:\n pass\n else:\n # Interventional data contains a dictionary of dictionaries, each corresponding to one type (es) of intervention.\n interventional_samples = D_I[es] # es on keys and nd.array on values\n\n assert isinstance(interventional_samples, dict), (es, type(interventional_samples), D_I)\n assert base_target in interventional_samples\n assert isinstance(interventional_samples[base_target], np.ndarray)\n\n # This option exist _if_ we have more than one intervention per es\n if nr_interventions:\n assert index_name is not None\n # Need to reset the global seed\n state = np.random.get_state()\n np.random.seed(index_name)\n data_subset = get_shuffled_dict_sample_subsets(interventional_samples, nr_interventions)\n assert data_subset[list(data_subset.keys())[0]].shape[0] == nr_interventions\n\n np.random.set_state(state)\n\n # If we only have one sample per intervention we just use that\n else:\n data_subset = interventional_samples\n # Find the corresponding target values at these coordinates [array]\n target_values[temporal_index][es] = np.array(data_subset[base_target][temporal_index]).reshape(-1, 1)\n assert target_values[temporal_index][es] is not None\n\n # Find the corresponding interventions [array]\n if len(es) == 1:\n interventions[temporal_index][es] = np.array(data_subset[es[0]][temporal_index]).reshape(-1, 1)\n else:\n tmp = []\n for var in es:\n 
tmp.append(data_subset[var][temporal_index])\n interventions[temporal_index][es] = np.expand_dims(np.hstack(tmp), axis=0)\n assert interventions[temporal_index][es] is not None\n\n # Set the interventional data for use in DCBO\n intervention_data_Y[temporal_index][es] = target_values[temporal_index][es]\n intervention_data_X[temporal_index][es] = interventions[temporal_index][es]\n\n assert intervention_data_X[temporal_index][es] is not None\n assert intervention_data_Y[temporal_index][es] is not None\n\n # Get best intervention set at each time index\n print(target_values)\n best_es = eval(task)(target_values[temporal_index], key=target_values[temporal_index].get)\n\n # Interventions\n best_intervention_level = interventions[temporal_index][best_es]\n # Outcomes\n best_target_value = target_values[temporal_index][best_es]\n\n # Use the best outcome level at t=0 as a prior for all the other timesteps\n best_es_sequence = total_timesteps * [None]\n best_es_sequence[0] = best_es\n best_intervention_levels = total_timesteps * [None]\n best_intervention_levels[0] = best_intervention_level\n best_target_levels = total_timesteps * [None]\n best_target_levels[0] = best_target_value\n\n return (\n best_es_sequence,\n best_target_levels,\n best_intervention_levels,\n intervention_data_X,\n intervention_data_Y,\n )\n\n\ndef initialise_optimal_intervention_level_list(\n total_graph_timesteps: int,\n exploration_sets: list,\n initial_optimal_sequential_intervention_sets: list,\n initial_optimal_sequential_intervention_levels: list,\n number_of_trials: int,\n) -> list:\n assert len(initial_optimal_sequential_intervention_levels) == total_graph_timesteps\n intervention_levels = [\n {es: number_of_trials * [None] for es in exploration_sets} for _ in range(total_graph_timesteps)\n ]\n\n # Add interventional data that we have at start\n for es in exploration_sets:\n if es == initial_optimal_sequential_intervention_sets[0]:\n intervention_levels[0][es].insert(0, initial_optimal_sequential_intervention_levels[0])\n else:\n intervention_levels[0][es].insert(0, None)\n\n return intervention_levels\n\n\ndef initialise_global_outcome_dict_new(\n total_graph_timesteps: int, initial_optimal_target_values: list, blank_val\n) -> dict:\n assert isinstance(total_graph_timesteps, int)\n assert isinstance(initial_optimal_target_values, list)\n assert total_graph_timesteps > 0\n assert len(initial_optimal_target_values) == total_graph_timesteps\n # Remember that Python lists are mutable objects, hence this construction.\n targets = {t: [] for t in range(total_graph_timesteps)}\n\n for t in range(total_graph_timesteps):\n if initial_optimal_target_values[t]:\n targets[t].append(float(initial_optimal_target_values[t]))\n else:\n # No interventional data was provided so this is empty.\n targets[t].append(blank_val)\n return targets\n\n\ndef make_column_shape_2D(x):\n return np.array([x]).reshape(-1, 1)\n\n\ndef assign_blanket_hat(\n blanket_hat: dict, exploration_set, intervention_level, target, target_value,\n):\n\n # Split current target\n target_variable, temporal_index = target.split(\"_\")\n temporal_index = int(temporal_index)\n assert len(exploration_set) == intervention_level.shape[1], (\n exploration_set,\n intervention_level,\n )\n assert intervention_level is not None\n # Assign target value\n blanket_hat[target_variable][temporal_index] = float(target_value) # TARGET\n #  Assign intervention\n for intervention_variable, xx in zip(exploration_set, intervention_level.ravel()):\n 
blanket_hat[intervention_variable][temporal_index] = xx\n\n return\n\n\ndef assign_blanket(\n static_sem: dict, # OBS: true SEM\n dynamic_sem: dict, #  OBS: true SEM\n blanket: dict,\n exploration_set: list,\n intervention_level: np.array,\n target: str,\n target_value: float,\n G: MultiDiGraph,\n) -> None:\n\n # Split current target\n target_var, temporal_index = target.split(\"_\")\n t = int(temporal_index)\n assert len(exploration_set) == intervention_level.shape[1], (\n exploration_set,\n intervention_level,\n )\n assert intervention_level is not None\n\n # Assign target value\n blanket[target_var][t] = float(target_value)\n\n if len(exploration_set) == 1:\n # Intervention only happening on _one_ variable, assign it\n intervention_variable = exploration_set[0]\n # Intervention only happening on _one_ variable, assign it\n blanket[intervention_variable][t] = float(intervention_level)\n # The target and intervention value have already assigned so we check to see if anything else is missing in this time-slice\n intervention_node = intervention_variable + \"_\" + str(t)\n children = [\n v.split(\"_\")[0]\n for v in G.successors(intervention_node)\n if v.split(\"_\")[0] != target_var and v.split(\"_\")[1] == temporal_index\n ]\n if children:\n for child in children:\n if blanket[child][t] is None: # Only valid when t > 0\n # Value is None so we sample a value for this node\n sample = sequential_sample_from_true_SEM(static_sem, dynamic_sem, t + 1, interventions=blanket)\n blanket[child][t] = sample[child][t]\n else:\n for i, intervention_variable in enumerate(exploration_set):\n blanket[intervention_variable][t] = float(intervention_level[:, i])\n\n\ndef check_blanket(blanket, base_target_variable, temporal_index, manipulative_variables):\n # Check that the target has been properly assigned.\n assert blanket[base_target_variable][temporal_index] is not None, temporal_index\n # Check that at least one intervention has been assigned. E.g. if X was intervened upon then Z should have a value.\n assert any(x is not None for x in [blanket[var][temporal_index] for var in manipulative_variables]), blanket\n\n\ndef select_sample(sample, input_variables, outside_time):\n if isinstance(input_variables, str):\n return sample[input_variables][outside_time].reshape(-1, 1)\n else:\n # Takes either a tuple() or a list()\n samp = []\n for node in input_variables:\n var, time = node.split(\"_\")[0], int(node.split(\"_\")[1])\n assert time == outside_time, (sample, input_variables, time, outside_time)\n samp.append(sample[var][time].reshape(-1, 1))\n return hstack(samp)\n\n\ndef powerset(iterable):\n # this returns e.g. 
powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)\n s = list(iterable)\n return chain.from_iterable(combinations(s, r) for r in range(1, len(s) + 1))\n\n\ndef calculate_best_intervention_and_effect(\n static_sem,\n dynamic_sem,\n exploration_sets,\n interventional_grids,\n time,\n blanket,\n T,\n plot=False,\n target=\"Y\",\n print_option=False,\n):\n true_causal_effect = {key: None for key in exploration_sets}\n static_noise_model = {k: np.zeros(T) for k in static_sem.keys()}\n for es in exploration_sets:\n res = []\n this_blanket = deepcopy(blanket)\n for xx in interventional_grids[es]:\n for intervention_variable, x in zip(es, xx):\n this_blanket[intervention_variable][time] = x\n out = sequential_sample_from_true_SEM(\n static_sem=static_sem,\n dynamic_sem=dynamic_sem,\n timesteps=time + 1,\n epsilon=static_noise_model,\n interventions=this_blanket,\n )\n\n res.append(out[target][time])\n\n true_causal_effect[es] = np.array(res)\n\n # Plot results\n if plot:\n for es in exploration_sets:\n if len(es) == 1:\n fig, ax = plt.subplots(1, 1, figsize=(10, 5))\n fig.suptitle(\"True causal effect at $t={}$\".format(time))\n ax.plot(\n interventional_grids[es], true_causal_effect[es], lw=2, alpha=0.5, label=\"$do{}$\".format(es),\n )\n plt.legend()\n\n opt_vals = {es: None for es in exploration_sets}\n # Find best causal effect\n for es in exploration_sets:\n Y = true_causal_effect[es].tolist()\n # Min value\n outcome_min_val = min(Y)\n # Corresponding intervention at min value\n idx = Y.index(outcome_min_val)\n opt_vals[es] = (outcome_min_val, interventional_grids[es][idx, :])\n\n # Get best\n minval = min(k[0] for k in opt_vals.values())\n best_es = [k for k, v in opt_vals.items() if v[0] == minval]\n\n # Indexed by zero so we take the shortest ES first\n if print_option is True:\n print(\"\\nBest exploration set: {}\".format(best_es))\n print(\"Best intervention level: {}\".format(opt_vals[best_es[0]][1]))\n print(\"Best best outcome value: {}\".format(opt_vals[best_es[0]][0]))\n\n for intervention_variable, x in zip(best_es[0], opt_vals[best_es[0]][1]):\n blanket[intervention_variable][time] = x\n blanket[target][time] = opt_vals[best_es[0]][0]\n\n if print_option is True:\n print(\"\\nNext blanket:\\n\")\n print(blanket)\n\n return blanket, true_causal_effect\n" ]
[ [ "numpy.core.hstack", "numpy.zeros_like", "numpy.array", "numpy.zeros", "numpy.random.seed", "numpy.random.permutation", "matplotlib.pyplot.legend", "matplotlib.pyplot.subplots", "numpy.core.vstack", "numpy.transpose", "numpy.hstack", "numpy.random.get_state", "numpy.random.set_state" ] ]
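Two of the small helpers in the record above are easiest to pin down with a concrete run; this assumes `powerset` and `make_column_shape_2D` are importable from the module:

```python
import numpy as np
# from src.utils.utilities import powerset, make_column_shape_2D

# Every non-empty subset, shortest first (matches the helper's comment):
print(list(powerset(['X', 'Z'])))        # [('X',), ('Z',), ('X', 'Z')]

# Any 1-D sequence becomes an explicit column vector:
print(make_column_shape_2D([1.0, 2.0]).shape)  # (2, 1)
```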
Lornatang/zero2SGD
[ "546ca660e6b7843fa77086b2f7a6a575c271be02" ]
[ "activation.py" ]
[ "\"\"\"Implement some basic operations of Activation function.\n\"\"\"\n\n####################################################\n# Author: <Changyu Liu>[email protected]\n# License: MIT\n####################################################\n\nimport numpy as np\n\n\ndef linear(x, w, b):\n \"\"\" linear activation func\n Paras\n -----------------------------------\n x: input of the linear layer\n w: weight\n b: bias\n\n Returns\n -----------------------------------\n cal wx + b\n \"\"\"\n\n return np.dot(w, x) + b\n\n\ndef linear_backward(dx, cache):\n \"\"\"\n Paras\n -----------------------------------\n x: input of the linear layer\n w: weight\n b: bias\n\n Returns\n -----------------------------------\n cal wx + b\n \"\"\"\n x, W, _, _, _, _, _ = cache\n dW = np.dot(dx, x.T)\n db = np.sum(dx, axis=1, keepdims=True)\n dx = np.dot(W.T, dx)\n\n return dx, dW, db\n\n\ndef relu(x):\n \"\"\" relu activation function\n Paras\n -----------------------------------\n x: output of the linear layer\n\n Returns\n -----------------------------------\n max of nums\n \"\"\"\n\n return np.maximum(0, x)\n\n\ndef relu_backward(dx, x):\n \"\"\" derivation of relu\n Paras\n -----------------------------------\n x: output of the linear layer\n\n Returns\n -----------------------------------\n max of nums\n \"\"\"\n\n return np.multiply(dx, np.int64(x > 0))\n\n\ndef sigmoid(x):\n \"\"\" implement the activation function sigmoid\n Paras\n -----------------------------------\n x: Output of the linear layer\n\n Returns\n -----------------------------------\n max of nums\"\"\"\n\n return 1 / (1 + np.exp(-x))\n\n\ndef sigmoid_backward(x):\n \"\"\" derivation of sigmoid\n Paras\n -----------------------------------\n x: output of the linear layer\n\n Returns\n -----------------------------------\n max of nums\n \"\"\"\n s = sigmoid(x)\n\n return s * (1 - s)\n\n\ndef tanh(x):\n \"\"\" implement the activation function tanh\n Paras\n -----------------------------------\n x: output of the linear layer\n\n Returns\n -----------------------------------\n \"\"\"\n\n return (1 - np.exp(2 * -x)) / (1 + np.exp(2 * -x))\n\n\ndef tanh_backward(x):\n \"\"\"derivation of tanh\n Paras\n -----------------------------------\n x: output of the linear layer\n\n Returns\n -----------------------------------\n \"\"\"\n favl = tanh(x)\n\n return 1 - favl ** 2\n\n\ndef batch_norm(x, gamma, beta, epsilon=1e-12):\n \"\"\"\n Paras\n -----------------------------------\n x: the input of activation (x = np.dot(x, W) + b)\n gamma: zoom factor\n beta: translation factor\n epsilon: is a constant for denominator is 0\n\n Returns\n -----------------------------------\n z_out, mean, variance\n \"\"\"\n mean = np.mean(x, axis=1, keepdims=True) # cal x mean\n var = np.var(x, axis=1, keepdims=True) # cal x var\n sqrt_var = np.sqrt(var + epsilon)\n\n normalized = (x - mean) / sqrt_var # normalized\n\n # scale and shift variables are introduced to calculate the normalized value\n out = np.multiply(gamma, normalized) + beta\n return mean, var, sqrt_var, normalized, out\n\n\ndef batch_norm_backward(dx, cache):\n \"\"\" derivation of batch_norm\n Paras\n -----------------------------------\n dx: output of the linear layer\n\n Returns\n -----------------------------------\n \"\"\"\n _, _, _, gamma, sqrt_var, normalized, _ = cache\n m = dx.shape[1]\n dgamma = np.sum(dx * normalized, axis=1, keepdims=True)\n dbeta = np.sum(dx, axis=1, keepdims=True)\n dout = 1. 
/ m * gamma * sqrt_var * (\n m * dx - np.sum(dx, axis=1, keepdims=True) - normalized * np.sum(dx * normalized, axis=1, keepdims=True))\n return dgamma, dbeta, dout\n" ]
[ [ "numpy.dot", "numpy.sum", "numpy.exp", "numpy.mean", "numpy.multiply", "numpy.sqrt", "numpy.int64", "numpy.var", "numpy.maximum" ] ]
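One detail worth flagging in `batch_norm_backward` above: the standard simplified batch-norm input gradient divides by the standard deviation, while the expression above multiplies by `sqrt_var`. Below is a hedged reference implementation of the textbook form plus a central finite-difference check against the `batch_norm` forward pass; `batch_norm_backward_ref` is my name, and the shapes, seed, and tolerance are arbitrary choices.

```python
import numpy as np
# from activation import batch_norm  # the module above, if on the path

def batch_norm_backward_ref(dx, gamma, sqrt_var, normalized):
    # Textbook simplified gradient w.r.t. the input: note the division
    # by sqrt_var (the snippet above multiplies by it instead).
    m = dx.shape[1]
    return (gamma / (m * sqrt_var)) * (
        m * dx
        - np.sum(dx, axis=1, keepdims=True)
        - normalized * np.sum(dx * normalized, axis=1, keepdims=True))

rng = np.random.default_rng(0)
x = rng.standard_normal((3, 5))
gamma = rng.standard_normal((3, 1))
beta = rng.standard_normal((3, 1))
g = rng.standard_normal((3, 5))  # upstream gradient dL/dout

_, _, sqrt_var, normalized, _ = batch_norm(x, gamma, beta)
dx = batch_norm_backward_ref(g, gamma, sqrt_var, normalized)

# Central differences on the scalar loss L = sum(g * out).
eps, dx_num = 1e-6, np.zeros_like(x)
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        xp, xm = x.copy(), x.copy()
        xp[i, j] += eps
        xm[i, j] -= eps
        fp = np.sum(g * batch_norm(xp, gamma, beta)[4])
        fm = np.sum(g * batch_norm(xm, gamma, beta)[4])
        dx_num[i, j] = (fp - fm) / (2 * eps)

print(np.max(np.abs(dx - dx_num)))  # small (~1e-8) for the division form
```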
jlfilho/sr-tf2
[ "5309c69d252aad7a8e9260106353fd8acca29c6a" ]
[ "train.py" ]
[ "import tensorflow as tf\nimport argparse\nimport os\nimport statistics as stat\n\n\n\nfrom models.utils import plot_test_images, plot_images, print_metrics\n\nfrom models.espcn.model_espcn import ESPCN as espcn\n\nfrom models.evsrnet.model_evsrnet import EVSRNet\n\nfrom models.rtsrgan.model_generator import G_RTSRGAN as g_rtsrgan\nfrom models.rtsrgan.model_discriminator import d_rtsrgan\nfrom models.rtsrgan.model_gan import GAN\n\nfrom models.rtvsrgan.model_generator import G_RTVSRGAN as g_rtvsrgan \nfrom models.rtvsrgan.KnowledgeDistillation import Distiller\n\nfrom models.rtvsrgan.model_discriminator import d_rtvsrgan, rad_rtvsrgan\nfrom models.rtvsrgan.model_ragan import RaGAN\n\nfrom models.percsr.model_discriminator import d_percsr, rad_percsr\nfrom models.percsr.model_percsr import PercSR\nfrom models.percsr.model_teacher import Teacher\n\n\nfrom models.imdn.model_imdn import IMDN\n\nfrom models.dataset import Dataset\nfrom models.metrics import psnr, ssim, rmse, lpips\nfrom models.losses import VGGLossNoActivation as VGGLoss, GANLoss\n\nfrom models.save_img_callback import SaveImageCallback\nfrom models.utils import scale_1 as scale\n\n\nhot_test= {'hot_test_generic': {\n 'lr_hot_test_path': \"datasets/loaded_harmonic/img_hot_test/generic/lr/270p_qp17/\",\n 'hr_hot_test_path': \"datasets/loaded_harmonic/img_hot_test/generic/hr/1080p/\"\n},\n'hot_test_game': {\n 'lr_hot_test_path': \"datasets/loaded_harmonic/img_hot_test/game/lr/270p_qp17/\",\n 'hr_hot_test_path': \"datasets/loaded_harmonic/img_hot_test/game/hr/1080p/\"\n},\n'hot_test_sport': {\n 'lr_hot_test_path': \"datasets/loaded_harmonic/img_hot_test/sport/lr/270p_qp17/\",\n 'hr_hot_test_path': \"datasets/loaded_harmonic/img_hot_test/sport/hr/1080p/\"\n},\n'hot_test_podcast': {\n 'lr_hot_test_path': \"datasets/loaded_harmonic/img_hot_test/podcast/lr/270p_qp17/\",\n 'hr_hot_test_path': \"datasets/loaded_harmonic/img_hot_test/podcast/hr/1080p/\"\n}}\n\n\ntest= {\n'test_generic': {\n 'lr_test_path': \"/home/joao/Documentos/projetos/sr-tf2/datasets/loaded_harmonic/img_test/lr/270p_qp17/\",\n 'hr_test_path': \"/home/joao/Documentos/projetos/sr-tf2/datasets/loaded_harmonic/img_test/hr/1080p/\",\n 'logdir': \"test_logdir/test/generic/\"\n},\n'test_game': {\n 'lr_test_path': \"/media/joao/SAMSUNG/Youtube/game/img_test/lr/270p_qp17/\",\n 'hr_test_path': \"/media/joao/SAMSUNG/Youtube/game/img_test/hr/1080p/\",\n 'logdir': \"test_logdir/test/game/\"\n},\n'test_sport': {\n 'lr_test_path': \"/media/joao/SAMSUNG/Youtube/sport/img_test/lr/270p_qp17/\",\n 'hr_test_path': \"/media/joao/SAMSUNG/Youtube/sport/img_test/hr/1080p/\",\n 'logdir': \"test_logdir/test/sport/\"\n},\n'test_podcast': {\n 'lr_test_path': \"/media/joao/SAMSUNG/Youtube/podcast/img_test/lr/270p_qp17/\",\n 'hr_test_path': \"/media/joao/SAMSUNG/Youtube/podcast/img_test/hr/1080p/\",\n 'logdir': \"test_logdir/test/podcast/\"\n}}\n\n\ntest_datasets = {\n'test_generic': {\n 'test_dataset_path': \"datasets/loaded_harmonic/output/generic/test/4X/270p_qp17/dataset.tfrecords\",\n 'test_dataset_info_path': \"datasets/loaded_harmonic/output/generic/test/4X/270p_qp17/dataset_info.txt\"\n},\n'test_game': {\n 'test_dataset_path': \"datasets/loaded_harmonic/output/game/test/4X/270p_qp17/dataset.tfrecords\",\n 'test_dataset_info_path': \"datasets/loaded_harmonic/output/game/test/4X/270p_qp17/dataset_info.txt\"\n},\n'test_sport': {\n 'test_dataset_path': \"datasets/loaded_harmonic/output/sport/test/4X/270p_qp17/dataset.tfrecords\",\n 'test_dataset_info_path': 
\"datasets/loaded_harmonic/output/sport/test/4X/270p_qp17/dataset_info.txt\"\n},\n'test_podcast': {\n 'test_dataset_path': \"datasets/loaded_harmonic/output/podcast/test/4X/270p_qp17/dataset.tfrecords\",\n 'test_dataset_info_path': \"datasets/loaded_harmonic/output/podcast/test/4X/270p_qp17/dataset_info.txt\"\n}}\n\n\n\nLIST_MODEL=['espcn','g_rtsrgan','rtsrgan','g_rtvsrgan','teacher','rtvsrgan','imdn','k_dist','percsr','evsrnet']\nMODEL='rtvsrgan'\nLIST_GENERATOR=[None,'espcn','g_rtsrgan','imdn','evsrnet','g_rtvsrgan']\nGENERATOR=None\nBATCH_SIZE = 32\nVAL_BATCH_SIZE = 16\nTEST_BATCH_SIZE = 4\nSHUFFLE_BUFFER_SIZE = 64\n\nLIST_TEST_CLUSTER = ['generic','game','sport','podcast']\nTEST_CLUSTER = ['sport']\n\nSCHEDULE_VALUES=[100]\n\n# Knowledge distillation model\nLOSS_FN='mae'\nDISTILLATION_RATE=0.8\nALPHA=0.3\nBETA=0.65\nLIST_WEIGHTS=[1e-5,1e-2,1e-2]\n\nTYPE_REDUCE_LR='schedules'\nLEARNING_RATE = 1e-4\nLEARNING_DECAY_RATE = 1e-1\nLEARNING_DECAY_EPOCHS = 20\nNUM_EPOCHS = 100\nSTEPS_PER_EPOCH = 100\nVAL_STEPS = 1\nTEST_STEPS = 0\nEPOCHS_PER_SAVE = 5\nLOGDIR = 'logdir'\nCHECKPOINT = 'checkpoint/'\nTRAINNABLE_LAYER = 'final'\nPATH_TO_EVAL = 'test_logdir/stats.txt'\nTEST_LOGDIR='test_logdir/'\n\nHOT_TEST_SIZE=5\nLR_HOT_TEST_PATH=\"datasets/loaded_harmonic/img_test/lr/270p_qp28/\"\nHR_HOT_TEST_PATH=\"datasets/loaded_harmonic/img_test/hr/1080p/\"\n\nTRAIN_DATASET_PATH='datasets/loaded_harmonic/output/train/2X/270p_qp17/dataset.tfrecords'\nTRAIN_DATASET_INFO_PATH='datasets/loaded_harmonic/output/train/2X/270p_qp17/dataset_info.txt'\n\nVAL_DATASET_PATH='datasets/loaded_harmonic/output/val/2X/270p_qp17/dataset.tfrecords'\nVAL_DATASET_INFO_PATH='datasets/loaded_harmonic/output/val/2X/270p_qp17/dataset_info.txt'\n\nTEST_DATASET_PATH='datasets/loaded_harmonic/output/test/2X/270p_qp17/dataset.tfrecords'\nTEST_DATASET_INFO_PATH='datasets/loaded_harmonic/output/test/2X/270p_qp17/dataset_info.txt'\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser(description='train one of the models for image and video super-resolution')\n parser.add_argument('--model', type=str, default=MODEL, choices=LIST_MODEL,\n help='What model to train', required=True)\n parser.add_argument('--generator', type=str, default=GENERATOR, choices=LIST_GENERATOR,\n help='What model to train', required=False)\n parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,\n help='Number of images in batch', required=True)\n parser.add_argument('--train_dataset_path', type=str, default=TRAIN_DATASET_PATH,\n help='Path to the train dataset', required=True)\n parser.add_argument('--train_dataset_info_path', type=str, default=TRAIN_DATASET_INFO_PATH,\n help='Path to the train dataset info', required=True)\n parser.add_argument('--num_epochs', type=int, default=NUM_EPOCHS,\n help='Number of training epochs', required=True)\n parser.add_argument('--steps_per_epoch', type=int, default=STEPS_PER_EPOCH, \n help='Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch.')\n\n parser.add_argument('--val_batch_size', type=int, default=VAL_BATCH_SIZE,\n help='Number of images in val batch')\n parser.add_argument('--val_dataset_path', type=str, default=VAL_DATASET_PATH,\n help='Path to the val dataset')\n parser.add_argument('--val_dataset_info_path', type=str, default=VAL_DATASET_INFO_PATH,\n help='Path to the val dataset info')\n parser.add_argument('--validation_steps', type=int, default=VAL_STEPS, \n help='Total number of steps (batches of samples) to draw before stopping when performing 
validation at the end of every epoch.')\n \n parser.add_argument('--test_batch_size', type=int, default=TEST_BATCH_SIZE,\n help='Number of images in test batch')\n parser.add_argument('--test_dataset_path', type=str, default=TEST_DATASET_PATH,\n help='Path to the test dataset')\n parser.add_argument('--test_dataset_info_path', type=str, default=TEST_DATASET_INFO_PATH,\n help='Path to the test dataset info')\n parser.add_argument('--test_steps', type=int, default=TEST_STEPS, \n help='Total number of steps (batches of samples) to draw before stopping when performing evaluate at the end of every epoch.')\n parser.add_argument('--test_cluster', nargs='*', type=str, default=TEST_CLUSTER, choices=LIST_TEST_CLUSTER,\n help='What cluster dataset to eval', required=False)\n \n\n parser.add_argument('--hot_test_size', type=int, default=HOT_TEST_SIZE,\n help='Number of images in hot test')\n parser.add_argument('--lr_hot_test_path', type=str, default=LR_HOT_TEST_PATH,\n help='Path to the hot test dataset')\n parser.add_argument('--hr_hot_test_path', type=str, default=HR_HOT_TEST_PATH,\n help='Path to the hr hot test path')\n\n parser.add_argument('--ckpt_path', default=CHECKPOINT,\n help='Path to the model checkpoint to evaluate')\n parser.add_argument('--load_weights', action='store_true',\n help='Load weights')\n parser.add_argument('--load_weights_perc', action='store_true',\n help='Load weights perceptual')\n parser.add_argument('--eval', action='store_true',\n help='Avaluete model')\n parser.add_argument('--range_to_save', type=int, default=10,\n help='Range of image to save for teste.' ) \n parser.add_argument('--transfer_learning', action='store_true',\n help='Transfer learning from lower-upscale model')\n parser.add_argument('--trainable_layer', type=str, default=TRAINNABLE_LAYER,\n help='Transfer learning from lower-upscale model')\n parser.add_argument('--scaleFrom', type=int, default=2,\n help='Perform transfer learning from lower-upscale model' )\n parser.add_argument('--shuffle_buffer_size', type=int, default=SHUFFLE_BUFFER_SIZE,\n help='Buffer size used for shuffling examples in dataset')\n parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,\n help='Learning rate used for training')\n parser.add_argument('--lr_decay_rate', type=float, default=LEARNING_DECAY_RATE,\n help='Learning rate decay rate used in exponential decay')\n parser.add_argument('--lr_decay_epochs', type=int, default=LEARNING_DECAY_EPOCHS,\n help='Number of epochs before full decay rate tick used in exponential decay')\n parser.add_argument('--type_reduce_lr', type=str, default=TYPE_REDUCE_LR, choices=['plateau','schedules'],\n help='Type of reduce learning rate')\n parser.add_argument('--schedule_values',nargs='*', type=int, default=SCHEDULE_VALUES,\n help='list of epochs values to reduce lr')\n\n parser.add_argument('--loss_fn', type=str, default=LOSS_FN, choices=['mse','mae','huber', 'fea'],\n help='Set the loss function to knowledge distillation model')\n parser.add_argument('--distillation_rate', type=float, default=DISTILLATION_RATE,\n help='Distillation rate in knowledge distillation model')\n parser.add_argument('--alpha', type=float, default=ALPHA,\n help='Weight for distillation loss function in knowledge distillation model')\n parser.add_argument('--beta', type=float, default=BETA,\n help='Weight for perceptual loss function in knowledge distillation model')\n parser.add_argument('--list_weights', nargs='*', type=float, default=LIST_WEIGHTS,\n help='Auxiliary list to weight values')\n 
parser.add_argument('--inter_method', type=str, default=None, choices=['bilinear','lanczos3','lanczos5','bicubic','nearest','mitchellcubic'],\n help='Type of interpolation resize used of same models')\n\n parser.add_argument('--epochs_per_save', type=int, default=EPOCHS_PER_SAVE,\n help='How often to save checkpoints')\n parser.add_argument('--logdir', type=str, default=LOGDIR,\n help='Where to save checkpoints and summaries')\n parser.add_argument('--test_logdir', type=str, default=TEST_LOGDIR,\n help='Where to save tests images')\n \n parser.add_argument('--path_to_eval', type=str, default=PATH_TO_EVAL,\n help='Path to save evals')\n \n\n return parser.parse_args()\n\n\ndef main():\n \n args = get_arguments()\n # train dataset\n train_dataset = Dataset(args.batch_size,\n args.train_dataset_path,\n args.train_dataset_info_path,\n args.shuffle_buffer_size)\n \n scale_factor = train_dataset.scale_factor\n\n if args.steps_per_epoch == 0:\n steps_per_epoch = train_dataset.examples_num // args.batch_size \\\n if train_dataset.examples_num % args.batch_size != 0 else 0\n else:\n steps_per_epoch = args.steps_per_epoch\n \n\n train_dataset = train_dataset.get_data(args.num_epochs)\n train_batch = train_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))\n \n # val dataset\n val_dataset = Dataset(args.val_batch_size,\n args.val_dataset_path,\n args.val_dataset_info_path,\n args.shuffle_buffer_size)\n\n if args.validation_steps == 0:\n validation_steps = val_dataset.examples_num // args.val_batch_size \\\n if val_dataset.examples_num % args.val_batch_size != 0 else 0\n else:\n validation_steps = args.validation_steps\n \n val_dataset = val_dataset.get_data()\n val_batch = val_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))\n \n # test dataset\n test_dataset = Dataset(args.test_batch_size,\n args.test_dataset_path,\n args.test_dataset_info_path,\n args.shuffle_buffer_size)\n\n if args.test_steps == 0:\n test_steps = test_dataset.examples_num // args.test_batch_size \\\n if test_dataset.examples_num % args.test_batch_size != 0 else 0\n else:\n test_steps = args.test_steps\n \n test_dataset = test_dataset.get_data()\n test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))\n\n\n # hot test\n lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(args.lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(args.hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n test_print = [lr_img_paths,hr_img_paths]\n \n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.model,scale_factor) \n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_paph,\n save_weights_only=True,\n monitor='val_loss',\n save_freq= 'epoch', \n mode='min',\n save_best_only=True)\n\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=args.logdir+\"/\"+args.model,\n histogram_freq=1, \n write_graph=True,\n write_images=True, \n write_steps_per_second=True,\n update_freq='batch') \n file_writer_cm = tf.summary.create_file_writer(args.logdir+\"/\"+args.model + '/validation')\n \n earlystopping = tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', \n min_delta=1e-5,\n patience=100, verbose=1,\n mode='min', \n restore_best_weights=True)\n \n if args.type_reduce_lr == 'plateau':\n reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_rmse', factor=args.lr_decay_rate,\n 
patience=args.lr_decay_epochs, mode='min', min_lr=1e-6,verbose=1)\n elif args.type_reduce_lr == 'schedules':\n def scheduler(epoch, lr):\n if epoch in args.schedule_values:\n return lr * tf.math.exp(-0.1)\n else:\n return lr\n reduce_lr=tf.keras.callbacks.LearningRateScheduler(scheduler)\n else: \n print(\"--type_reduce_lr not valid!\")\n exit(1)\n \n if args.model == 'espcn': \n callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr] \n eval,run_time=train_espcn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n \n print_eval(args.path_to_eval,eval,args.model+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n elif args.model == 'imdn': \n callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr] \n eval,run_time=train_imdn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n \n print_eval(args.path_to_eval,eval,args.model+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n \n elif args.model == 'g_rtsrgan':\n callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr] \n eval, run_time=train_g_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n\n print_eval(args.path_to_eval,eval,args.model+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n\n elif args.model == 'rtsrgan':\n callbacks=[tensorboard_callback]\n eval,run_time=train_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n\n print_eval(args.path_to_eval,eval,args.model+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n\n elif args.model == 'evsrnet':\n callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]\n eval,run_time=train_evsrnet(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n\n print_eval(args.path_to_eval,eval,args.model+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n\n\n # Ours models\n elif args.model == 'g_rtvsrgan':\n callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr] \n eval,run_time=train_g_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n\n print_eval(args.path_to_eval,eval,args.model+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n \n elif args.model == 'teacher':\n callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr] \n eval,run_time=train_teacher(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n\n 
print_eval(args.path_to_eval,eval,args.model+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n\n elif args.model == 'rtvsrgan':\n callbacks=[tensorboard_callback,reduce_lr]\n eval,run_time=train_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n\n print_eval(args.path_to_eval,eval,args.model+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n \n elif args.model == 'k_dist': \n callbacks=[tensorboard_callback, reduce_lr] \n eval,run_time=train_k_distillation(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n \n print_eval(args.path_to_eval,eval,args.model+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n \n elif args.model == 'percsr':\n callbacks=[tensorboard_callback, reduce_lr] \n print(\"CALLING MODEL {}\".format(args.model))\n eval,run_time=train_percsr(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)\n \n print_eval(args.path_to_eval,eval,args.model+'_'+args.generator+\"_{}X_q{}\".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)\n else:\n exit(1)\n\n\ndef trainable_weights(model):\n print(\"Weights:\", len(model.weights))\n print(\"Trainable_weights:\", len(model.trainable_weights))\n print(\"Non_trainable_weights:\", len(model.non_trainable_weights))\n\ndef trainable_layers(model, trainable_layer):\n for i in range(len(model.layers)):\n if(i+1 == trainable_layer):\n break\n else:\n model.layers[i].trainable=False\n\n\ndef print_eval(file_stats,eval,model_name,run_time):\n statsFile=open(file_stats,\"a\")\n print(model_name, file = statsFile)\n print(eval, file = statsFile)\n print(run_time, file = statsFile)\n statsFile.close()\n\ndef saved_model(model, filepath):\n tf.keras.models.save_model(model, filepath, save_traces=True)\n\ndef train_espcn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,\n file_writer_cm,trainable_layer):\n model = espcn(scale_factor=scale_factor)\n if args.load_weights:\n print(\"Loading weights...\")\n model.load_weights(checkpoint_paph)\n if args.transfer_learning:\n checkpoint_paph_from=\"{}{}_{}x/model.ckpt\".format(\"checkpoint/\",args.model,args.scaleFrom)\n print(\"Transfer learning from {}x-upscale model...\".format(args.scaleFrom))\n modelFrom = espcn(scale_factor=args.scaleFrom)\n modelFrom.load_weights(checkpoint_paph_from)\n for i in range(len(modelFrom.layers)):\n if(modelFrom.layers[i].name == trainable_layer):\n break\n else:\n print(\"Set_weights in: {} layer\".format(model.layers[i].name))\n model.layers[i].set_weights(modelFrom.layers[i].get_weights())\n model.layers[i].trainable=False\n \n opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)\n\n if args.loss_fn == \"mse\":\n loss_fn = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n loss_fn = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\":\n loss_fn = tf.keras.losses.MeanAbsoluteError()\n\n model.compile(optimizer=opt, loss=loss_fn, 
metrics=[psnr,ssim,rmse,lpips])\n trainable_weights(model)\n\n if(args.eval==True):\n print(\"Loading weights...\")\n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.model,scale_factor) \n model.load_weights(checkpoint_paph)\n print(\"Evaluate model\")\n get_test_dataset(model,scale_factor,args)\n exit(1)\n\n save_img_callback = SaveImageCallback(\n model=model,\n model_name=args.model,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm)\n\n callbacks.append(save_img_callback)\n\n model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,\n verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)\n\n print(\"Evaluate model\")\n eval = model.evaluate(test_batch, verbose=1, steps=test_steps)\n saved_model(model, 'saved_model/{}/'.format(args.model))\n return eval,model.get_run_time()\n\ndef train_imdn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,\n file_writer_cm,trainable_layer):\n model = IMDN(scale_factor=scale_factor)\n if args.load_weights:\n print(\"Loading weights...\")\n model.load_weights(checkpoint_paph)\n if args.transfer_learning:\n checkpoint_paph_from=\"{}{}_{}x/model.ckpt\".format(\"checkpoint/\",args.model,args.scaleFrom)\n print(\"Transfer learning from {}x-upscale model...\".format(args.scaleFrom))\n modelFrom = IMDN(scale_factor=args.scaleFrom)\n modelFrom.load_weights(checkpoint_paph_from)\n for i in range(len(modelFrom.layers)):\n if(modelFrom.layers[i].name == trainable_layer):\n break\n else:\n print(\"Set_weights in: {} layer\".format(model.layers[i].name))\n model.layers[i].set_weights(modelFrom.layers[i].get_weights())\n model.layers[i].trainable=False\n \n opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)\n\n if args.loss_fn == \"mse\":\n loss_fn = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n loss_fn = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\":\n loss_fn = tf.keras.losses.MeanAbsoluteError()\n\n model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])\n trainable_weights(model)\n\n if(args.eval==True):\n print(\"Loading weights...\")\n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.model,scale_factor) \n model.load_weights(checkpoint_paph)\n print(\"Evaluate model\")\n get_test_dataset(model,scale_factor,args)\n exit(1)\n\n save_img_callback = SaveImageCallback(\n model=model,\n model_name=args.model,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm)\n\n callbacks.append(save_img_callback)\n\n model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,\n verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)\n\n print(\"Evaluate model\")\n eval = model.evaluate(test_batch, verbose=1, steps=test_steps)\n saved_model(model, 'saved_model/{}/'.format(args.model))\n return eval, model.get_run_time()\n\ndef train_g_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,\n file_writer_cm,trainable_layer):\n model = g_rtsrgan(scale_factor=scale_factor)\n if args.load_weights:\n print(\"Loading weights...\")\n 
model.load_weights(checkpoint_paph)\n if args.transfer_learning:\n checkpoint_paph_from=\"{}{}_{}x/model.ckpt\".format(\"checkpoint/\",args.model,args.scaleFrom)\n print(\"Transfer learning from {}x-upscale model...\".format(args.scaleFrom))\n modelFrom = g_rtsrgan(scale_factor=args.scaleFrom)\n modelFrom.load_weights(checkpoint_paph_from)\n for i in range(len(modelFrom.layers)):\n if(modelFrom.layers[i].name == trainable_layer):\n break\n else:\n print(\"Set_weights in: {} layer\".format(model.layers[i].name))\n model.layers[i].set_weights(modelFrom.layers[i].get_weights())\n model.layers[i].trainable=False\n \n opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)\n\n if args.loss_fn == \"mse\":\n loss_fn = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n loss_fn = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\":\n loss_fn = tf.keras.losses.MeanAbsoluteError()\n\n model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])\n trainable_weights(model)\n\n if(args.eval==True):\n print(\"Loading weights...\")\n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.model,scale_factor) \n model.load_weights(checkpoint_paph)\n print(\"Evaluate model\")\n get_test_dataset(model,scale_factor,args)\n exit(1)\n\n save_img_callback = SaveImageCallback(\n model=model,\n model_name=args.model,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm)\n\n callbacks.append(save_img_callback)\n\n model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,\n verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)\n\n print(\"Evaluate model\")\n eval = model.evaluate(test_batch, verbose=1, steps=test_steps)\n saved_model(model, 'saved_model/{}/'.format(args.model))\n return eval,model.get_run_time()\n\ndef train_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):\n g=g_rtsrgan(scale_factor=scale_factor)\n g.compile(metrics=[psnr,ssim,rmse,lpips])\n \n d=d_rtsrgan(input_shape=(36*scale_factor,36*scale_factor,1))\n gan = GAN(discriminator = d, generator = g)\n\n if args.loss_fn == \"mse\":\n cont_loss = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n cont_loss = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\":\n cont_loss = tf.keras.losses.MeanAbsoluteError()\n\n shape_hr = (36*scale_factor,36*scale_factor,3) \n vgg_loss = VGGLoss(shape_hr,cont_loss)\n perc_loss = vgg_loss.custom_perceptual_loss\n adv_loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)\n lbd = 1 * 1e-5\n eta = 1 * 1e-2\n mu = 1 * 1e-2\n gan_loss=GANLoss(perc_loss, cont_loss, adv_loss,lbd,eta,mu)\n \n if (args.load_weights):\n print(\"Loading weights...\")\n checkpoint_paph=\"{}g_rtsrgan_{}x/model.ckpt\".format(args.ckpt_path,scale_factor) \n gan.load_weights_gen(checkpoint_paph)\n for i in range(len(g.layers)):\n if(g.layers[i].name == trainable_layer):\n break\n else:\n g.layers[i].trainable=False\n \n gan.compile(d_optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),\n g_optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),\n d_loss = gan_loss.discriminator_loss,\n g_loss = gan_loss.generator_loss,\n metrics=[psnr,ssim,rmse,lpips])\n trainable_weights(gan)\n\n 
save_img_callback = SaveImageCallback(\n model=g,\n model_name=args.model,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm)\n\n callbacks.append(save_img_callback)\n\n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.model,scale_factor)\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_paph,\n save_weights_only=True,\n monitor='val_lpips',\n save_freq= 'epoch', \n mode='min',\n save_best_only=True)\n callbacks.append(checkpoint_callback)\n\n gan.fit(train_batch, epochs=args.num_epochs,callbacks=callbacks,verbose=1,steps_per_epoch=steps_per_epoch)\n checkpoint_paph=\"{}{}_{}x/g_rtsrgan/model.ckpt\".format(args.ckpt_path,args.model,scale_factor) \n gan.save_weights_gen(checkpoint_paph)\n\n print(\"Evaluate model\")\n eval = g.evaluate(test_batch, verbose=1, steps=test_steps)\n saved_model(g, 'saved_model/{}/'.format(args.model))\n return eval, g.get_run_time()\n\n\ndef train_evsrnet(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,\n file_writer_cm,trainable_layer):\n model = EVSRNet(scale_factor=scale_factor,method=args.inter_method)\n model.build((None, None, None,1))\n #print(model.summary())\n if args.load_weights:\n print(\"Loading weights...\")\n model.load_weights(checkpoint_paph)\n \n opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)\n\n if args.loss_fn == \"mse\":\n loss_fn = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n loss_fn = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\": # default\n loss_fn = tf.keras.losses.MeanAbsoluteError()\n\n model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])\n trainable_weights(model)\n\n if(args.eval==True):\n print(\"Loading weights...\")\n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.model,scale_factor) \n model.load_weights(checkpoint_paph)\n print(\"Evaluate model\")\n get_test_dataset(model,scale_factor,args)\n exit(1)\n\n save_img_callback = SaveImageCallback(\n model=model,\n model_name=args.model,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm)\n\n callbacks.append(save_img_callback)\n\n model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,\n verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)\n\n print(\"Evaluate model\")\n eval = model.evaluate(test_batch, verbose=1, steps=test_steps)\n saved_model(model, 'saved_model/{}/'.format(args.model))\n return eval,model.get_run_time()\n\ndef train_teacher(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):\n model = Teacher(channels=1,scale_factor=scale_factor,distillation_rate=args.distillation_rate)\n model.build((None, None, None,1))\n print(model.summary())\n if args.load_weights:\n print(\"Loading weights...\")\n model.load_weights(checkpoint_paph)\n\n if(args.eval==True):\n print(\"Loading weights...\")\n model.load_weights(checkpoint_paph)\n print(\"Evaluate model\")\n model.compile(metrics=[psnr,ssim,rmse,lpips])\n get_test_dataset(model,scale_factor,args)\n exit(1)\n\n if args.transfer_learning:\n 
checkpoint_paph_from=\"{}{}_{}x/model.ckpt\".format(\"checkpoint/\",args.model,args.scaleFrom)\n print(\"Transfer learning from {}x-upscale model...\".format(args.scaleFrom))\n modelFrom = g_rtvsrgan(scale_factor=args.scaleFrom)\n modelFrom.load_weights(checkpoint_paph_from)\n for i in range(len(modelFrom.layers)):\n if(modelFrom.layers[i].name == trainable_layer):\n break\n else:\n print(\"Set_weights in: {} layer\".format(model.layers[i].name))\n model.layers[i].set_weights(modelFrom.layers[i].get_weights())\n model.layers[i].trainable=False\n \n opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)\n\n if args.loss_fn == \"mse\":\n loss_fn = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n loss_fn = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\":\n loss_fn = tf.keras.losses.MeanAbsoluteError()\n if args.loss_fn == \"fea\": \n loss_aux = tf.keras.losses.MeanAbsoluteError()\n shape_hr = (36*scale_factor,36*scale_factor,3) \n vgg_loss = VGGLoss(shape_hr,loss_aux)\n loss_fn = vgg_loss.custom_perceptual_loss\n\n model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])\n trainable_weights(model)\n \n save_img_callback = SaveImageCallback(\n model=model,\n model_name=args.model,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm)\n\n callbacks.append(save_img_callback)\n\n model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,\n verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)\n\n print(\"Evaluate model\")\n if args.loss_fn == \"fea\": \n eval = []\n else:\n eval = model.evaluate(test_batch, verbose=1, steps=test_steps)\n saved_model(model, 'saved_model/{}/'.format(args.model))\n return eval, model.get_run_time()\n\n\ndef train_g_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):\n model = g_rtvsrgan(scale_factor=scale_factor,method=args.inter_method)\n if args.load_weights:\n print(\"Loading weights...\")\n model.load_weights(checkpoint_paph)\n if args.transfer_learning:\n checkpoint_paph_from=\"{}{}_{}x/model.ckpt\".format(\"checkpoint/\",args.model,args.scaleFrom)\n print(\"Transfer learning from {}x-upscale model...\".format(args.scaleFrom))\n modelFrom = g_rtvsrgan(scale_factor=args.scaleFrom)\n modelFrom.load_weights(checkpoint_paph_from)\n for i in range(len(modelFrom.layers)):\n if(modelFrom.layers[i].name == trainable_layer):\n break\n else:\n print(\"Set_weights in: {} layer\".format(model.layers[i].name))\n model.layers[i].set_weights(modelFrom.layers[i].get_weights())\n model.layers[i].trainable=False\n \n opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)\n\n if args.loss_fn == \"mse\":\n loss_fn = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n loss_fn = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\":\n loss_fn = tf.keras.losses.MeanAbsoluteError()\n\n model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])\n trainable_weights(model)\n\n if(args.eval==True):\n print(\"Loading weights...\")\n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.model,scale_factor) \n model.load_weights(checkpoint_paph)\n print(\"Evaluate model\")\n get_test_dataset(model,scale_factor,args)\n exit(1)\n \n save_img_callback = 
SaveImageCallback(\n model=model,\n model_name=args.model,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm)\n\n callbacks.append(save_img_callback)\n\n model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,\n verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)\n\n print(\"Evaluate model\")\n eval = model.evaluate(test_batch, verbose=1, steps=test_steps)\n saved_model(model, 'saved_model/{}/'.format(args.model))\n return eval,model.get_run_time()\n\n\ndef train_k_distillation(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):\n \n opt=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)\n \n if args.loss_fn == \"mse\":\n aux_loss_fn = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n aux_loss_fn = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\":\n aux_loss_fn = tf.keras.losses.MeanAbsoluteError()\n \n student_loss_fn = tf.keras.losses.MeanSquaredError()\n distillation_loss_fn= tf.keras.losses.MeanAbsoluteError() \n \n shape_hr = (36*scale_factor,36*scale_factor,3) \n vgg_loss = VGGLoss(shape_hr,aux_loss_fn)\n perc_loss = vgg_loss.custom_perceptual_loss\n \n teacher = g_rtvsrgan(channels=1,scale_factor=scale_factor)\n print(\"Loading teacher weights...\")\n weights_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,'g_rtvsrgan',scale_factor)\n teacher.load_weights(weights_paph)\n student = g_rtvsrgan(channels=1,scale_factor=scale_factor) \n student.build((None, None, None,1))\n\n # Initialize and compile distiller\n distiller = Distiller(student=student, teacher=teacher)\n distiller.compile(\n optimizer=opt,\n metrics=[psnr,ssim,rmse,lpips],\n student_loss_fn=student_loss_fn,\n distillation_loss_fn=distillation_loss_fn,\n perc_loss_fn=perc_loss,\n alpha=args.alpha,\n beta=args.beta\n )\n trainable_weights(student)\n if args.load_weights:\n print(\"Loading student weights...\")\n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,'g_rtvsrgan',scale_factor)\n student.load_weights(checkpoint_paph)\n trainable_layers(student, len(student.layers)-1)\n trainable_weights(student)\n \n if args.transfer_learning:\n checkpoint_paph_from=\"{}{}_{}x/model.ckpt\".format(\"checkpoint/\",args.model,args.scaleFrom)\n print(\"Transfer learning from {}x-upscale model...\".format(args.scaleFrom))\n modelFrom = g_rtvsrgan(scale_factor=args.scaleFrom)\n modelFrom.load_weights(checkpoint_paph_from)\n for i in range(len(modelFrom.layers)):\n if(modelFrom.layers[i].name == trainable_layer):\n break\n else:\n print(\"Set_weights in: {} layer\".format(student.layers[i].name))\n student.layers[i].set_weights(modelFrom.layers[i].get_weights())\n student.layers[i].trainable=False\n \n \n save_img_callback = SaveImageCallback(\n model=distiller.student,\n model_name=args.model,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm)\n\n callbacks.append(save_img_callback)\n\n earlystopping = tf.keras.callbacks.EarlyStopping(\n monitor='val_rmse', \n min_delta=1e-5,\n patience=50, verbose=1,\n mode='min', \n restore_best_weights=True)\n \n callbacks.append(earlystopping)\n \n 
checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.model,scale_factor)\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_paph,\n save_weights_only=True,\n monitor='val_lpips',\n save_freq= 'epoch', \n mode='min',\n save_best_only=True)\n callbacks.append(checkpoint_callback)\n\n # Distill teacher to student\n distiller.fit(train_batch, epochs=args.num_epochs,callbacks=callbacks,\n verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)\n checkpoint_paph=\"{}{}_{}x/g_rtsrgan/model.ckpt\".format(args.ckpt_path,args.model,scale_factor) \n student.save_weights(checkpoint_paph)\n\n print(\"Evaluate model\")\n # Evaluate student on test dataset\n eval = distiller.evaluate(test_batch, verbose=1, steps=test_steps)\n\n saved_model(distiller.student, 'saved_model/{}/'.format(args.model))\n return eval,distiller.student.get_run_time()\n\n\ndef train_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):\n g=g_rtvsrgan(scale_factor=scale_factor)\n g.build((None, None, None,1))\n\n d=d_rtvsrgan(input_shape=(36*scale_factor,36*scale_factor,1))\n ra_d=rad_rtvsrgan(discriminator=d,shape_hr=(36*scale_factor,36*scale_factor,1))\n \n\n if args.loss_fn == \"mse\":\n aux_loss = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n aux_loss = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\":\n aux_loss = tf.keras.losses.MeanAbsoluteError()\n\n cont_loss = tf.keras.losses.MeanSquaredError()\n shape_hr = (36*scale_factor,36*scale_factor,3) \n vgg_loss = VGGLoss(shape_hr,aux_loss)\n perc_loss = vgg_loss.custom_perceptual_loss\n\n adv_loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)\n lbd = args.list_weights[0]\n eta = args.list_weights[1]\n mu = args.list_weights[2]\n gan_loss=GANLoss(perc_loss, cont_loss, adv_loss,lbd,eta,mu)\n\n ra_gan = RaGAN(ra_discriminator=ra_d, generator=g)\n ra_gan.compile(d_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),\n g_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),\n ra_d_loss=gan_loss.discriminator_loss,\n g_loss = gan_loss.generator_loss,\n metrics=[psnr,ssim,rmse,lpips])\n if (args.load_weights):\n print(\"Loading weights...\") \n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,'g_rtvsrgan',scale_factor)\n ra_gan.load_weights_gen(checkpoint_paph)\n trainable_layers(g, len(g.layers)-1)\n trainable_weights(g)\n \n save_img_callback = SaveImageCallback(\n model=g,\n model_name=args.model,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm) \n callbacks.append(save_img_callback)\n\n\n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.model,scale_factor)\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_paph,\n save_weights_only=True,\n monitor='val_lpips',\n save_freq= 'epoch', \n mode='min',\n save_best_only=True)\n callbacks.append(checkpoint_callback)\n\n ra_gan.fit(train_batch, epochs=args.num_epochs,callbacks=callbacks,verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)\n checkpoint_paph=\"{}{}_{}x/g_rtvsrgan/model.ckpt\".format(args.ckpt_path,args.model,scale_factor) \n ra_gan.save_weights_gen(checkpoint_paph)\n\n 
print(\"Evaluate model\")\n eval = ra_gan.evaluate(test_batch, verbose=1)\n saved_model(ra_gan.generator, 'saved_model/{}/'.format(args.model))\n return eval,ra_gan.generator.get_run_time()\n \n\ndef model_generator(args=None,scale_factor=None):\n if args.generator== 'espcn':\n model= espcn(scale_factor=scale_factor)\n elif args.generator== 'g_rtsrgan':\n model= g_rtsrgan(scale_factor=scale_factor)\n elif args.generator== 'imdn':\n model= IMDN(scale_factor=scale_factor)\n elif args.generator== 'evsrnet':\n model= EVSRNet(scale_factor=scale_factor,method=args.inter_method)\n elif args.generator== 'g_rtvsrgan':\n model= g_rtvsrgan(scale_factor=scale_factor)\n elif args.generator== 'teacher':\n model = Teacher(channels=1,scale_factor=scale_factor,distillation_rate=args.distillation_rate)\n else:\n exit(1)\n return model\n\n\n\ndef print_hot_test(lr_hot_test_path,hr_hot_test_path,model=None,model_name=None,args=None,scale_factor=2): \n time_elapsed = plot_test_images(model,lr_hot_test_path,hr_hot_test_path,\n args.test_logdir,scale_factor=scale_factor,model_name=model_name,epoch=0)\n return time_elapsed\n \n\ndef get_test_dataset(model,scale_factor,args):\n bic = True\n if ('generic' in args.test_cluster): \n # test dataset\n test_dataset_path=test_datasets['test_generic']['test_dataset_path']\n test_dataset_info_path=test_datasets['test_generic']['test_dataset_info_path']\n test_dataset = Dataset(\n args.test_batch_size,\n test_dataset_path,\n test_dataset_info_path,\n args.shuffle_buffer_size)\n\n if args.test_steps == 0:\n test_steps = test_dataset.examples_num // args.test_batch_size \\\n if test_dataset.examples_num % args.test_batch_size != 0 else 0\n else:\n test_steps = args.test_steps\n \n test_dataset = test_dataset.get_data()\n test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))\n\n name_dataset = args.model+'_'+args.generator+\"_{}_{}X_q{}\".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) if args.generator!=None else args.model+\"_{}_{}X_q{}\".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) \n print(name_dataset,args.path_to_eval)\n\n\n lr_path=test['test_generic']['lr_test_path']\n hr_path=test['test_generic']['hr_test_path']\n logdir=test['test_generic']['logdir']\n lr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_path) if len(filenames)!=0][0])\n hr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_path) if len(filenames)!=0][0])\n if (bic):\n print_metrics(lr_paths, hr_paths, scale_factor=scale_factor)\n exit(1)\n # plot_images(\"bi\", lr_paths, hr_paths, args, logdir+\"/\"+\"bicubic\"+\"/\",scale_factor=scale_factor)\n # plot_images(\"hr\", lr_paths, hr_paths, args, logdir+\"/\"+\"hr\"+\"/\",scale_factor=scale_factor)\n # run_time = plot_images(model, lr_paths, hr_paths, args, logdir+\"/\"+args.generator+\"/\",scale_factor=scale_factor)\n run_time = print_hot_test(lr_paths,hr_paths,model=model,model_name=args.model,args=args,scale_factor=scale_factor)\n eval = model.evaluate(test_batch, verbose=1)\n\n lr_hot_test_path=hot_test['hot_test_generic']['lr_hot_test_path']\n hr_hot_test_path=hot_test['hot_test_generic']['hr_hot_test_path']\n lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, 
filenames in os.walk(hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n test_print = [lr_img_paths,hr_img_paths]\n\n name_model = \"generic\"+'_'+args.model+'_'+args.generator if args.generator != None else \"generic\"+'_'+args.model \n # run_time = print_hot_test(test_print[0],test_print[1],model=model,model_name=name_model,args=args,scale_factor=scale_factor)\n print_eval(args.path_to_eval,eval,name_dataset,stat.mean(run_time))\n\n if ('game' in args.test_cluster):\n # test dataset\n test_dataset_path=test_datasets['test_game']['test_dataset_path']\n test_dataset_info_path=test_datasets['test_game']['test_dataset_info_path']\n test_dataset = Dataset(\n args.test_batch_size,\n test_dataset_path,\n test_dataset_info_path,\n args.shuffle_buffer_size)\n\n if args.test_steps == 0:\n test_steps = test_dataset.examples_num // args.test_batch_size \\\n if test_dataset.examples_num % args.test_batch_size != 0 else 0\n else:\n test_steps = args.test_steps\n \n test_dataset = test_dataset.get_data()\n test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))\n\n name_dataset = args.model+'_'+args.generator+\"_{}_{}X_q{}\".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) if args.generator != None else args.model+\"_{}_{}X_q{}\".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) \n print(name_dataset,args.path_to_eval)\n\n\n lr_path=test['test_game']['lr_test_path']\n hr_path=test['test_game']['hr_test_path']\n logdir=test['test_game']['logdir']\n lr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_path) if len(filenames)!=0][0])\n hr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_path) if len(filenames)!=0][0])\n if (bic):\n print_metrics(lr_paths, hr_paths, scale_factor=scale_factor)\n exit(1)\n # plot_images(\"bi\", lr_paths, hr_paths, args, logdir+\"/\"+\"bicubic\"+\"/\",scale_factor=scale_factor)\n # plot_images(\"hr\", lr_paths, hr_paths, args, logdir+\"/\"+\"hr\"+\"/\",scale_factor=scale_factor)\n # run_time = plot_images(model, lr_paths, hr_paths, args, logdir+\"/\"+args.generator+\"/\",scale_factor=scale_factor)\n run_time = print_hot_test(lr_paths,hr_paths,model=model,model_name=args.model,args=args,scale_factor=scale_factor)\n eval = model.evaluate(test_batch, verbose=1)\n\n lr_hot_test_path=hot_test['hot_test_game']['lr_hot_test_path']\n hr_hot_test_path=hot_test['hot_test_game']['hr_hot_test_path']\n lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n test_print = [lr_img_paths,hr_img_paths]\n\n name_model = \"game\"+'_'+args.model+'_'+args.generator if args.generator != None else \"game\"+'_'+args.model \n # run_time = print_hot_test(test_print[0],test_print[1],model=model,model_name=name_model,args=args,scale_factor=scale_factor)\n print_eval(args.path_to_eval,eval,name_dataset,stat.mean(run_time))\n \n if ('sport' in args.test_cluster):\n # test dataset\n test_dataset_path=test_datasets['test_sport']['test_dataset_path']\n test_dataset_info_path=test_datasets['test_sport']['test_dataset_info_path']\n test_dataset = Dataset(\n args.test_batch_size,\n test_dataset_path,\n test_dataset_info_path,\n 
args.shuffle_buffer_size)\n\n if args.test_steps == 0:\n test_steps = test_dataset.examples_num // args.test_batch_size \\\n if test_dataset.examples_num % args.test_batch_size != 0 else 0\n else:\n test_steps = args.test_steps\n \n test_dataset = test_dataset.get_data()\n test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))\n\n name_dataset = args.model+'_'+args.generator+\"_{}_{}X_q{}\".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) if args.generator != None else args.model+\"_{}_{}X_q{}\".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) \n\n print(name_dataset,args.path_to_eval)\n\n\n lr_path=test['test_sport']['lr_test_path']\n hr_path=test['test_sport']['hr_test_path']\n logdir=test['test_sport']['logdir']\n lr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_path) if len(filenames)!=0][0])\n hr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_path) if len(filenames)!=0][0])\n if (bic):\n print_metrics(lr_paths, hr_paths, scale_factor=scale_factor)\n exit(1)\n # plot_images(\"bi\", lr_paths, hr_paths, args, logdir+\"/\"+\"bicubic\"+\"/\",scale_factor=scale_factor)\n # plot_images(\"hr\", lr_paths, hr_paths, args, logdir+\"/\"+\"hr\"+\"/\",scale_factor=scale_factor)\n # run_time = plot_images(model, lr_paths, hr_paths, args, logdir+\"/\"+args.generator+\"/\",scale_factor=scale_factor)\n run_time = print_hot_test(lr_paths,hr_paths,model=model,model_name=args.model,args=args,scale_factor=scale_factor)\n eval = model.evaluate(test_batch, verbose=1)\n\n lr_hot_test_path=hot_test['hot_test_sport']['lr_hot_test_path']\n hr_hot_test_path=hot_test['hot_test_sport']['hr_hot_test_path']\n lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n test_print = [lr_img_paths,hr_img_paths]\n\n name_model = \"sport\"+'_'+args.model+'_'+args.generator if args.generator != None else \"sport\"+'_'+args.model \n # run_time = print_hot_test(test_print[0],test_print[1],model=model,model_name=name_model,args=args,scale_factor=scale_factor)\n print_eval(args.path_to_eval,eval,name_dataset,stat.mean(run_time))\n\n if ('podcast' in args.test_cluster):\n # test dataset\n test_dataset_path=test_datasets['test_podcast']['test_dataset_path']\n test_dataset_info_path=test_datasets['test_podcast']['test_dataset_info_path']\n test_dataset = Dataset(\n args.test_batch_size,\n test_dataset_path,\n test_dataset_info_path,\n args.shuffle_buffer_size)\n\n if args.test_steps == 0:\n test_steps = test_dataset.examples_num // args.test_batch_size \\\n if test_dataset.examples_num % args.test_batch_size != 0 else 0\n else:\n test_steps = args.test_steps\n \n test_dataset = test_dataset.get_data()\n test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))\n\n name_dataset = args.model+'_'+args.generator+\"_{}_{}X_q{}\".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) if args.generator != None else args.model+\"_{}_{}X_q{}\".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) \n print(name_dataset,args.path_to_eval)\n\n\n 
lr_path=test['test_podcast']['lr_test_path']\n hr_path=test['test_podcast']['hr_test_path']\n logdir=test['test_podcast']['logdir']\n lr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_path) if len(filenames)!=0][0])\n hr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_path) if len(filenames)!=0][0])\n if (bic):\n print_metrics(lr_paths, hr_paths, scale_factor=scale_factor)\n exit(1)\n # plot_images(\"bi\", lr_paths, hr_paths, args, logdir+\"/\"+\"bicubic\"+\"/\",scale_factor=scale_factor)\n # plot_images(\"hr\", lr_paths, hr_paths, args, logdir+\"/\"+\"hr\"+\"/\",scale_factor=scale_factor)\n # run_time = plot_images(model, lr_paths, hr_paths, args, logdir+\"/\"+args.generator+\"/\",scale_factor=scale_factor)\n run_time = print_hot_test(lr_paths,hr_paths,model=model,model_name=args.model,args=args,scale_factor=scale_factor)\n eval = model.evaluate(test_batch, verbose=1)\n\n lr_hot_test_path=hot_test['hot_test_podcast']['lr_hot_test_path']\n hr_hot_test_path=hot_test['hot_test_podcast']['hr_hot_test_path']\n lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]\n test_print = [lr_img_paths,hr_img_paths]\n\n name_model = \"podcast\"+'_'+args.model+'_'+args.generator if args.generator != None else \"podcast\"+'_'+args.model \n # run_time = print_hot_test(test_print[0],test_print[1],model=model,model_name=name_model,args=args,scale_factor=scale_factor)\n print_eval(args.path_to_eval,eval,name_dataset,stat.mean(run_time))\n\n\n\ndef train_percsr(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):\n\n g=model_generator(scale_factor=scale_factor,args=args)\n g.build((None, None, None,1))\n\n d=d_percsr(input_shape=(36*scale_factor,36*scale_factor,1))\n ra_d=rad_percsr(discriminator=d,shape_hr=(36*scale_factor,36*scale_factor,1))\n\n if args.loss_fn == \"mse\":\n aux_loss = tf.keras.losses.MeanSquaredError() \n if args.loss_fn == \"huber\":\n aux_loss = tf.keras.losses.Huber()\n if args.loss_fn == \"mae\":\n aux_loss = tf.keras.losses.MeanAbsoluteError()\n\n loss_pix = tf.keras.losses.MeanSquaredError()\n shape_hr = (36*scale_factor,36*scale_factor,3) \n vgg_loss = VGGLoss(shape_hr,aux_loss)\n loss_fea = vgg_loss.custom_perceptual_loss\n loss_dis = tf.keras.losses.MeanAbsoluteError()\n adv_loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)\n \n alfa = args.list_weights[0]\n eta = args.list_weights[1]\n lbd = args.list_weights[2]\n mu = args.list_weights[3]\n\n gan_loss=GANLoss(loss_pix, loss_fea, loss_dis, adv_loss, alfa, eta, lbd, mu)\n\n \n teacher = Teacher(channels=1,scale_factor=scale_factor,distillation_rate=args.distillation_rate)\n print(\"Loading teacher weights...\")\n weights_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,'teacher',scale_factor)\n teacher.load_weights(weights_paph)\n teacher.build((None, None, None,1))\n\n\n ra_gan = PercSR(ra_discriminator=ra_d, generator=g,teacher=teacher)\n ra_gan.compile(d_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),\n g_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),\n 
perc_loss=gan_loss.generative_loss,\n metrics=[psnr,ssim,rmse,lpips])\n \n if(args.eval==True):\n print(\"Loading weights...\")\n checkpoint_paph=\"{}{}_{}x/{}/model.ckpt\".format(args.ckpt_path,args.model,scale_factor,args.generator)\n ra_gan.load_weights(checkpoint_paph)\n print(\"Evaluate model\")\n g.compile(metrics=[psnr,ssim,rmse,lpips])\n get_test_dataset(g,scale_factor,args)\n exit(1)\n\n if (args.load_weights):\n print(\"Loading weights...\")\n checkpoint_paph=\"{}{}_{}x/model.ckpt\".format(args.ckpt_path,args.generator,scale_factor) \n ra_gan.load_weights_gen(checkpoint_paph)\n # trainable_layers(g, len(g.layers)-1)\n trainable_weights(g)\n\n if (args.load_weights_perc):\n print(\"Loading perceptual weights...\")\n checkpoint_paph=\"{}{}_{}x/{}/model.ckpt\".format(args.ckpt_path,args.model,scale_factor,args.generator) \n ra_gan.load_weights(checkpoint_paph)\n\n for i in range(len(g.layers)):\n print(\"Layer: {}\".format(g.layers[i].name))\n if(g.layers[i].name == trainable_layer):\n break\n else:\n g.layers[i].trainable=False\n #trainable_layers(g, len(g.layers)-1)\n trainable_weights(g)\n\n \n\n\n save_img_callback = SaveImageCallback(\n model=g,\n model_name=args.model+'_'+args.generator,\n scale_factor=scale_factor,\n epochs_per_save=args.epochs_per_save,\n lr_paths=test_print[0],\n hr_paths=test_print[1],\n log_dir=args.test_logdir,\n file_writer_cm=file_writer_cm) \n callbacks.append(save_img_callback)\n\n\n checkpoint_paph=\"{}{}_{}x/{}/model.ckpt\".format(args.ckpt_path,args.model,scale_factor,args.generator)\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_paph,\n save_weights_only=True,\n monitor='val_lpips',\n save_freq= 'epoch', \n mode='min',\n save_best_only=True)\n callbacks.append(checkpoint_callback)\n\n ra_gan.fit(train_batch, epochs=args.num_epochs,callbacks=callbacks,verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)\n checkpoint_paph=\"{}{}_{}x/{}/{}/model.ckpt\".format(args.ckpt_path,args.model,scale_factor,args.generator,'generator') \n ra_gan.save_weights_gen(checkpoint_paph)\n \n\n print(\"Evaluate model\")\n eval = ra_gan.evaluate(test_batch, verbose=1)\n saved_model(ra_gan.generator, 'saved_model/{}/'.format(args.model))\n return eval, ra_gan.generator.get_run_time()\n\nif __name__ == '__main__':\n main()\n" ]
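The train_* functions in the file above all repeat the same transfer-learning idiom: copy weights layer by layer from a lower-upscale model and freeze each copied layer until the named trainable layer is reached. A minimal standalone sketch of that pattern follows; the helper name transfer_until is illustrative, not from the source, and it assumes both Keras models share the same layer ordering up to the stop layer.

# Sketch of the layer-copy/freeze loop repeated in the train_* functions above.
def transfer_until(model, model_from, stop_layer_name):
    for i, src_layer in enumerate(model_from.layers):
        if src_layer.name == stop_layer_name:
            break  # layers from here on stay freshly initialized and trainable
        model.layers[i].set_weights(src_layer.get_weights())
        model.layers[i].trainable = False  # freeze the transferred layer
    return model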
[ [ "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.losses.Huber", "tensorflow.math.exp", "tensorflow.keras.losses.MeanSquaredError", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.keras.losses.MeanAbsoluteError", "tensorflow.keras.callbacks.ReduceLROnPlateau", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.callbacks.LearningRateScheduler", "tensorflow.summary.create_file_writer", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.models.save_model", "tensorflow.keras.callbacks.EarlyStopping" ] ]
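For reference, a self-contained sketch of the 'schedules' learning-rate policy wired up in main() above: the current rate is multiplied by exp(-0.1) (about 0.905) at each scheduled epoch. The epoch list below is a placeholder, not a value from the source; assumes TensorFlow 2.x.

import tensorflow as tf

schedule_values = [20, 40, 60]  # placeholder epoch indices

def scheduler(epoch, lr):
    # Decay the learning rate whenever this epoch index is scheduled.
    return lr * tf.math.exp(-0.1) if epoch in schedule_values else lr

# Pass this callback in the callbacks list of model.fit(...).
reduce_lr = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)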
RetiSpA/Covid-19
[ "d2d5d73a78bbd2966abfb2ceb43a2270a165fb5c" ]
[ "python/utilities/path_dataset.py" ]
[ "import pandas as pd\nimport numpy as np\nimport re\nimport os\nimport requests\n\n\ndef get_folder_path(custom_path=''):\n \"\"\"\n Define a base folder on your file system\n \"\"\"\n if custom_path == '' or custom_path is None:\n BASEPATH = os.path.abspath('')\n else:\n BASEPATH = os.path.abspath(custom_path)\n\n return BASEPATH\n\n\ndef checkpath(to_path, filename):\n \"\"\"\n Check path and filename\n \"\"\"\n if to_path == '':\n to_path = get_folder_path('./')\n\n if filename == '':\n print(\"Please insert a valid filename\")\n return None\n\n file_path = os.path.join(to_path, filename)\n\n return file_path\n\n\ndef define_path(BASEPATH='', DATASET='test_data.xlsx', DATA_LOCATION='data', SEED=42):\n \"\"\"\n Define a path for a file.\n INPUT: Basepath you want to search and dataset name plus a default location\n OUTPUT: the file path\n \"\"\"\n\n # Set the random seed\n np.random.seed(SEED)\n\n if BASEPATH == '':\n BASEPATH = os.path.abspath('')\n\n # Set the default Data Path\n if DATA_LOCATION == '' or DATA_LOCATION is None:\n DATA_PATH = os.path.join(BASEPATH, DATASET)\n else:\n DATA_PATH = os.path.join(BASEPATH, DATA_LOCATION, DATASET)\n\n print(f\"\\n Dataset location: {DATA_PATH} \\n\")\n\n return DATA_PATH\n\n\ndef read_dataset_online(dataset_url, file_path='', file_name='', write_disk=False):\n try:\n\n file_bin = requests.get(dataset_url)\n\n # Write to disk\n if write_disk:\n to_path = checkpath(file_path, file_name)\n with open(to_path, 'w') as output_file:\n output_file.write(file_bin.text)\n print(f\"File successfully written to: {to_path}\")\n return file_bin\n\n # Finally get the pandas dataframe\n df = pd.read_csv(dataset_url)\n print('File successfully read')\n return df\n\n except Exception as message:\n print(f\"Impossible to read the csv from url source: {message}\")\n return None\n\n\ndef read_dataset(path='', csv_sep=',', xlsx_sheet=\"Sheet1\", header=0):\n \"\"\"\n Automatically read a csv or xlsx file\n \"\"\"\n # Check if the path is empty\n if not path:\n print(\"Please specify a path where you want to read\")\n return False\n\n df = pd.DataFrame()\n\n # Load xlsx\n if re.search(r\"xlsx\", path):\n print(\"Found excel in the path\")\n\n try:\n df = pd.read_excel(path, sheet_name=xlsx_sheet, header=header)\n print(\"Dataset loaded\")\n\n except Exception as message:\n print(f\"Impossible to read xlsx: {message}\")\n\n # Load csv\n elif re.search(r\"csv\", path):\n print(\"Found csv in the path\")\n\n try:\n df = pd.read_csv(path, sep=csv_sep, header=header)\n print(\"Dataset loaded\")\n\n except Exception as message:\n print(f\"Impossible to read csv: {message}\")\n\n else:\n print(\"Impossible to find dataset\")\n return False\n\n return df\n\n\ndef save_dataset(path='', dataframe=None, csv_sep=',', index=True, header=True):\n \"\"\"\n Save dataframe to disk\n \"\"\"\n if dataframe is None:\n print(\"Please specify dataset you want to save\")\n return False\n elif isinstance(dataframe, pd.DataFrame):\n\n # Check if the path is empty\n if not path:\n print(\"Please specify a path where you want to write\")\n return False\n\n try:\n dataframe.to_csv(path, index=index, header=header, sep=csv_sep, encoding='utf-8')\n print(f'File saved successfully to: {path}')\n return True\n except Exception as message:\n print(f\"Impossible to write dataframe: {message}\")\n return False\n\n else:\n print(\"Please use only pandas DataFrames (for now)\")\n return False\n" ]
[ [ "numpy.random.seed", "pandas.DataFrame", "pandas.read_csv", "pandas.read_excel" ] ]
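A hypothetical usage example for the helpers in python/utilities/path_dataset.py above; the import path and file names are placeholders for illustration, assuming the module is importable and data/test_data.xlsx exists.

from path_dataset import define_path, read_dataset, save_dataset

data_path = define_path(DATASET='test_data.xlsx', DATA_LOCATION='data')
df = read_dataset(data_path, xlsx_sheet='Sheet1')
# read_dataset returns False on failure, otherwise a pandas DataFrame.
if df is not False and not df.empty:
    save_dataset('data/test_data.csv', dataframe=df, csv_sep=',')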
keyonvafa/sequential-rationales
[ "515d78718ee09c924d72bc9f9c6caebaec9b0b11" ]
[ "fairseq/fairseq/models/transformer/transformer_decoder.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Any, Dict, List, Optional\n\nimport torch\nimport torch.nn as nn\nfrom fairseq import utils\nfrom fairseq.distributed import fsdp_wrap\nfrom fairseq.models import FairseqIncrementalDecoder\nfrom fairseq.models.transformer import TransformerConfig\nfrom fairseq.modules import (\n AdaptiveSoftmax,\n BaseLayer,\n FairseqDropout,\n LayerDropModuleList,\n LayerNorm,\n PositionalEmbedding,\n SinusoidalPositionalEmbedding,\n)\nfrom fairseq.modules import transformer_layer\nfrom fairseq.modules.checkpoint_activations import checkpoint_wrapper\nfrom fairseq.modules.quant_noise import quant_noise as apply_quant_noise_\nfrom torch import Tensor\n\n# rewrite name for backward compatibility in `make_generation_fast_`\ndef module_name_fordropout(module_name: str) -> str:\n if module_name == 'TransformerDecoderBase':\n return 'TransformerDecoder'\n else:\n return module_name\n\n\nclass TransformerDecoderBase(FairseqIncrementalDecoder):\n \"\"\"\n Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer\n is a :class:`TransformerDecoderLayer`.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n dictionary (~fairseq.data.Dictionary): decoding dictionary\n embed_tokens (torch.nn.Embedding): output embedding\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(\n self,\n cfg,\n dictionary,\n embed_tokens,\n no_encoder_attn=False,\n output_projection=None,\n ):\n self.cfg = cfg\n super().__init__(dictionary)\n self.register_buffer(\"version\", torch.Tensor([3]))\n self._future_mask = torch.empty(0)\n\n self.dropout_module = FairseqDropout(\n cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)\n )\n self.decoder_layerdrop = cfg.decoder.layerdrop\n self.share_input_output_embed = cfg.share_decoder_input_output_embed\n\n # added for sequential rationales:\n self.word_dropout_mixture = self.cfg.word_dropout_mixture\n self.word_dropout_type = self.cfg.word_dropout_type\n\n input_embed_dim = embed_tokens.embedding_dim\n embed_dim = cfg.decoder.embed_dim\n self.embed_dim = embed_dim\n self.output_embed_dim = cfg.decoder.output_dim\n\n self.padding_idx = embed_tokens.padding_idx\n self.max_target_positions = cfg.max_target_positions\n\n self.embed_tokens = embed_tokens\n\n self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)\n\n if not cfg.adaptive_input and cfg.quant_noise.pq > 0:\n self.quant_noise = apply_quant_noise_(\n nn.Linear(embed_dim, embed_dim, bias=False),\n cfg.quant_noise.pq,\n cfg.quant_noise.pq_block_size,\n )\n else:\n self.quant_noise = None\n\n self.project_in_dim = (\n Linear(input_embed_dim, embed_dim, bias=False)\n if embed_dim != input_embed_dim\n else None\n )\n self.embed_positions = (\n PositionalEmbedding(\n self.max_target_positions,\n embed_dim,\n self.padding_idx,\n learned=cfg.decoder.learned_pos,\n )\n if not cfg.no_token_positional_embeddings\n else None\n )\n if cfg.layernorm_embedding:\n self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)\n else:\n self.layernorm_embedding = None\n\n self.cross_self_attention = cfg.cross_self_attention\n\n if self.decoder_layerdrop > 0.0:\n self.layers = LayerDropModuleList(p=self.decoder_layerdrop)\n else:\n self.layers = nn.ModuleList([])\n self.layers.extend(\n [\n 
self.build_decoder_layer(cfg, no_encoder_attn)\n for _ in range(cfg.decoder.layers)\n ]\n )\n self.num_layers = len(self.layers)\n\n if cfg.decoder.normalize_before and not cfg.no_decoder_final_norm:\n self.layer_norm = LayerNorm(embed_dim, export=cfg.export)\n else:\n self.layer_norm = None\n\n self.project_out_dim = (\n Linear(embed_dim, self.output_embed_dim, bias=False)\n if embed_dim != self.output_embed_dim and not cfg.tie_adaptive_weights\n else None\n )\n\n self.adaptive_softmax = None\n self.output_projection = output_projection\n if self.output_projection is None:\n self.build_output_projection(cfg, dictionary, embed_tokens)\n\n def build_output_projection(self, cfg, dictionary, embed_tokens):\n if cfg.adaptive_softmax_cutoff is not None:\n self.adaptive_softmax = AdaptiveSoftmax(\n len(dictionary),\n self.output_embed_dim,\n utils.eval_str_list(cfg.adaptive_softmax_cutoff, type=int),\n dropout=cfg.adaptive_softmax_dropout,\n adaptive_inputs=embed_tokens if cfg.tie_adaptive_weights else None,\n factor=cfg.adaptive_softmax_factor,\n tie_proj=cfg.tie_adaptive_proj,\n )\n elif self.share_input_output_embed:\n self.output_projection = nn.Linear(\n self.embed_tokens.weight.shape[1],\n self.embed_tokens.weight.shape[0],\n bias=False,\n )\n self.output_projection.weight = self.embed_tokens.weight\n else:\n self.output_projection = nn.Linear(\n self.output_embed_dim, len(dictionary), bias=False\n )\n nn.init.normal_(\n self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5\n )\n num_base_layers = cfg.base_layers\n for i in range(num_base_layers):\n self.layers.insert(\n ((i + 1) * cfg.decoder.layers) // (num_base_layers + 1),\n BaseLayer(cfg),\n )\n\n def build_decoder_layer(self, cfg, no_encoder_attn=False):\n layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn)\n checkpoint = cfg.checkpoint_activations\n if checkpoint:\n offload_to_cpu = cfg.offload_activations\n layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)\n # if we are checkpointing, enforce that FSDP always wraps the\n # checkpointed layer, regardless of layer size\n min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0\n layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)\n return layer\n\n def forward(\n self,\n prev_output_tokens,\n encoder_out: Optional[Dict[str, List[Tensor]]] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n features_only: bool = False,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n src_lengths: Optional[Any] = None,\n return_all_hiddens: bool = False,\n position_ids: Optional[Tensor] = None,\n cross_attn_word_dropout_mask: Optional[Tensor] = None,\n return_embeddings: bool = False,\n return_all_attns: bool = False,\n token_embeddings: Optional[Tensor] = None,\n ):\n \"\"\"\n Args:\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (optional): output from the encoder, used for\n encoder-side attention, should be of size T x B x C\n incremental_state (dict): dictionary used for storing state during\n :ref:`Incremental decoding`\n features_only (bool, optional): only return features without\n applying output layer (default: False).\n full_context_alignment (bool, optional): don't apply\n auto-regressive mask to self-attention (default: False).\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary 
with any model-specific outputs\n \"\"\"\n x, extra = self.extract_features(\n prev_output_tokens,\n encoder_out=encoder_out,\n incremental_state=incremental_state,\n full_context_alignment=full_context_alignment,\n alignment_layer=alignment_layer,\n alignment_heads=alignment_heads,\n position_ids=position_ids,\n cross_attn_word_dropout_mask=cross_attn_word_dropout_mask,\n return_embeddings=return_embeddings,\n return_all_attns=return_all_attns,\n token_embeddings=token_embeddings,\n )\n if not features_only:\n x = self.output_layer(x)\n return x, extra\n\n def extract_features(\n self,\n prev_output_tokens,\n encoder_out: Optional[Dict[str, List[Tensor]]],\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n position_ids: Optional[Tensor] = None,\n cross_attn_word_dropout_mask: Optional[Tensor] = None,\n return_embeddings: bool = False,\n return_all_attns: bool = False,\n token_embeddings: Optional[Tensor] = None,\n ):\n return self.extract_features_scriptable(\n prev_output_tokens,\n encoder_out,\n incremental_state,\n full_context_alignment,\n alignment_layer,\n alignment_heads,\n position_ids,\n cross_attn_word_dropout_mask,\n return_embeddings,\n return_all_attns,\n token_embeddings,\n )\n\n \"\"\"\n A scriptable subclass of this class has an extract_features method and calls\n super().extract_features, but super() is not supported in torchscript. A copy of\n this function is made to be used in the subclass instead.\n \"\"\"\n\n def extract_features_scriptable(\n self,\n prev_output_tokens,\n encoder_out: Optional[Dict[str, List[Tensor]]],\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n full_context_alignment: bool = False,\n alignment_layer: Optional[int] = None,\n alignment_heads: Optional[int] = None,\n position_ids: Optional[Tensor] = None,\n cross_attn_word_dropout_mask: Optional[Tensor] = None,\n return_embeddings: bool = False,\n return_all_attns: bool = False,\n token_embeddings: Optional[Tensor] = None,\n ):\n \"\"\"\n Similar to *forward* but only return features.\n\n Includes several features from \"Jointly Learning to Align and\n Translate with Transformer Models\" (Garg et al., EMNLP 2019).\n\n Args:\n full_context_alignment (bool, optional): don't apply\n auto-regressive mask to self-attention (default: False).\n alignment_layer (int, optional): return mean alignment over\n heads at this layer (default: last layer).\n alignment_heads (int, optional): only average alignment over\n this many heads (default: all heads).\n position_ids (Tensor, optional): Tensor of same size as\n *prev_output_tokens* containing position indices.\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n \"\"\"\n bs, slen = prev_output_tokens.size()\n if alignment_layer is None:\n alignment_layer = self.num_layers - 1\n\n enc: Optional[Tensor] = None\n padding_mask: Optional[Tensor] = None\n if encoder_out is not None and len(encoder_out[\"encoder_out\"]) > 0:\n enc = encoder_out[\"encoder_out\"][0]\n assert (\n enc.size()[1] == bs\n ), f\"Expected enc.shape == (t, {bs}, c) got {enc.shape}\"\n if encoder_out is not None and len(encoder_out[\"encoder_padding_mask\"]) > 0:\n padding_mask = encoder_out[\"encoder_padding_mask\"][0]\n\n # embed positions\n positions = None\n if self.embed_positions is not None:\n positions = 
self.embed_positions(\n prev_output_tokens, incremental_state=incremental_state, \n positions=position_ids\n )\n\n if incremental_state is not None:\n prev_output_tokens = prev_output_tokens[:, -1:]\n if positions is not None:\n positions = positions[:, -1:]\n\n # embed tokens and positions\n if token_embeddings is None:\n token_embeddings = self.embed_tokens(prev_output_tokens)\n x = self.embed_scale * token_embeddings\n\n if self.quant_noise is not None:\n x = self.quant_noise(x)\n\n if self.project_in_dim is not None:\n x = self.project_in_dim(x)\n\n if positions is not None:\n x += positions\n\n if self.layernorm_embedding is not None:\n x = self.layernorm_embedding(x)\n\n x = self.dropout_module(x)\n\n first_layer_embedding = [x] if return_embeddings else []\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n self_attn_padding_mask: Optional[Tensor] = None\n if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():\n self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)\n\n # With probability `word_dropout_mixture`, perform word dropout.\n # Otherwise, keep the batch the same.\n if self.training and torch.rand(1) < self.word_dropout_mixture:\n batch_size, tgt_len = prev_output_tokens.size()\n # self_attn_word_dropout_mask.shape = (batch_size, tgt_len, tgt_len), \n # where each dropped out token will be -1e4 and each kept token will be\n # 1.\n self_attn_word_dropout_mask = torch.eye(tgt_len)[None].repeat(\n [batch_size, 1, 1]).to(x)\n if self.word_dropout_type == 'uniform_length':\n # Sample input sequences uniformly at random from all possible \n # lengths.\n candidates = torch.randperm(tgt_len)\n num_tokens_to_keep = torch.randint(low=0, high=tgt_len + 1, size=[1])\n self_attn_word_dropout_mask[:, :, candidates[:num_tokens_to_keep]] = 1.\n elif self.word_dropout_type == 'inverse_length':\n # dropout_prob = 1 - (1 / tgt_len), so keep_prob = 1 / tgt_len\n tokens_to_keep = torch.bernoulli((1 / tgt_len) + torch.zeros(\n [batch_size, tgt_len])).to(x)\n self_attn_word_dropout_mask += tokens_to_keep[:, None, :]\n self_attn_word_dropout_mask = torch.clamp(\n self_attn_word_dropout_mask, 0., 1.)\n else:\n raise ValueError(\"Dropout type must be 'uniform_length' or \"\n \"'inverse_length'\")\n # To avoid nan errors, we use -1e4 instead of -inf.\n self_attn_word_dropout_mask = torch.where(\n self_attn_word_dropout_mask == 0., -1e4, 0.)\n else:\n self_attn_word_dropout_mask = None\n\n # decoder layers\n attn: Optional[Tensor] = None\n inner_states: List[Optional[Tensor]] = [x]\n\n self_attns: List[Optional[Tensor]] = []\n cross_attns: List[Optional[Tensor]] = []\n for idx, layer in enumerate(self.layers):\n if incremental_state is None and not full_context_alignment:\n self_attn_mask = self.buffered_future_mask(x)\n else:\n self_attn_mask = None\n\n x, layer_attn, _, self_attn, cross_attn = layer(\n x,\n enc,\n padding_mask,\n incremental_state,\n self_attn_mask=self_attn_mask,\n self_attn_padding_mask=self_attn_padding_mask,\n need_attn=bool((idx == alignment_layer)),\n need_head_weights=bool((idx == alignment_layer)),\n self_attn_word_dropout_mask=self_attn_word_dropout_mask,\n cross_attn_word_dropout_mask=cross_attn_word_dropout_mask,\n return_all_attns=return_all_attns,\n )\n if return_all_attns:\n self_attns.append(self_attn)\n cross_attns.append(cross_attn)\n inner_states.append(x)\n if layer_attn is not None and idx == alignment_layer:\n attn = layer_attn.float().to(x)\n\n if attn is not None:\n if alignment_heads is not None:\n attn = 
attn[:alignment_heads]\n\n # average probabilities over heads\n attn = attn.mean(dim=0)\n\n if self.layer_norm is not None:\n x = self.layer_norm(x)\n\n # T x B x C -> B x T x C\n x = x.transpose(0, 1)\n\n if self.project_out_dim is not None:\n x = self.project_out_dim(x)\n\n return x, {\"attn\": [attn], \"inner_states\": inner_states,\n \"first_layer_embedding\": first_layer_embedding,\n \"self_attns\": self_attns, \"cross_attns\": cross_attns}\n\n def output_layer(self, features):\n \"\"\"Project features to the vocabulary size.\"\"\"\n if self.adaptive_softmax is None:\n # project back to size of vocabulary\n return self.output_projection(features)\n else:\n return features\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the decoder.\"\"\"\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)\n\n def buffered_future_mask(self, tensor):\n dim = tensor.size(0)\n # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.\n if (\n self._future_mask.size(0) == 0\n or (not self._future_mask.device == tensor.device)\n or self._future_mask.size(0) < dim\n ):\n self._future_mask = torch.triu(\n utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1\n )\n self._future_mask = self._future_mask.to(tensor)\n return self._future_mask[:dim, :dim]\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"Upgrade a (possibly old) state dict for new versions of fairseq.\"\"\"\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n\n if f\"{name}.output_projection.weight\" not in state_dict:\n if self.share_input_output_embed:\n embed_out_key = f\"{name}.embed_tokens.weight\"\n else:\n embed_out_key = f\"{name}.embed_out\"\n if embed_out_key in state_dict:\n state_dict[f\"{name}.output_projection.weight\"] = state_dict[\n embed_out_key\n ]\n if not self.share_input_output_embed:\n del state_dict[embed_out_key]\n\n for i in range(self.num_layers):\n # update layer norms\n layer_norm_map = {\n \"0\": \"self_attn_layer_norm\",\n \"1\": \"encoder_attn_layer_norm\",\n \"2\": \"final_layer_norm\",\n }\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layers.{}.layer_norms.{}.{}\".format(name, i, old, m)\n if k in state_dict:\n state_dict[\n \"{}.layers.{}.{}.{}\".format(name, i, new, m)\n ] = state_dict[k]\n del state_dict[k]\n\n version_key = \"{}.version\".format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.0)\n return m\n\n\nclass TransformerDecoder(TransformerDecoderBase):\n def __init__(\n self,\n args,\n dictionary,\n embed_tokens,\n no_encoder_attn=False,\n output_projection=None,\n ):\n self.args = args\n super().__init__(\n TransformerConfig.from_namespace(args),\n dictionary,\n embed_tokens,\n no_encoder_attn=no_encoder_attn,\n output_projection=output_projection,\n )\n\n def 
build_output_projection(self, args, dictionary, embed_tokens):\n super().build_output_projection(\n TransformerConfig.from_namespace(args), dictionary, embed_tokens\n )\n\n def build_decoder_layer(self, args, no_encoder_attn=False):\n return super().build_decoder_layer(\n TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn\n )\n" ]
[ [ "torch.nn.Linear", "torch.rand", "torch.zeros", "torch.nn.ModuleList", "torch.nn.init.constant_", "torch.FloatTensor", "torch.nn.init.xavier_uniform_", "torch.randperm", "torch.clamp", "torch.nn.init.normal_", "torch.randint", "torch.eye", "torch.empty", "torch.Tensor", "torch.where" ] ]
Belovolov/wrn-server
[ "76bd2652f1129986843918f9d7ef669229793859" ]
[ "utils/wr_nets.py" ]
[ "import cv2\nimport numpy as np\nimport wide_residual_network as wrn\nfrom keras.datasets import cifar10\nfrom keras import backend as K\nimport tensorflow as tf\nimport cifarMeta\n\nclass Wrn168c10():\n def __init__(self, weightsFile):\n (self.trainX, trainY), (testX, testY) = cifar10.load_data()\n init_shape = (3, 32, 32) if K.image_dim_ordering() == 'th' else (32, 32, 3)\n self.model = wrn.create_wide_residual_network(init_shape, nb_classes=10, N=2, k=8, dropout=0.00)\n self.model.load_weights(weightsFile)\n self.graph = tf.get_default_graph()\n def get_prediction(self, imageData):\n img = np.frombuffer(imageData, np.uint8)\n image = cv2.imdecode(img, cv2.IMREAD_COLOR).astype('float32')\n \n arr = np.empty([1,32,32,3])\n arr[0] = image\n \n #that's the mean/std normalization\n trainX_t = np.concatenate((arr, self.trainX.astype('float32')))\n trainX_t = (trainX_t - trainX_t.mean(axis=0)) / (trainX_t.std(axis=0))\n \n with self.graph.as_default():\n yPreds = self.model.predict(trainX_t[0:1]).round(2)\n \n result = {}\n for i in range(0, len(yPreds[0])):\n if (yPreds[0][i]>0):\n result[cifarMeta.c10[i]]=str(yPreds[0][i])\n topResults = [(k, result[k]) for k in sorted(result, key=result.get, reverse=True)][0:5]\n print(topResults)\n \n return topResults\n \nclass Wrn22c100():\n def __init__(self, weightsFile):\n (self.trainX, trainY), (testX, testY) = cifar10.load_data()\n init_shape = (3, 32, 32) if K.image_dim_ordering() == 'th' else (32, 32, 3)\n self.model = wrn.create_wide_residual_network(init_shape, nb_classes=10, N=2, k=8, dropout=0.00)\n self.model.load_weights(weightsFile)\n self.graph = tf.get_default_graph()\n def get_prediction(self, imageData):\n img = np.frombuffer(imageData, np.uint8)\n image = cv2.imdecode(img, cv2.IMREAD_COLOR).astype('float32')\n \n arr = np.empty([1,32,32,3])\n arr[0] = image\n \n #that's the mean/std normalization\n trainX_t = np.concatenate((arr, self.trainX.astype('float32')))\n trainX_t = (trainX_t - trainX_t.mean(axis=0)) / (trainX_t.std(axis=0))\n \n with self.graph.as_default():\n yPreds = self.model.predict(trainX_t[0:1]).round(2)\n \n result = {}\n for i in range(0, len(yPreds[0])):\n if (yPreds[0][i]>0):\n result[cifarMeta.c10[i]]=str(yPreds[0][i])\n topResults = [(k, result[k]) for k in sorted(result, key=result.get, reverse=True)][0:5]\n print(topResults)\n \n return topResults" ]
[ [ "numpy.frombuffer", "numpy.empty", "tensorflow.get_default_graph" ] ]
noobermin/sharks-butts
[ "55de06a4c626257d77b374eab955975ef4eac23e" ]
[ "bin/sclrq.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n'''\nJust render something.\n\nUsage:\n ./sclrq.py [options] (--show|-s) <i>\n ./sclrq.py [options] <i> <outname>\n\nOptions:\n --help -h\n --show -s Show\n --nozip -U sclr/flds are NOT gzipped.\n --zip -Z sclr/flds are gzipped. If neither of these two are set,\n guess based on name.\n --log10 -l Log it.\n --lims=LIM Set lims [default: (1e18,1e23)]\n --highlight=H Set highlight.\n --quantity=Q -Q Q Render this quantity [default: RhoN10]\n --dir=D -D D Read from this dir [default: .]\n --restrict=R Restrict it.\n --x-restrict=R Restrict by positions as a 4 tuple.\n --t-offset=T Set time offset in fs. [default: 0].\n --title=T Set the title [default: Electron density]\n --units=U Set the colorbar units [default: number/cc]\n --laser Plot contours of the laser poyting vector.\n --intensity=I -I I Make a contour of this intensity [default: 3e18]\n --equal -E Make spatial dimensions equal.\n --rotate -R Rotate instead of flipping x and y (ie., flip x axis).\n --no-ticks Don't include ticks.\n --orientation=V \"V\" for vertical or \"H\" for horizontal [default: V]\n'''\nfrom docopt import docopt;\nimport numpy as np;\nimport numpy.linalg as lin;\nfrom pys import parse_ftuple, parse_ituple;\nfrom lspreader.flds import read_indexed, restrict\nfrom lspplot.sclr import S;\nfrom lspplot.pc import pc,highlight;\nfrom lspplot.consts import c,mu0,e0;\n\nopts = docopt(__doc__,help=True);\n\nquantity = opts['--quantity'];\nfvar=['E','B'] if opts['--laser'] else None;\ntitlestr=opts['--title']\nunits=opts['--units'];\nsvar=[quantity];\nif opts['--nozip']:\n gzip = False;\nelif opts['--zip']:\n gzip = True;\nelse:\n gzip = 'guess';\n#####################################\n#reading data\nd = read_indexed(int(opts['<i>']),\n flds=fvar,sclr=svar,\n gzip=gzip,dir=opts['--dir'],\n gettime=True,vector_norms=False);\n#choosing positions\nylabel = 'z' if np.isclose(d['y'].max(),d['y'].min()) else 'y';\n\nif opts['--x-restrict']:\n res = parse_ftuple(opts['--x-restrict'], length=4);\n res[:2] = [ np.abs(d['x'][:,0]*1e4 - ires).argmin() for ires in res[:2] ];\n res[2:] = [ np.abs(d[ylabel][0,:]*1e4 - ires).argmin() for ires in res[2:] ];\n #including the edges\n res[1]+=1;\n res[3]+=1;\n restrict(d,res);\nelif opts['--restrict']:\n res = parse_ituple(opts['--restrict'],length=None);\n restrict(d,res);\n\nx,y = d['x']*1e4, d[ylabel]*1e4;\n#massaging data\nt = d['t'];\nq = d[quantity];\n\n#####################################\n#plotting\n\n#getting options from user\nmn,mx = parse_ftuple(opts['--lims'],length=2);\nif opts['--rotate']:\n rot,flip = True, False;\nelse:\n rot,flip = False, True;\n\n\n#plot the density\ntoff = float(opts['--t-offset']);\ntitle=\"{}\\nTime: {:.2f} fs\".format(titlestr,t*1e6 + toff);\n\n#orientation of colorbar\nif opts['--orientation'] == \"V\":\n orient = \"vertical\"\nelif opts['--orientation'] == \"H\":\n orient = \"horizontal\"\nelse:\n print('orientation must be either \"V\" or \"H\"');\n print(__doc__);\n quit();\n\nr=pc(\n q,(x,y), lims=(mn,mx),log=opts['--log10'],\n clabel=units, title=title,\n agg=not opts['--show'],\n flip=flip,\n rotate=rot,\n orient=orient,);\n\nif opts['--highlight'] and opts['--highlight'] != \"None\" and opts['--highlight'] != 'none':\n myhi = float(opts['--highlight']);\n highlight(\n r, myhi,\n color=\"lightyellow\", alpha=0.5);\n\nif opts['--laser']:\n laser = S(d);\n I = float(opts['--intensity']);\n highlight(r, I, q=laser,\n color=\"red\", alpha=0.15);\n \nimport matplotlib.pyplot as plt;\nif 
opts['--equal']:\n plt.axis('equal');\n r['axes'].autoscale(tight=True);\nif opts['--no-ticks']:\n plt.tick_params(\n axis='both',\n which='both',\n bottom='off',\n top='off',\n right='off',\n left='off');\n\nif opts['--show']:\n plt.show();\nelse:\n plt.savefig(opts['<outname>']);\n\n\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.tick_params", "numpy.abs", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ] ]
kevintzd/TargetDetection
[ "be4aebb3774be70fe37b1f3df2815cba024b6eae" ]
[ "image_web_server.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom flask import Flask, request, jsonify, send_from_directory, render_template\nfrom flask import Flask, request, redirect, url_for\nfrom werkzeug.utils import secure_filename\nfrom keras.backend import set_session\nimport argparse\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom lib.config import config as cfg\nfrom lib.utils.nms_wrapper import nms\nfrom lib.utils.test import im_detect\n#from nets.resnet_v1 import resnetv1\nfrom lib.nets.vgg16 import vgg16\nfrom lib.utils.timer import Timer\n\n\nUPLOAD_FOLDER = 'www/upload/'\nRESULT_FOLDER = 'www/result'\nALLOWED_EXTENSIONS = set([ 'png', 'jpg', 'jpeg', 'JPG', 'PNG', 'JPEG'])\nCLASSES = ('__background__',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\nCOLORSET = {'aeroplane':'aqua', 'bicycle':'red', 'bird':'chocolate', 'boat':'darkkhaki',\n 'bottle':'floralwhite', 'bus':'gray', 'car':'green', 'cat':'ivory', 'chair':'indigo',\n 'cow':'blue', 'diningtable':'silver', 'dog':'lime', 'horse':'violet',\n 'motorbike':'purple', 'person':'lightblue', 'pottedplant':'salmon',\n 'sheep':'mintcream', 'sofa':'orange', 'train':'peru', 'tvmonitor':'pink'}\nNETS = {'vgg16': ('vgg16_faster_rcnn_iter_40000.ckpt',), 'res101': ('res101_faster_rcnn_iter_110000.ckpt',)}\nDATASETS = {'pascal_voc': ('voc_2007_trainval',), 'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)}\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef demo(sess, net, image_name, conter, result = True):\n im_read = cv2.imread(image_name, cv2.IMREAD_ANYCOLOR)\n b, g, r = cv2.split(im_read)\n im = cv2.merge([r, g, b])\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))\n # Visualize detections for each class\n CONF_THRESH = 0.7\n NMS_THRESH = 0.3\n fig, ax = plt.subplots(figsize=(12, 12))\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # skipped background\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n inds = np.where(dets[:, -1] >= CONF_THRESH)[0]\n if len(inds) == 0:\n conter +=1\n continue\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor=COLORSET[cls], linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(cls, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n if conter == len(CLASSES)-1:\n result = False\n print(result)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n return result\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo')\n parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',\n choices=NETS.keys(), default='vgg16')\n parser.add_argument('--dataset', dest='dataset', help='Trained dataset [pascal_voc pascal_voc_0712]',\n choices=DATASETS.keys(), 
default='pascal_voc')\n args = parser.parse_args()\n return args\n\ndef return_img_stream(img_local_path):\n import base64\n img_stream = ''\n with open(img_local_path, 'r') as img_f:\n img_stream = img_f.read()\n img_stream = base64.b64encode(img_stream)\n return img_stream\n\ndef load_model():\n global sess\n global net\n global graph\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n graph = tf.get_default_graph()\n set_session(sess)\n args = parse_args()\n # model path\n demonet = args.demo_net\n dataset = args.dataset\n tfmodel = os.path.join('default', DATASETS[dataset][0], 'default', NETS[demonet][0])\n if not os.path.isfile(tfmodel + '.meta'):\n print(tfmodel)\n raise IOError(('{:s} not found.\\nDid you download the proper networks from '\n 'our server and place them properly?').format(tfmodel + '.meta'))\n # set config\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n tfconfig.gpu_options.allow_growth = True\n # init session\n sess = tf.Session(config=tfconfig)\n # load network\n if demonet == 'vgg16':\n net = vgg16(batch_size=1)\n else:\n raise NotImplementedError\n n_classes = len(CLASSES)\n # create the structure of the net having a certain shape (which depends on the number of classes)\n net.create_architecture(sess, \"TEST\", n_classes,\n tag='default', anchor_scales=[8, 16, 32])\n saver = tf.train.Saver()\n saver.restore(sess, tfmodel)\n print('Loaded network {:s}'.format(tfmodel))\n\napp = Flask(__name__)\nload_model()\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['RESULT_FOLDER'] = RESULT_FOLDER\n\n\n\[email protected]('/', methods=['GET'])\ndef send_index():\n return send_from_directory('www', \"index.html\")\n\[email protected]('/www/<path:path>', methods=['GET'])\ndef send_root(path):\n return send_from_directory('www', path)\n\[email protected]('/predict', methods=['POST'])\ndef upload_image1():\n # check if the post request has the file part\n if 'image' not in request.files:\n return jsonify({'error': 'No posted image. Should be attribute named image.'})\n file = request.files['image']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n return jsonify({'error': 'Empty filename submitted.'})\n if file and allowed_file(file.filename):\n\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n cachedir = os.path.join(app.config['RESULT_FOLDER'],filename)\n if os.path.exists(cachedir):\n response = {'url': \"www/result/{:s}\".format(filename.split('/')[-1])}\n print(response)\n return jsonify(response)\n else:\n filename = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n conter = 0\n result = True\n result = demo(sess, net, filename, conter, result)\n if result == True:\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.savefig(\"www/result/{:s}\".format(filename.split('/')[-1]), bbox_inches='tight', transparent=True, pad_inches=0.0)\n response = {'url': \"www/result/{:s}\".format(filename.split('/')[-1])}\n print(response)\n return jsonify(response)\n else:\n response = {'false': \"fail to classify\"}\n print(response)\n return jsonify(response)\n else:\n return jsonify({'error':'File has invalid extension'})\n\nif __name__ == '__main__':\n app.run(host= 'localhost',debug=True)" ]
[ [ "tensorflow.get_default_graph", "tensorflow.Session", "tensorflow.train.Saver", "matplotlib.pyplot.subplots", "tensorflow.ConfigProto", "numpy.where", "matplotlib.pyplot.margins", "matplotlib.pyplot.draw", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.Rectangle", "numpy.hstack", "matplotlib.pyplot.gca", "matplotlib.pyplot.NullLocator", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.axis" ] ]
hb563/chempropvle
[ "217a246607184137c0a00e06ef917aeb18321a52" ]
[ "chemprop/hyperparameter_optimization.py" ]
[ "\"\"\"Optimizes hyperparameters using Bayesian optimization.\"\"\"\n\nfrom copy import deepcopy\nimport json\nfrom typing import Dict, Union\nimport os\n\nfrom hyperopt import fmin, hp, tpe\nimport numpy as np\n\nfrom chempropvle.chemprop.args import HyperoptArgs\nfrom chempropvle.chemprop.constants import HYPEROPT_LOGGER_NAME\nfrom chempropvle.chemprop.models import MoleculeModel\nfrom chempropvle.chemprop.nn_utils import param_count\nfrom chempropvle.chemprop.train import cross_validate, run_training\nfrom chempropvle.chemprop.utils import create_logger, makedirs, timeit\n\n\nSPACE = {\n 'hidden_size': hp.quniform('hidden_size', low=300, high=2400, q=100),\n 'depth': hp.quniform('depth', low=2, high=6, q=1),\n 'dropout': hp.quniform('dropout', low=0.0, high=0.4, q=0.05),\n 'ffn_num_layers': hp.quniform('ffn_num_layers', low=1, high=3, q=1)\n}\nINT_KEYS = ['hidden_size', 'depth', 'ffn_num_layers']\n\n\n@timeit(logger_name=HYPEROPT_LOGGER_NAME)\ndef hyperopt(args: HyperoptArgs) -> None:\n \"\"\"\n Runs hyperparameter optimization on a Chemprop model.\n\n Hyperparameter optimization optimizes the following parameters:\n\n * :code:`hidden_size`: The hidden size of the neural network layers is selected from {300, 400, ..., 2400}\n * :code:`depth`: The number of message passing iterations is selected from {2, 3, 4, 5, 6}\n * :code:`dropout`: The dropout probability is selected from {0.0, 0.05, ..., 0.4}\n * :code:`ffn_num_layers`: The number of feed-forward layers after message passing is selected from {1, 2, 3}\n\n The best set of hyperparameters is saved as a JSON file to :code:`args.config_save_path`.\n\n :param args: A :class:`~chemprop.args.HyperoptArgs` object containing arguments for hyperparameter\n optimization in addition to all arguments needed for training.\n \"\"\"\n # Create logger\n logger = create_logger(name=HYPEROPT_LOGGER_NAME, save_dir=args.log_dir, quiet=True)\n\n # Run grid search\n results = []\n\n # Define hyperparameter optimization\n def objective(hyperparams: Dict[str, Union[int, float]]) -> float:\n # Convert hyperparams from float to int when necessary\n for key in INT_KEYS:\n hyperparams[key] = int(hyperparams[key])\n\n # Copy args\n hyper_args = deepcopy(args)\n\n # Update args with hyperparams\n if args.save_dir is not None:\n folder_name = '_'.join(f'{key}_{value}' for key, value in hyperparams.items())\n hyper_args.save_dir = os.path.join(hyper_args.save_dir, folder_name)\n\n for key, value in hyperparams.items():\n setattr(hyper_args, key, value)\n\n hyper_args.ffn_hidden_size = hyper_args.hidden_size\n\n # Record hyperparameters\n logger.info(hyperparams)\n\n # Cross validate\n mean_score, std_score = cross_validate(args=hyper_args, train_func=run_training)\n\n # Record results\n temp_model = MoleculeModel(hyper_args)\n num_params = param_count(temp_model)\n logger.info(f'num params: {num_params:,}')\n logger.info(f'{mean_score} +/- {std_score} {hyper_args.metric}')\n\n results.append({\n 'mean_score': mean_score,\n 'std_score': std_score,\n 'hyperparams': hyperparams,\n 'num_params': num_params\n })\n\n # Deal with nan\n if np.isnan(mean_score):\n if hyper_args.dataset_type == 'classification':\n mean_score = 0\n else:\n raise ValueError('Can\\'t handle nan score for non-classification dataset.')\n\n return (1 if hyper_args.minimize_score else -1) * mean_score\n\n fmin(objective, SPACE, algo=tpe.suggest, max_evals=args.num_iters, rstate=np.random.RandomState(args.seed))\n\n # Report best result\n results = [result for result in results if not 
np.isnan(result['mean_score'])]\n best_result = min(results, key=lambda result: (1 if args.minimize_score else -1) * result['mean_score'])\n logger.info('best')\n logger.info(best_result['hyperparams'])\n logger.info(f'num params: {best_result[\"num_params\"]:,}')\n logger.info(f'{best_result[\"mean_score\"]} +/- {best_result[\"std_score\"]} {args.metric}')\n\n # Save best hyperparameter settings as JSON config file\n makedirs(args.config_save_path, isfile=True)\n\n with open(args.config_save_path, 'w') as f:\n json.dump(best_result['hyperparams'], f, indent=4, sort_keys=True)\n\n\ndef chemprop_hyperopt() -> None:\n \"\"\"Runs hyperparameter optimization for a Chemprop model.\n\n This is the entry point for the command line command :code:`chemprop_hyperopt`.\n \"\"\"\n hyperopt(args=HyperoptArgs().parse_args())\n" ]
[ [ "numpy.isnan", "numpy.random.RandomState" ] ]
gluru/tensorflow
[ "0290bfd96901018d6fd0a520e77aafb44b19a1ac" ]
[ "tensorflow/lite/python/lite_v2_test.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lite.py functionality related to TensorFlow 2.0.\"\"\"\n\nimport ctypes\nimport functools\nimport itertools\nimport os\nimport sys\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow as tf\n\n# Force loaded shared object symbols to be globally visible. This is needed so\n# that the interpreter_wrapper, in one .so file, can see the test_registerer,\n# in a different .so file. Note that this may already be set by default.\n# pylint: disable=g-import-not-at-top\nif hasattr(sys, 'setdlopenflags') and hasattr(sys, 'getdlopenflags'):\n sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)\n\nfrom tensorflow.lite.python import conversion_metadata_schema_py_generated as metadata_fb\nfrom tensorflow.lite.python import convert\nfrom tensorflow.lite.python import lite\nfrom tensorflow.lite.python import lite_v2_test_util\nfrom tensorflow.lite.python import schema_py_generated as schema_fb\nfrom tensorflow.lite.python import test_util as tflite_test_util\nfrom tensorflow.lite.python import util\nfrom tensorflow.lite.python.convert import mlir_quantize\nfrom tensorflow.lite.python.interpreter import Interpreter\nfrom tensorflow.lite.python.interpreter import InterpreterWithCustomOps\nfrom tensorflow.lite.python.interpreter import OpResolverType\nfrom tensorflow.lite.python.testdata import _pywrap_test_registerer as test_registerer\nfrom tensorflow.lite.python.testdata import double_op\nfrom tensorflow.lite.python.util import get_conversion_metadata\nfrom tensorflow.lite.toco import types_pb2 as _types_pb2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.framework import versions\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import map_ops\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import save_options\nfrom tensorflow.python.saved_model import saved_model\nfrom tensorflow.python.saved_model.loader_impl import parse_saved_model\nfrom tensorflow.python.saved_model.save import save\nfrom tensorflow.python.training.tracking import tracking\n\n# Only run jax related tests when we can import jax.\nDISABLE_JAX_TEST = False\ntry:\n import jax\n from jax import numpy as jnp\nexcept ImportError:\n DISABLE_JAX_TEST = True\n# pylint: enable=g-import-not-at-top\n\n\nclass FromConcreteFunctionTest(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testTypeInvalid(self):\n root = self._getSimpleVariableModel()\n with self.assertRaises(ValueError) as error:\n _ = 
lite.TFLiteConverterV2.from_concrete_functions([root.f], root)\n self.assertIn('call get_concrete_function', str(error.exception))\n\n @test_util.run_v2_only\n def testFloat(self):\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n tflite_model = converter.convert()\n\n # Check output value from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @parameterized.named_parameters(('_INT8InputOutput', dtypes.int8),\n ('_UINT8InputOutput', dtypes.uint8),\n ('_INT16InputOutput', dtypes.int16))\n @test_util.run_v2_only\n def testInvalidFloat(self, inference_input_output_type):\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n with self.assertRaises(ValueError) as error:\n converter.inference_input_type = inference_input_output_type\n converter.inference_output_type = inference_input_output_type\n converter.convert()\n self.assertEqual(\n 'The inference_input_type and inference_output_type '\n 'must be tf.float32.', str(error.exception))\n\n @test_util.run_v2_only\n def testScalarInput(self):\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[])\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testModelWithoutInputs(self):\n\n def _get_random_number_gen():\n root = tracking.AutoTrackable()\n\n @tf.function(input_signature=[])\n def func():\n return tf.random.uniform(shape=[1], dtype=tf.float32)\n\n root.f = func\n to_save = root.f.get_concrete_function()\n return (root, to_save)\n\n # Model with no input\n root, concrete_func = _get_random_number_gen()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n @test_util.run_v2_only\n def testMultiFunctionModel(self):\n \"\"\"Convert a single model in a multi-functional model.\"\"\"\n root = self._getMultiFunctionModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.add.get_concrete_function(input_data)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.add(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testConvertMultipleFunctions(self):\n \"\"\"Convert multiple functions in a multi-functional model.\"\"\"\n root = self._getMultiFunctionModel()\n input_data = tf.constant(1., shape=[1])\n add_func = root.add.get_concrete_function(input_data)\n sub_func = 
root.sub.get_concrete_function(input_data)\n\n # Try converting multiple functions.\n converter = lite.TFLiteConverterV2.from_concrete_functions(\n [add_func, sub_func], root)\n tflite_model = converter.convert()\n\n # Check signatures are valid from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n\n # Verify the SignatureDef structure returned is as expected.\n self.assertEqual(len(signature_defs), 2)\n self.assertEqual(list(signature_defs.keys()), ['add', 'sub'])\n self.assertEqual(len(signature_defs.values()), 2)\n self.assertEqual(list(signature_defs['add'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['add']['inputs'], ['x'])\n self.assertEqual(list(signature_defs['add']['outputs']), ['output_0'])\n self.assertEqual(list(signature_defs['sub'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['sub']['inputs'], ['x'])\n self.assertEqual(list(signature_defs['sub']['outputs']), ['output_0'])\n\n # Verify the Signature runner executions.\n add_signature_runner = interpreter.get_signature_runner('add')\n add_output = add_signature_runner(x=input_data)\n self.assertEqual(add_output['output_0'], 3)\n input_details = add_signature_runner.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('add_x:0', input_details['x']['name'])\n self.assertEqual(np.float32, input_details['x']['dtype'])\n self.assertTrue(([1] == input_details['x']['shape']).all())\n self.assertEqual((0.0, 0), input_details['x']['quantization'])\n\n sub_signature_runner = interpreter.get_signature_runner('sub')\n sub_output = sub_signature_runner(x=input_data)\n self.assertEqual(sub_output['output_0'], -2)\n output_details = sub_signature_runner.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('StatefulPartitionedCall:0',\n output_details['output_0']['name'])\n self.assertEqual(np.float32, output_details['output_0']['dtype'])\n self.assertTrue(([1] == output_details['output_0']['shape']).all())\n self.assertEqual((0.0, 0), output_details['output_0']['quantization'])\n\n # Check the conversion metadata.\n metadata = get_conversion_metadata(tflite_model)\n self.assertIsNotNone(metadata)\n self.assertEqual(metadata.environment.apiVersion, 2)\n self.assertEqual(metadata.environment.modelType,\n metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS)\n self.assertAllEqual([], metadata.options.modelOptimizationModes)\n\n def _getIntegerQuantizeModel(self, num_filters=16):\n np.random.seed(0)\n\n root = tracking.AutoTrackable()\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[1, 5, 5, 3], dtype=tf.float32)])\n def func(inp):\n conv = tf.nn.conv2d(\n inp,\n tf.ones([3, 3, 3, num_filters]), strides=[1, 1, 1, 1], padding='SAME')\n output = tf.nn.relu(conv, name='output')\n return output\n\n def calibration_gen():\n for _ in range(5):\n yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]\n\n root.f = func\n to_save = root.f.get_concrete_function()\n return (root, to_save, calibration_gen)\n\n @parameterized.named_parameters(\n ('EnableMlirQuantizer', True), # enable mlir quantizer\n ('DisableMlirQuantizer', False)) # disable mlir quantizer\n def testPostTrainingCalibrateAndQuantize(self, mlir_quantizer):\n root, func, calibration_gen = self._getIntegerQuantizeModel()\n\n # Convert float model.\n float_converter = lite.TFLiteConverterV2.from_concrete_functions([func],\n root)\n float_tflite_model = float_converter.convert()\n 
self.assertIsNotNone(float_tflite_model)\n\n # Convert quantized model.\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],\n root)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n quantized_converter.experimental_new_quantizer = mlir_quantizer\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n # Check the conversion metadata.\n metadata = get_conversion_metadata(quantized_tflite_model)\n self.assertIsNotNone(metadata)\n self.assertEqual(\n metadata.environment.tensorflowVersion.decode('utf-8'),\n versions.__version__)\n self.assertEqual(metadata.environment.apiVersion, 2)\n self.assertEqual(metadata.environment.modelType,\n metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS)\n self.assertEqual(metadata.options.allowCustomOps, False)\n self.assertEqual(metadata.options.enableSelectTfOps, False)\n self.assertEqual(metadata.options.forceSelectTfOps, False)\n self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER],\n metadata.options.modelOptimizationModes)\n\n # The default input and output types should be float.\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(np.float32, input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(np.float32, output_details[0]['dtype'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n @parameterized.named_parameters(('_INT8InputOutput', dtypes.int8),\n ('_UINT8InputOutput', dtypes.uint8),\n ('_INT16InputOutput', dtypes.int16))\n @test_util.run_v2_only\n def testInvalidPostTrainingDynamicRangeQuantization(\n self, inference_input_output_type):\n root, func, _ = self._getIntegerQuantizeModel()\n\n # Convert float model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Convert quantized model.\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],\n root)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n with self.assertRaises(ValueError) as error:\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n quantized_converter.convert()\n self.assertEqual(\n 'The inference_input_type and inference_output_type '\n 'must be tf.float32.', str(error.exception))\n\n @parameterized.named_parameters(\n ('EnableMlirQuantizer', True), # enable mlir quantizer\n ('DisableMlirQuantizer', False)) # disable mlir quantizer\n def testQuantizationRemovesQDQsForFloatIO(self, mlir_quantizer):\n func, calibration_gen = self._getSqrtModel()\n converter = lite.TFLiteConverterV2.from_concrete_functions(\n [func.get_concrete_function()])\n converter.representative_dataset = calibration_gen\n converter.optimizations = [lite.Optimize.DEFAULT]\n converter.experimental_new_quantizer = mlir_quantizer\n quantized_model = converter.convert()\n\n # Because assertions on the model later, we opt out applying default TFLite\n # delegates (i.e. 
the XNNPACK delegate).\n interpreter = Interpreter(\n model_content=quantized_model,\n experimental_op_resolver_type=OpResolverType\n .BUILTIN_WITHOUT_DEFAULT_DELEGATES)\n interpreter.allocate_tensors()\n # The model should have only one sqrt op.\n op_details = interpreter._get_ops_details()\n self.assertLen(op_details, 1)\n self.assertEqual(op_details[0]['op_name'], 'SQRT')\n\n @parameterized.named_parameters(\n ('_Default', False, False, dtypes.float32),\n ('_INT8InputOutput', False, False, dtypes.int8),\n ('_UINT8InputOutput', False, False, dtypes.uint8),\n ('_INT16Quantize', False, True, dtypes.float32),\n ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),\n ('_IntOnly', True, False, dtypes.float32),\n ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),\n ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),\n ('_IntOnly_INT16Quantize', True, True, dtypes.float32),\n ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))\n def testIntegerQuantization(self, is_int_only, is_int16_quantize,\n inference_input_output_type):\n root, func, calibration_gen = self._getIntegerQuantizeModel()\n\n # Convert float model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Convert quantized model.\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],\n root)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n if is_int_only:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8\n ]\n else:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS\n ]\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n # Check the conversion metadata.\n metadata = get_conversion_metadata(quantized_tflite_model)\n self.assertIsNotNone(metadata)\n expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER]\n if is_int16_quantize:\n expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_INT16]\n self.assertAllEqual(expected_opt_options,\n metadata.options.modelOptimizationModes)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n output_details[0]['dtype'])\n\n # Ensure that the quantized tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(tflite_model))\n\n @parameterized.named_parameters(\n ('_INT16Quantize_INT8InputOutput', True, dtypes.int8))\n def testInvalidIntegerQuantization(self, is_int16_quantize,\n inference_input_output_type):\n root, func, calibration_gen = self._getIntegerQuantizeModel()\n\n # Convert quantized model.\n quantized_converter = 
lite.TFLiteConverterV2.from_concrete_functions([func],\n root)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS\n ]\n with self.assertRaises(ValueError) as error:\n quantized_converter.inference_input_type = dtypes.int8\n quantized_converter.inference_output_type = dtypes.int8\n quantized_converter.convert()\n self.assertEqual(\n 'The inference_input_type and inference_output_type '\n \"must be in ['tf.float32', 'tf.int16'].\", str(error.exception))\n\n def testCalibrateAndQuantizeBuiltinInt16(self):\n root, func, calibration_gen = self._getIntegerQuantizeModel()\n\n # Convert float model.\n float_converter = lite.TFLiteConverterV2.from_concrete_functions([func],\n root)\n float_tflite_model = float_converter.convert()\n self.assertIsNotNone(float_tflite_model)\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)\n # TODO(b/156309549): We should add INT16 to the builtin types.\n converter.optimizations = [lite.Optimize.DEFAULT]\n converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.representative_dataset = calibration_gen\n converter._experimental_calibrate_only = True\n calibrated_tflite = converter.convert()\n quantized_tflite_model = mlir_quantize(\n calibrated_tflite, inference_type=_types_pb2.QUANTIZED_INT16)\n\n self.assertIsNotNone(quantized_tflite_model)\n\n # The default input and output types should be float.\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(np.float32, input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(np.float32, output_details[0]['dtype'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n @test_util.run_v2_only\n def testSignatureDefs(self):\n \"\"\"Test converting SignatureDef is correct and uses SignatureDef API.\"\"\"\n root = self._getMultiFunctionModel()\n input_data = tf.constant(1., shape=[1])\n add_func = root.add.get_concrete_function(input_data)\n\n converter = lite.TFLiteConverterV2([add_func], trackable_obj=root)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = add_func(input_data)\n interpreter = Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n results = self._evaluateTFLiteModelUsingSignatureDef(\n tflite_model, 'serving_default', {'x': input_data})\n self.assertLen(list(results.keys()), 1)\n self.assertStartsWith(list(results.keys())[0], 'output')\n self.assertAllClose(\n expected_value.numpy(),\n results[signature_defs['serving_default']['outputs'][0]])\n\n # Verify the SignatureDef structure returned is as expected.\n self.assertEqual(len(signature_defs), 1)\n self.assertEqual(list(signature_defs.keys()), ['serving_default'])\n self.assertEqual(len(signature_defs.values()), 1)\n self.assertEqual(\n list(signature_defs['serving_default'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['serving_default']['inputs'], ['x'])\n self.assertLen(list(signature_defs['serving_default']['outputs']), 1)\n 
self.assertStartsWith(\n list(signature_defs['serving_default']['outputs'])[0], 'output')\n\n @test_util.run_v2_only\n def testNoSignatureDefsWhenTrackingObjIsNone(self):\n \"\"\"Test converting SignatureDef is correct and uses SignatureDef API.\"\"\"\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.f.get_concrete_function(input_data)\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n None)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n # Verify that there is no SignatureDef structure found.\n self.assertEqual(len(signature_defs), 0)\n\n @test_util.run_v2_only\n def testNoSignatureDefsWhenInvalidTrackingObjIsGiven(self):\n \"\"\"Test converting SignatureDef is correct and uses SignatureDef API.\"\"\"\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.f.get_concrete_function(input_data)\n\n converter = lite.TFLiteConverterV2.from_concrete_functions(\n [concrete_func], trackable_obj=tracking.AutoTrackable())\n tflite_model = converter.convert()\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n # Verify that there is no SignatureDef structure found.\n self.assertEqual(len(signature_defs), 0)\n\n @test_util.run_v2_only\n def testTrackbleObject(self):\n \"\"\"Test converting with trackable objects.\"\"\"\n root = self._getMultiFunctionModel()\n input_data = tf.constant(1., shape=[1])\n add_func = root.add.get_concrete_function(input_data)\n\n converter = lite.TFLiteConverterV2.from_concrete_functions(\n [add_func], trackable_obj=root)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = add_func(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n def _getTrainingTimeQuantizedModel(self):\n\n class QLinear(tf.keras.layers.Layer):\n\n def __init__(self, units=3, **kwargs):\n super(QLinear, self).__init__(**kwargs)\n self.units = units\n\n def build(self, input_shape):\n self.w = self.add_weight(\n 'weight',\n shape=(input_shape[-1], self.units),\n initializer='random_normal',\n trainable=True)\n self.min_var = self.add_weight(\n 'min',\n initializer=tf.keras.initializers.Constant(-6.0),\n trainable=False)\n self.max_var = self.add_weight(\n 'max',\n initializer=tf.keras.initializers.Constant(6.0),\n trainable=False)\n\n def call(self, inputs):\n x = tf.quantization.fake_quant_with_min_max_vars(\n inputs, self.min_var, self.max_var)\n\n w_fq = tf.quantization.fake_quant_with_min_max_vars(\n self.w, self.min_var, self.max_var)\n x = tf.matmul(x, w_fq)\n\n x = tf.quantization.fake_quant_with_min_max_vars(\n x, self.min_var, self.max_var)\n\n return x\n\n return tf.keras.Sequential(QLinear(3, input_shape=(2,)))\n\n @parameterized.named_parameters(\n ('_DefaultFLOAT32InputOutput', dtypes.float32),\n ('_INT8InputOutput', dtypes.int8), ('_UINT8InputOutput', dtypes.uint8))\n @test_util.run_v2_only\n def testTrainingTimeQuantization(self, inference_input_output_type):\n model = self._getTrainingTimeQuantizedModel()\n\n float_converter = lite.TFLiteConverterV2.from_keras_model(model)\n float_tflite_model = float_converter.convert()\n self.assertIsNotNone(float_tflite_model)\n\n quantized_converter 
= lite.TFLiteConverterV2.from_keras_model(model)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n # Check the conversion metadata.\n metadata = get_conversion_metadata(quantized_tflite_model)\n self.assertIsNotNone(metadata)\n self.assertAllEqual(\n [metadata_fb.ModelOptimizationMode.QUANTIZATION_AWARE_TRAINING],\n metadata.options.modelOptimizationModes)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n output_details[0]['dtype'])\n\n # Ensure that the quantized tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n @test_util.run_v2_only\n def testNewQuantizer(self):\n \"\"\"Test the model quantized by the new converter.\"\"\"\n root, func, calibration_gen = self._getIntegerQuantizeModel()\n\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],\n root)\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8\n ]\n quantized_converter.representative_dataset = calibration_gen\n\n # default quantizer\n quantized_converter.experimental_new_quantizer = False\n old_tflite = quantized_converter.convert()\n\n # new quantizer\n quantized_converter.experimental_new_quantizer = True\n new_tflite = quantized_converter.convert()\n\n for _ in range(5):\n input_data = tf.constant(\n np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32))\n old_value = self._evaluateTFLiteModel(old_tflite, [input_data])\n new_value = self._evaluateTFLiteModel(new_tflite, [input_data])\n self.assertAllClose(old_value, new_value, atol=1e-01)\n\n @test_util.run_v2_only\n def testEmbeddings(self):\n \"\"\"Test model with embeddings.\"\"\"\n input_data = tf.constant(\n np.array(np.random.random_sample((20)), dtype=np.int32))\n\n class EmbeddingModel(tf.keras.Model):\n\n def __init__(self):\n super(EmbeddingModel, self).__init__()\n self.shared_weights = self.add_weight(\n 'weights',\n shape=(2000, 300),\n dtype=tf.float32,\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=300**(-0.5)))\n\n @tf.function(input_signature=[tf.TensorSpec(shape=(20), dtype=tf.int32)])\n def func(self, x):\n return tf.gather(self.shared_weights, x)\n\n # Building the model.\n root = EmbeddingModel()\n concrete_func = root.func.get_concrete_function()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.func(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertAllClose(expected_value.numpy(), actual_value[0], atol=1e-05)\n\n @test_util.run_v2_only\n def testGraphDebugInfo(self):\n \"\"\"Test a concrete function has debug info captured.\"\"\"\n root = tracking.AutoTrackable()\n root.v1 = tf.Variable(3.)\n root.f = tf.function(lambda x: root.v1 * x)\n input_data = tf.constant(1., shape=[1])\n 
concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n converter.convert()\n self._assertValidDebugInfo(converter._debug_info)\n\n def _getIntegerQuantizationModelWithFlexOp(self):\n np.random.seed(0)\n\n root = tracking.AutoTrackable()\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[3, 3, 3, 3, 3], dtype=tf.float32)\n ])\n def func(inp):\n tanh = tf.math.tanh(inp)\n # Flex delegate will merge the consecutive conv3d and erf ops into one\n # Delegate node.\n conv3d = tf.nn.conv3d(\n tanh,\n tf.ones([3, 3, 3, 3, 3]),\n strides=[1, 1, 1, 1, 1],\n padding='SAME')\n erf = tf.math.erf(conv3d)\n output = tf.math.tanh(erf)\n return output\n\n def calibration_gen():\n for _ in range(5):\n yield [\n np.random.uniform(-1, 1, size=(3, 3, 3, 3, 3)).astype(np.float32)\n ]\n\n root.f = func\n return (root, root.f.get_concrete_function(), calibration_gen)\n\n @parameterized.named_parameters(\n ('_Default', False, False, dtypes.float32),\n ('_INT8InputOutput', False, False, dtypes.int8),\n ('_UINT8InputOutput', False, False, dtypes.uint8),\n ('_INT16Quantize', False, True, dtypes.float32),\n ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),\n ('_IntOnly', True, False, dtypes.float32),\n ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),\n ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),\n ('_IntOnly_INT16Quantize', True, True, dtypes.float32),\n ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))\n @test_util.run_v2_only\n def testIntegerQuantizationWithFlexOp(self, is_int_only, is_int16_quantize,\n inference_input_output_type):\n root, func, calibration_gen = self._getIntegerQuantizationModelWithFlexOp()\n\n quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(\n [func], root)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n if is_int_only:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.SELECT_TF_OPS\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.SELECT_TF_OPS\n ]\n else:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS,\n lite.OpsSet.SELECT_TF_OPS\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS\n ]\n\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n # Check the conversion metadata.\n metadata = get_conversion_metadata(quantized_tflite_model)\n self.assertIsNotNone(metadata)\n self.assertEqual(metadata.options.enableSelectTfOps, True)\n expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_FULL_INTEGER]\n if is_int16_quantize:\n expected_opt_options = [metadata_fb.ModelOptimizationMode.PTQ_INT16]\n self.assertAllEqual(expected_opt_options,\n metadata.options.modelOptimizationModes)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n 
self.assertLen(input_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     input_details[0]['dtype'])
    output_details = interpreter.get_output_details()
    self.assertLen(output_details, 1)
    self.assertEqual(inference_input_output_type.as_numpy_dtype,
                     output_details[0]['dtype'])

  def _getIntegerQuantizationModelWithUnsupportedOps(self):
    np.random.seed(0)

    root = tracking.AutoTrackable()

    @tf.function(input_signature=[
        tf.TensorSpec(shape=[3], dtype=tf.float32),
        tf.TensorSpec(shape=[3], dtype=tf.float32)
    ])
    def func(a, b):
      # The ceil kernel supports neither int8 nor int16 types.
      left = tf.math.ceil(a)
      right = tf.nn.tanh(b)
      add = tf.math.add(left, right)
      # The ceil kernel supports neither int8 nor int16 types.
      output = tf.math.ceil(add)
      return (output, right)

    def calibration_gen():
      for _ in range(5):
        yield [
            np.random.uniform(-1, 1, size=(3)).astype(np.float32),
            np.random.uniform(-1, 1, size=(3)).astype(np.float32)
        ]

    root.f = func
    return (root, root.f.get_concrete_function(), calibration_gen)

  @parameterized.named_parameters(
      ('_INT8InputOutput', False, False, dtypes.int8),
      ('_UINT8InputOutput', False, False, dtypes.uint8),
      ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
      ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
      ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
      ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16),
      ('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True),
      ('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True))
  @test_util.run_v2_only
  def testIntegerQuantizationWithUnsupportedOps(self,
                                                is_int_only,
                                                is_int16_quantize,
                                                inference_input_output_type,
                                                enable_mlir_quantizer=False):
    root, func, calib_gen = self._getIntegerQuantizationModelWithUnsupportedOps(
    )

    quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [func], root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.representative_dataset = calib_gen
    if is_int_only:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.TFLITE_BUILTINS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS
        ]
    else:
      if is_int16_quantize:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
            lite.OpsSet.TFLITE_BUILTINS
        ]
      else:
        quantized_converter.target_spec.supported_ops = [
            lite.OpsSet.TFLITE_BUILTINS
        ]

    quantized_converter.inference_input_type = inference_input_output_type
    quantized_converter.inference_output_type = inference_input_output_type
    quantized_converter.experimental_new_quantizer = enable_mlir_quantizer
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)

    expected_dtype = inference_input_output_type.as_numpy_dtype
    # Allow float32 for fallback on non-quantizable op.
    expected_ceil_dtype = (
        expected_dtype if enable_mlir_quantizer else dtypes.float32)

    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 2)
    self.assertEqual(input_details[0]['dtype'], expected_dtype)
self.assertEqual(input_details[1]['dtype'], expected_ceil_dtype)\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 2)\n self.assertEqual(output_details[0]['dtype'], expected_dtype)\n self.assertEqual(output_details[1]['dtype'], expected_ceil_dtype)\n\n def _getIntegerQuantizationModelWithControlFlow(self):\n def true_fn(x):\n return x\n\n def false_fn(x):\n return x\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[1, 2], dtype=tf.float32),\n tf.TensorSpec(shape=(), dtype=tf.bool)\n ])\n def model(x, b):\n x = x + x\n x = tf.cond(b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))\n return x + x\n\n def calibration_gen():\n for _ in range(5):\n yield [\n np.random.uniform(-1, 1, size=(\n 1,\n 2,\n )).astype(np.float32),\n tf.constant(True),\n ]\n for _ in range(5):\n yield [\n np.random.uniform(-1, 1, size=(\n 1,\n 2,\n )).astype(np.float32),\n tf.constant(False),\n ]\n\n return (model, model.get_concrete_function(), calibration_gen)\n\n @parameterized.named_parameters(\n ('_INT8InputOutput', False, False, dtypes.int8),\n ('_UINT8InputOutput', False, False, dtypes.uint8),\n ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),\n ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),\n ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),\n ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16),\n # TODO(b/198231624): Support control flow ops in MLIR quantizer\n # ('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True),\n # ('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True),\n )\n @test_util.run_v2_only\n def testIntegerQuantizationWithControlFlow(self,\n is_int_only,\n is_int16_quantize,\n inference_input_output_type,\n enable_mlir_quantizer=False):\n root, func, calib_gen = self._getIntegerQuantizationModelWithControlFlow()\n\n quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(\n [func], root)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calib_gen\n if is_int_only:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet\n .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS\n ]\n else:\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet\n .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,\n lite.OpsSet.TFLITE_BUILTINS\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS\n ]\n\n quantized_converter.inference_input_type = inference_input_output_type\n quantized_converter.inference_output_type = inference_input_output_type\n quantized_converter.experimental_new_quantizer = enable_mlir_quantizer\n\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n expected_dtype = inference_input_output_type.as_numpy_dtype\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 2)\n self.assertEqual(input_details[0]['dtype'], expected_dtype)\n self.assertEqual(input_details[1]['dtype'], dtypes.bool)\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(output_details[0]['dtype'], 
expected_dtype)

  @parameterized.named_parameters(
      ('_BlocklistedNoneWithLowering', None, None, True),
      ('_BlocklistedNoneWithoutLowering', None, None, False),
      ('_BlocklistedOpsWithLowering', {'CONV_2D'}, None, True),
      ('_BlocklistedOpsWithoutLowering', {'CONV_2D'}, None, False),
      ('_BlocklistedNodesWithLowering', None, {'PartitionedCall:0'}, True),
      ('_BlocklistedNodesWithoutLowering', None, {'Identity'}, False))
  @test_util.run_v2_only
  def testNewQuantizerBlocklistingArgs(self, denylisted_ops, denylisted_nodes,
                                       lower_to_saved_model):
    """Test the model quantized by the new converter with denylist options."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()
    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                         root)
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS_INT8
    ]
    quantized_converter.representative_dataset = calibration_gen
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.experimental_new_quantizer = True
    quantized_converter._experimental_calibrate_only = True
    quantized_converter.experimental_lower_to_saved_model = lower_to_saved_model
    calibrated = quantized_converter.convert()
    quantized_tflite_model = mlir_quantize(
        calibrated,
        denylisted_ops=denylisted_ops,
        denylisted_nodes=denylisted_nodes)
    interpreter = Interpreter(model_content=quantized_tflite_model)
    details = interpreter.get_tensor_details()
    num_quantized_tensors = sum(
        [1 for detail in details
         if len(detail['quantization_parameters']['scales'])])
    if denylisted_nodes or denylisted_ops:
      self.assertEqual(num_quantized_tensors, 0)
      return
    self.assertEqual(num_quantized_tensors, 4)  # quant, filter, bias, dequant

  @parameterized.named_parameters(
      ('_SingleLayer', False),
      ('_WholeModel', True),
  )
  @test_util.run_v2_only
  def testNewQuantizerNumericVerificationDebugMode(self, whole_model_verify):
    """Test the model quantized by the new converter with numeric verify ops."""
    root, func, calibration_gen = self._getIntegerQuantizeModel()

    quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
                                                                         root)
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS_INT8
    ]
    quantized_converter.representative_dataset = calibration_gen

    # Create a TFLite model with new quantizer.
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    quantized_converter.experimental_new_quantizer = True
    production_tflite = quantized_converter.convert()
    # Create a TFLite model with new quantizer and numeric verify ops.
    quantized_converter._experimental_calibrate_only = True
    calibrated = quantized_converter.convert()
    debug_mode_tflite = mlir_quantize(
        calibrated,
        enable_numeric_verify=True,
        enable_whole_model_verify=whole_model_verify)

    # Enabling debug mode should produce a different flatbuffer.
    self.assertNotEqual(production_tflite, debug_mode_tflite)

    # Check that the newly added ops are numeric verify ops.
    input_data = tf.constant(
        np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32))

    def examine_tflite_model(tflite_content, input_data):
      interpreter = Interpreter(
          model_content=tflite_content,
          experimental_op_resolver_type=OpResolverType
          .BUILTIN_WITHOUT_DEFAULT_DELEGATES)
      interpreter.allocate_tensors()
      input_details = interpreter.get_input_details()
      interpreter.set_tensor(input_details[0]['index'], input_data.numpy())
      interpreter.invoke()
      tensor_details = interpreter.get_tensor_details()
return {
          details['name']: interpreter.get_tensor(details['index'])
          for details in tensor_details
      }, tensor_details

    tflite_result, _ = examine_tflite_model(production_tflite, input_data)
    debug_mode_tflite_result, debug_tensor_details = examine_tflite_model(
        debug_mode_tflite, input_data)

    # MLIR-based quantizer should output flatbuffer model with `tfl.quantize`.
    num_production_quantize_ops = len([
        None for output_tensor_name in tflite_result
        if 'tfl.quantize' in output_tensor_name
    ])
    self.assertEqual(num_production_quantize_ops, 1)
    # MLIR-based quantizer should output flatbuffer model with `tfl.quantize`.
    num_debug_quantize_ops = len([
        None for output_tensor_name in debug_mode_tflite_result
        if 'tfl.quantize' in output_tensor_name
    ])
    # The two counts should be equal.
    self.assertEqual(num_production_quantize_ops, num_debug_quantize_ops)
    # The debug mode TFLite flatbuffer should contain more than zero
    # NumericVerify ops. Each name has the prefix "NumericVerify/{name}:{id}",
    # where {name} is the tensor name of the original quantized op's
    # activation and {id} is its tensor id.
    num_debug_ops = 0
    for output_tensor_name in debug_mode_tflite_result:
      if 'NumericVerify' in output_tensor_name:
        pos_end_prefix = len('NumericVerify/')
        pos_colon = output_tensor_name.rfind(':')
        self.assertEqual('NumericVerify/', output_tensor_name[:pos_end_prefix])
        tensor_id = int(output_tensor_name[pos_colon + 1:])
        original_tensor_name = output_tensor_name[pos_end_prefix:pos_colon]
        self.assertEqual(original_tensor_name,
                         debug_tensor_details[tensor_id]['name'])
        num_debug_ops += 1
    self.assertEqual(num_debug_ops, 1)
    # The number of debug ops should be equal to that of quantized ops.
    self.assertEqual(num_debug_ops, num_debug_quantize_ops)

  @parameterized.named_parameters(
      ('_PerChannelQuant', False, False),
      ('_PerChannelMlirQuant', False, True),
      ('_PerTensorQuant', True, False),
      ('_PerTensorMlirQuant', True, True),
      ('_PerChannelDynamicRange', False, False, False),
      ('_PerTensorDynamicRange', True, False, False))
  @test_util.run_v2_only
  def testDisablePerChannelQuantization(self, disable_per_channel=False,
                                        enable_mlir_quantizer=False,
                                        representative_dataset=True):
    k_conv_name = 'Conv2D1'
    # Dynamic range quant requires total num elements of filters > 1024.
    k_num_filters = 38
    root, func, calib_gen = self._getIntegerQuantizeModel(k_num_filters)
    quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [func], root)
    quantized_converter.optimizations = [lite.Optimize.DEFAULT]
    # Only feed calibration data when the test case asks for it; the
    # dynamic-range cases above rely on conversion without a dataset.
    if representative_dataset:
      quantized_converter.representative_dataset = calib_gen
    quantized_converter.target_spec.supported_ops = [
        lite.OpsSet.TFLITE_BUILTINS
    ]
    quantized_converter.experimental_new_quantizer = enable_mlir_quantizer
    if disable_per_channel:
      quantized_converter._experimental_disable_per_channel = (
          disable_per_channel)
    quantized_tflite_model = quantized_converter.convert()
    self.assertIsNotNone(quantized_tflite_model)

    interpreter = Interpreter(model_content=quantized_tflite_model)
    interpreter.allocate_tensors()
    detail = next((d for d in interpreter.get_tensor_details()
                   if d['name'] == k_conv_name))
    quant_params = detail['quantization_parameters']
    expected_num_params = 1 if disable_per_channel else k_num_filters
    self.assertLen(quant_params['scales'], expected_num_params)
    self.assertLen(quant_params['zero_points'], expected_num_params)

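# Note on the per-channel checks above: with per-channel quantization each
  # of the `k_num_filters` output channels carries its own (scale, zero_point)
  # pair, so `len(scales) == k_num_filters`, while per-tensor quantization
  # collapses this to a single pair. In both schemes a weight is reconstructed
  # as:
  #
  #   real_value = scale * (quantized_value - zero_point)
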
@parameterized.named_parameters(('MlirQuantize', True),\n ('TocoQuantize', False))\n @test_util.run_v2_only\n def testQuantizeBiasOverflow(self, enable_mlir_quantizer):\n \"\"\"Tests if the quantizer handles bias overflow by adjusting scales.\"\"\"\n input_data = np.array([[-1e-3, 1e-3]], dtype=np.float32)\n\n def calibration_gen():\n yield {'x': input_data}\n\n root = self._getMatMulModelWithSmallWeights()\n input_data = tf.constant([-1e-3, 1e-3], shape=(1, 2))\n concrete_func = root.matmul.get_concrete_function(input_data)\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n converter.optimizations = [lite.Optimize.DEFAULT]\n converter.representative_dataset = calibration_gen\n converter.experimental_new_quantizer = enable_mlir_quantizer\n quantized_model = converter.convert()\n\n interpreter = Interpreter(model_content=quantized_model)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n output_details = interpreter.get_output_details()\n output = interpreter.get_tensor(output_details[0]['index'])\n # the inputs and weights are far smaller than the biases, so the final\n # result should be equal to the biases.\n self.assertAllClose(root.bias, output.flatten())\n\n @test_util.run_v2_only\n def testOpVersion(self):\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[5, 5], dtype=tf.float32)])\n def custom_resize(image):\n # Add \"batch\" and \"channels\" dimensions\n image = image[tf.newaxis, ..., tf.newaxis]\n # ResizeBilinear version 3.\n resize1 = tf.compat.v1.image.resize_bilinear(\n image, [2, 2], half_pixel_centers=True)\n # ResizeBilinear version 1.\n resize2 = tf.compat.v1.image.resize_bilinear(image, [2, 2])\n return resize1 + resize2\n\n concrete_func = custom_resize.get_concrete_function()\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n custom_resize)\n tflite_model = converter.convert()\n model_object = schema_fb.Model.GetRootAsModel(tflite_model, 0)\n model = schema_fb.ModelT.InitFromObj(model_object)\n\n for operator in model.operatorCodes:\n if operator.builtinCode == schema_fb.BuiltinOperator.RESIZE_BILINEAR:\n # half_pixel_centers is supported by ResizeBilinear version 3.\n self.assertEqual(operator.version, 3)\n break\n\n @test_util.run_v2_only\n def testForceSelectTFOps(self):\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n # Check the conversion metadata.\n metadata = get_conversion_metadata(tflite_model)\n self.assertIsNotNone(metadata)\n self.assertEqual(metadata.options.forceSelectTfOps, True)\n\n # Check output value from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n def testExcludeConversionMetadata(self):\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n converter.exclude_conversion_metadata = True\n tflite_model = 
converter.convert()\n # Check the conversion metadata.\n metadata = get_conversion_metadata(tflite_model)\n self.assertIsNone(metadata)\n\n def testConversionMetadataForDynamicRange(self):\n func, _ = self._getSqrtModel()\n converter = lite.TFLiteConverterV2.from_concrete_functions(\n [func.get_concrete_function()])\n converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_model = converter.convert()\n # Check the conversion metadata.\n metadata = get_conversion_metadata(quantized_model)\n self.assertIsNotNone(metadata)\n self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_DYNAMIC_RANGE],\n metadata.options.modelOptimizationModes)\n\n def testConversionMetadataForFloat16(self):\n root, func, calibration_gen = self._getIntegerQuantizeModel()\n converter = lite.TFLiteConverterV2.from_concrete_functions([func], root)\n converter.optimizations = [lite.Optimize.DEFAULT]\n converter.representative_dataset = calibration_gen\n converter.target_spec.supported_types = [dtypes.float16]\n quantized_model = converter.convert()\n # Check the conversion metadata.\n metadata = get_conversion_metadata(quantized_model)\n self.assertIsNotNone(metadata)\n self.assertAllEqual([metadata_fb.ModelOptimizationMode.PTQ_FLOAT16],\n metadata.options.modelOptimizationModes)\n\n\nclass FromSavedModelTest(lite_v2_test_util.ModelTest):\n\n def _createV1SavedModel(self, shape):\n \"\"\"Create a simple SavedModel.\"\"\"\n saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n in_tensor_1 = tf.compat.v1.placeholder(\n shape=shape, dtype=tf.float32, name='inputB')\n in_tensor_2 = tf.compat.v1.placeholder(\n shape=shape, dtype=tf.float32, name='inputA')\n variable_node = tf.Variable(1.0, name='variable_node')\n out_tensor = in_tensor_1 + in_tensor_2 * variable_node\n inputs = {'x': in_tensor_1, 'y': in_tensor_2}\n outputs = {'z': out_tensor}\n sess.run(tf.compat.v1.variables_initializer([variable_node]))\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n def _createV2QATSavedModel(self, shape):\n \"\"\"Create a simple QAT SavedModel in TF 2.\"\"\"\n saved_model_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n input_name = 'input'\n output_name = 'scores'\n\n input_tensor = tf.keras.layers.Input((32, 32, 128), name=input_name)\n x = tf.quantization.fake_quant_with_min_max_args(input_tensor, -3.0, 3.0)\n x = tf.keras.layers.Conv2D(1, (3, 3))(x)\n x = tf.quantization.fake_quant_with_min_max_args(x, -3.0, 3.0)\n scores = tf.keras.layers.Reshape((-1,), name=output_name)(x)\n model = tf.keras.Model(input_tensor, scores)\n model.save(saved_model_dir)\n return saved_model_dir, input_name, output_name\n\n @test_util.run_v2_only\n def testV1SimpleModel(self):\n \"\"\"Test a SavedModel.\"\"\"\n with tf.Graph().as_default():\n saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 2)\n self.assertStartsWith(input_details[0]['name'], 'inputA')\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])\n self.assertEqual((0., 0.), 
input_details[0]['quantization'])

      self.assertStartsWith(input_details[1]['name'], 'inputB')
      self.assertEqual(np.float32, input_details[1]['dtype'])
      self.assertAllEqual([1, 16, 16, 3], input_details[1]['shape'])
      self.assertEqual((0., 0.), input_details[1]['quantization'])

      output_details = interpreter.get_output_details()
      self.assertLen(output_details, 1)
      self.assertStartsWith(output_details[0]['name'], 'add')
      self.assertEqual(np.float32, output_details[0]['dtype'])
      self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
      self.assertEqual((0., 0.), output_details[0]['quantization'])

  @parameterized.named_parameters(
      ('Default', False),
      ('UnfoldLargeConstant', True),
  )
  @test_util.run_v2_only
  def testUnfoldLargeConstant(self, unfold_large_constant):
    """Test unfolding large splat constant in a TF Lite model."""
    saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
    with tf.Graph().as_default():
      with tf.compat.v1.Session() as sess:
        in_tensor = tf.compat.v1.placeholder(
            shape=[1000, 1000], dtype=tf.float32, name='input')
        constant = tf.constant(value=1, dtype=tf.float32, shape=[1000, 1000])
        out_tensor = in_tensor + constant
        inputs = {'x': in_tensor}
        outputs = {'y': out_tensor}
        saved_model.simple_save(sess, saved_model_dir, inputs, outputs)

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter._experimental_unfold_large_splat_constant = unfold_large_constant
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

    model = util._convert_model_from_bytearray_to_object(tflite_model)
    if unfold_large_constant:
      self.assertEqual(model.operatorCodes[0].builtinCode,
                       schema_fb.BuiltinOperator.FILL)
      self.assertEqual(model.operatorCodes[1].builtinCode,
                       schema_fb.BuiltinOperator.ADD)
    else:
      self.assertEqual(model.operatorCodes[0].builtinCode,
                       schema_fb.BuiltinOperator.ADD)

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    input_details = interpreter.get_input_details()
    self.assertLen(input_details, 1)
    self.assertEqual('input:0', input_details[0]['name'])
    self.assertEqual(np.float32, input_details[0]['dtype'])
    self.assertAllEqual([1000, 1000], input_details[0]['shape'])
    self.assertEqual((0., 0.), input_details[0]['quantization'])

    output_details = interpreter.get_output_details()
    self.assertEqual('add:0', output_details[0]['name'])
    self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertAllEqual([1000, 1000], output_details[0]['shape'])
    self.assertEqual((0., 0.), output_details[0]['quantization'])

    interpreter.set_tensor(input_details[0]['index'],
                           np.ones(shape=[1000, 1000], dtype=np.float32))
    interpreter.invoke()
    self.assertAllEqual(
        np.full(shape=[1000, 1000], fill_value=2.0, dtype=np.float32),
        interpreter.get_tensor(output_details[0]['index']))

  @test_util.run_v2_only
  def testTF1HubFormattedModel(self):
    """Test a TF1 Hub formatted model."""
    saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])

    # A TF1 Hub model is based on a V1 SavedModel and omits the SavedModel
    # schema version setting.
    saved_model_proto = parse_saved_model(saved_model_dir)
    saved_model_proto.saved_model_schema_version = 0

    saved_model_pb_file_path = os.path.join(saved_model_dir, 'saved_model.pb')
    with file_io.FileIO(saved_model_pb_file_path, 'wb') as writer:
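# Writing the mutated proto back in place makes the directory read as a
      # TF1 Hub module (saved_model_schema_version == 0).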
writer.write(saved_model_proto.SerializeToString())

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()
    self.assertTrue(tflite_model)

  def _createV1ModelWithHashTableInitializer(self):
    # Create a v1 saved model with hash table initializers.
    tf.compat.v1.disable_eager_execution()
    saved_model_dir = os.path.join(self.get_temp_dir(),
                                   'savedmodel_with_hashtable')

    table_initializer = tf.lookup.KeyValueTensorInitializer(
        keys=['a', 'b', 'c', 'd'],
        values=[1, 2, 3, 4],
        key_dtype=tf.string,
        value_dtype=tf.int64)
    table = tf.lookup.StaticHashTable(
        table_initializer, default_value=tf.constant(-1, dtype=tf.int64))

    x = tf.compat.v1.placeholder(tf.string, shape=(), name='input')
    y = table.lookup(x)

    tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
    tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(y)

    signature_def_map = {
        'serving_default':
            tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
                inputs={'x': tensor_info_x},
                outputs={'y': tensor_info_y},
                method_name='some_function')
    }
    init_op = tf.compat.v1.tables_initializer()
    assets_collection = None

    sess = tf.compat.v1.Session()
    sess.run(tf.compat.v1.initializers.global_variables())

    builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
        saved_model_dir)
    builder.add_meta_graph_and_variables(
        sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
        signature_def_map,
        main_op=init_op,
        assets_collection=assets_collection,
        strip_default_attrs=True)
    builder.save()

    # Restore TF v2 behavior.
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.enable_eager_execution()
    return saved_model_dir

  @test_util.run_v2_only
  def testModelWithHashTableInitializer(self):
    """Test a model with saved_model's session initializer for hash tables."""
    saved_model_dir = self._createV1ModelWithHashTableInitializer()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    tflite_model = converter.convert()

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_data = np.array(['a', 'b', 'c', 'z'], dtype=np.string_)
    interpreter.resize_tensor_input(
        input_details[0]['index'], [4], strict=False)
    interpreter.allocate_tensors()

    interpreter.set_tensor(input_details[0]['index'], input_data)

    # Invoke multiple times to ensure the initializer graph runs only once.
    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual([1, 2, 3, -1], list(actual_value))

    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual([1, 2, 3, -1], list(actual_value))

    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual([1, 2, 3, -1], list(actual_value))

  def _createV1ModelWithMutableHashTable(self):
    # Create a v1 saved model with a mutable hash table.
    tf.compat.v1.disable_eager_execution()
    saved_model_dir = os.path.join(self.get_temp_dir(),
                                   'savedmodel_with_mutable_hashtable')

    table = tf.raw_ops.MutableHashTableV2(
        key_dtype=tf.string, value_dtype=tf.int64)
    x = tf.compat.v1.placeholder(tf.string, shape=(), name='input')
    keys = tf.constant(['a', 'b'], tf.string)
values = tf.constant([1, 5], tf.int64)
    default_value = tf.constant(-1, tf.int64)
    insert_call = tf.raw_ops.LookupTableInsertV2(
        table_handle=table, keys=keys, values=values)
    with tf.control_dependencies([insert_call]):
      y = tf.raw_ops.LookupTableFindV2(
          table_handle=table, keys=x, default_value=default_value)

    tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
    tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(y)

    signature_def_map = {
        'serving_default':
            tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
                inputs={'x': tensor_info_x},
                outputs={'y': tensor_info_y},
                method_name='some_function')
    }
    init_op = tf.compat.v1.tables_initializer()
    assets_collection = None

    sess = tf.compat.v1.Session()

    builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
        saved_model_dir)
    builder.add_meta_graph_and_variables(
        sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
        signature_def_map,
        main_op=init_op,
        assets_collection=assets_collection,
        strip_default_attrs=True)
    builder.save()

    # Restore TF v2 behavior.
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.enable_eager_execution()
    return saved_model_dir

  @test_util.run_v2_only
  def testModelWithMutableHashTable(self):
    """Test a model with saved_model's mutable hash table."""
    saved_model_dir = self._createV1ModelWithMutableHashTable()

    # Convert model and ensure model is not None.
    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
    ]
    tflite_model = converter.convert()

    # Check values from converted model.
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    input_data = np.array(['a', 'b', 'c'], dtype=np.string_)
    interpreter.resize_tensor_input(
        input_details[0]['index'], [3], strict=False)
    interpreter.allocate_tensors()

    interpreter.set_tensor(input_details[0]['index'], input_data)

    interpreter.invoke()
    actual_value = interpreter.get_tensor(output_details[0]['index'])
    self.assertEqual([1, 5, -1], list(actual_value))

  @test_util.run_v2_only
  def testConstModel(self):
    """Test a basic model with functions to make sure functions are inlined."""
    input_data = tf.constant(1., shape=[1])
    root = tracking.AutoTrackable()
    root.f = tf.function(lambda x: 2. 
* x)\n to_save = root.f.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, to_save)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @test_util.run_v2_only\n def testVariableModel(self):\n \"\"\"Test a basic model with Variables with saving/loading the SavedModel.\"\"\"\n root = self._getSimpleVariableModel()\n input_data = tf.constant(1., shape=[1])\n to_save = root.f.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, to_save)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n tflite_model = converter.convert()\n # Check the conversion metadata.\n metadata = get_conversion_metadata(tflite_model)\n self.assertIsNotNone(metadata)\n self.assertEqual(metadata.environment.modelType,\n metadata_fb.ModelType.TF_SAVED_MODEL)\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value.numpy(), actual_value)\n\n @parameterized.named_parameters(('EnableResourceVariables', True),\n ('DisableResourceVariables', False))\n @test_util.run_v2_only\n def testNativeVariablesModel(self, enable_resource_variables):\n \"\"\"Test a basic model with Variables with saving/loading the SavedModel.\"\"\"\n root = self._getSimpleModelWithVariables()\n input_data = tf.constant(1., shape=[1, 10])\n to_save = root.assign_add.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, to_save)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n converter.experimental_enable_resource_variables = enable_resource_variables\n\n if not enable_resource_variables:\n with self.assertRaises(convert.ConverterError) as error:\n tflite_model = converter.convert()\n self.assertIn(\n 'Variable constant folding is failed. 
Please consider using enabling '
          '`experimental_enable_resource_variables` flag in the TFLite '
          'converter object.',
          str(error.exception))
      return

    # Enable resource variables.
    tflite_model = converter.convert()

    # Check values from converted model.
    expected_value = root.assign_add(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
    for tf_result, tflite_result in zip(expected_value, actual_value[0]):
      self.assertAllClose(tf_result, tflite_result, atol=1e-05)

  @test_util.run_v2_only
  def testSignatures(self):
    """Test values for `signature_keys` argument."""
    root = self._getSimpleVariableModel()
    input_data = tf.constant(1., shape=[1])
    to_save = root.f.get_concrete_function(input_data)

    save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
    save(root, save_dir, to_save)

    # Convert model with invalid `signature_keys`.
    with self.assertRaises(ValueError) as error:
      _ = lite.TFLiteConverterV2.from_saved_model(
          save_dir, signature_keys=['INVALID'])
    self.assertIn("Invalid signature key 'INVALID'", str(error.exception))

    # Convert model with empty `signature_keys`.
    converter = lite.TFLiteConverterV2.from_saved_model(
        save_dir, signature_keys=[])
    tflite_model = converter.convert()

    # Check values from converted model.
    expected_value = root.f(input_data)
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
    self.assertEqual(expected_value.numpy(), actual_value)

  @test_util.run_v2_only
  def testSignatureDefsWithFullIntegerQuantization(self):
    # SETUP
    # 1. Define input shapes
    tf_input_shape = (32, 32, 128)
    tflite_input_shape = (1,) + tf_input_shape
    # 2. Define model
    tf_saved_model_dir, input_name, output_name = (
        self._createV2QATSavedModel(tf_input_shape))

    # MODEL 1: TFLite (float) model
    # 1. Create TFLite model
    converter = tf.lite.TFLiteConverter.from_saved_model(tf_saved_model_dir)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()
    # 2. Initialize the Interpreter
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]
    interpreter.resize_tensor_input(input_details['index'], tflite_input_shape)
    interpreter.allocate_tensors()
    signature_list = interpreter._get_full_signature_list()['serving_default']
    # 3. (Skip) Verify that signature def input/output tensors are in the model.
    # 4. Evaluate the model
    input_data = np.random.random(tflite_input_shape).astype(np.float32)
    result = self._evaluateTFLiteModelUsingSignatureDef(
        tflite_model, 'serving_default', {input_name: input_data})[output_name]

    # MODEL 2: TFLite (full integer quantized) model
    # 1. Create TFLite model
    converter = tf.lite.TFLiteConverter.from_saved_model(tf_saved_model_dir)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.inference_input_type = tf.int8
    converter.inference_output_type = tf.int8
    tflite_model_quant = converter.convert()
    # 2. Initialize the Interpreter
    interpreter = Interpreter(model_content=tflite_model_quant)
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]
    interpreter.resize_tensor_input(input_details['index'], tflite_input_shape)
    interpreter.allocate_tensors()
    # 3. 
Verify that signature def input/output tensors are in the model.\n all_indices = {item['index'] for item in interpreter.get_tensor_details()}\n signature_list = interpreter._get_full_signature_list()['serving_default']\n input_tensor_indices = set(signature_list['inputs'].values())\n assert input_tensor_indices.issubset(all_indices)\n output_tensor_indices = set(signature_list['outputs'].values())\n assert output_tensor_indices.issubset(all_indices)\n\n # 4. Evaluate the model\n input_data = np.random.random(tflite_input_shape)\n input_scale, input_zero_point = input_details['quantization']\n if (input_scale, input_zero_point) != (0.0, 0):\n input_data = input_data / input_scale + input_zero_point\n input_data = input_data.astype(input_details['dtype'])\n result_quant = self._evaluateTFLiteModelUsingSignatureDef(\n tflite_model_quant, 'serving_default',\n {input_name: input_data})[output_name]\n output_scale, output_zero_point = output_details['quantization']\n if (output_scale, output_zero_point) != (0.0, 0):\n result_quant = result_quant.astype(np.float32)\n result_quant = (result_quant - output_zero_point) * output_scale\n\n # COMPARE: Validate that results from both models are approx. the same.\n root_mean_squared = np.sqrt(np.mean((result-result_quant)**2))\n assert root_mean_squared < 1.0\n\n @test_util.run_v2_only\n def testSignatureDefs(self):\n \"\"\"Test converting SignatureDef is correct and uses SignatureDef API.\"\"\"\n root = self._getMultiFunctionModel()\n input_data_0 = tf.constant(1., shape=[1])\n input_data_1 = tf.constant(3., shape=[1])\n mul_add_func = root.mul_add.get_concrete_function(input_data_1,\n input_data_0)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, {'mul_add': mul_add_func})\n\n converter = lite.TFLiteConverterV2.from_saved_model(\n save_dir, signature_keys=['mul_add'])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.mul_add(input_data_1, input_data_0)\n interpreter = Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n results = self._evaluateTFLiteModelUsingSignatureDef(\n tflite_model, 'mul_add', {\n 'y': input_data_0,\n 'x': input_data_1\n })\n self.assertEqual(list(results.keys()), ['output_0'])\n self.assertEqual(expected_value.numpy(), results['output_0'])\n\n # Verify the SignatureDef structure returned is as expected.\n self.assertEqual(len(signature_defs), 1)\n self.assertEqual(list(signature_defs.keys()), ['mul_add'])\n self.assertEqual(len(signature_defs.values()), 1)\n self.assertEqual(\n list(signature_defs['mul_add'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y'])\n self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])\n\n @test_util.run_v2_only\n def testSignatureDefsWithDefaultValue(self):\n \"\"\"Test converting SignatureDef is correct and uses SignatureDef API.\n\n This test uses None as signature_key to test default behavior.\n \"\"\"\n root = self._getMultiFunctionModel()\n input_data_0 = tf.constant(1., shape=[1])\n input_data_1 = tf.constant(3., shape=[1])\n mul_add_func = root.mul_add.get_concrete_function(input_data_1,\n input_data_0)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, {'mul_add': mul_add_func})\n\n converter = lite.TFLiteConverterV2.from_saved_model(\n save_dir, signature_keys=['mul_add'])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n 
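# Inputs to the SignatureDef-based evaluation are keyed by signature input
    # name ('x', 'y'), so the ordering of the feed dict below does not matter.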
expected_value = root.mul_add(input_data_1, input_data_0)\n interpreter = Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n results = self._evaluateTFLiteModelUsingSignatureDef(\n tflite_model, None, {\n 'y': input_data_0,\n 'x': input_data_1\n })\n self.assertEqual(list(results.keys()), ['output_0'])\n self.assertEqual(expected_value.numpy(), results['output_0'])\n\n # Verify the SignatureDef structure returned is as expected.\n self.assertEqual(len(signature_defs), 1)\n self.assertEqual(list(signature_defs.keys()), ['mul_add'])\n self.assertEqual(len(signature_defs.values()), 1)\n self.assertEqual(\n list(signature_defs['mul_add'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y'])\n self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])\n\n @test_util.run_v2_only\n def testSignatureDefsQuantizedModel(self):\n \"\"\"Test converting SignatureDef on quantized model.\"\"\"\n root = self._getMultiFunctionModel()\n input_data_0 = tf.constant(1., shape=[1])\n input_data_1 = tf.constant(3., shape=[1])\n mul_add_func = root.mul_add.get_concrete_function(input_data_1,\n input_data_0)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, {'mul_add': mul_add_func})\n\n converter = lite.TFLiteConverterV2.from_saved_model(\n save_dir, signature_keys=['mul_add'])\n\n def representative_dataset_gen():\n for _ in range(2):\n yield {\n 'x':\n np.random.uniform(low=0, high=1,\n size=(1, 1)).astype(np.float32),\n 'y':\n np.random.uniform(low=0, high=1, size=(1, 1)).astype(np.float32)\n }\n\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset_gen\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n tflite_model = converter.convert()\n\n # Check signatures are valid from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n\n # Verify the SignatureDef structure returned is as expected.\n self.assertEqual(len(signature_defs), 1)\n self.assertEqual(list(signature_defs.keys()), ['mul_add'])\n self.assertEqual(len(signature_defs.values()), 1)\n self.assertEqual(\n list(signature_defs['mul_add'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['mul_add']['inputs'], ['x', 'y'])\n self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])\n\n @test_util.run_v2_only\n def testMultipleFunctionModel(self):\n \"\"\"Convert multiple functions in a multi-functional model.\"\"\"\n root = self._getMultiFunctionModel()\n input_data = tf.constant(1., shape=[1])\n add_func = root.add.get_concrete_function(input_data)\n sub_func = root.sub.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, {'add': add_func, 'sub': sub_func})\n\n # Try converting multiple functions.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n interpreter = tf.lite.Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n\n # Verify the SignatureDef structure returned is as expected.\n self.assertEqual(len(signature_defs), 2)\n self.assertEqual(list(signature_defs.keys()), ['add', 'sub'])\n self.assertEqual(len(signature_defs.values()), 2)\n self.assertEqual(list(signature_defs['add'].keys()), ['inputs', 'outputs'])\n 
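# Each exported concrete function shows up as its own signature with
    # independent input and output maps.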
self.assertCountEqual(signature_defs['add']['inputs'], ['x'])\n self.assertEqual(list(signature_defs['add']['outputs']), ['output_0'])\n self.assertEqual(list(signature_defs['sub'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['sub']['inputs'], ['x'])\n self.assertEqual(list(signature_defs['sub']['outputs']), ['output_0'])\n\n # Verify the Signature runner executions.\n add_signature_runner = interpreter.get_signature_runner('add')\n add_output = add_signature_runner(x=input_data)\n self.assertEqual(add_output['output_0'], 3)\n\n sub_signature_runner = interpreter.get_signature_runner('sub')\n sub_output = sub_signature_runner(x=input_data)\n self.assertEqual(sub_output['output_0'], -2)\n\n @parameterized.named_parameters(\n ('_Default', False, False, dtypes.float32, False),\n ('_DefaultMlirQuant', False, False, dtypes.float32, True),\n ('_INT8InputOutput', False, False, dtypes.int8),\n ('_UINT8InputOutput', False, False, dtypes.uint8),\n ('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),\n ('_IntOnly_INT8InputOutput', True, False, dtypes.int8),\n ('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),\n ('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16),\n ('_IntOnly_INT8InputOutputMlirQuant', True, False, dtypes.int8, True),\n ('_IntOnly_UINT8InputOutputMlirQuant', True, False, dtypes.uint8, True))\n @test_util.run_v2_only\n def testMultipleFunctionQuantizedModel(self,\n is_int_only,\n is_int16_quantize,\n inference_input_output_type,\n enable_mlir_quantizer=False):\n \"\"\"Convert multiple functions in a multi-functional model.\"\"\"\n root = self._getMultiFunctionModel()\n input_data = tf.constant(1., shape=[1])\n add_func = root.add.get_concrete_function(input_data)\n sub_func = root.sub.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, {'add': add_func, 'sub': sub_func})\n\n # Try converting multiple functions.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n\n def representative_dataset_gen():\n for _ in range(2):\n yield ('add', {\n 'x': np.random.uniform(low=0, high=1, size=(1,)).astype(np.float32),\n })\n for _ in range(2):\n yield ('sub', {\n 'x': np.random.uniform(low=0, high=1, size=(1,)).astype(np.float32),\n })\n\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = representative_dataset_gen\n if is_int_only:\n if is_int16_quantize:\n converter.target_spec.supported_ops = [\n lite.OpsSet\n .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8\n ]\n else:\n converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS_INT8]\n else:\n if is_int16_quantize:\n converter.target_spec.supported_ops = [\n lite.OpsSet\n .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8\n ]\n else:\n converter.target_spec.supported_ops = [lite.OpsSet.TFLITE_BUILTINS]\n converter.inference_input_type = inference_input_output_type\n converter.inference_output_type = inference_input_output_type\n converter.experimental_new_quantizer = enable_mlir_quantizer\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n interpreter = tf.lite.Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n\n # Verify the SignatureDef structure returned is as expected.\n self.assertEqual(len(signature_defs), 2)\n self.assertEqual(list(signature_defs.keys()), ['add', 'sub'])\n self.assertEqual(len(signature_defs.values()), 2)\n 
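# Both signatures survive quantization; the representative dataset above
    # supplies calibration data per signature key ('add' and 'sub').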
self.assertEqual(list(signature_defs['add'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['add']['inputs'], ['x'])\n self.assertEqual(list(signature_defs['add']['outputs']), ['output_0'])\n self.assertEqual(list(signature_defs['sub'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['sub']['inputs'], ['x'])\n self.assertEqual(list(signature_defs['sub']['outputs']), ['output_0'])\n\n # Verify the Signature runner executions.\n input_data = tf.constant(\n np.random.uniform(-1, 1, size=(1,)).astype(\n inference_input_output_type.as_numpy_dtype))\n add_signature_runner = interpreter.get_signature_runner('add')\n add_output = add_signature_runner(x=input_data)\n self.assertIsNotNone(add_output['output_0'])\n input_details = add_signature_runner.get_input_details()\n self.assertLen(input_details, 1)\n self.assertStartsWith(input_details['x']['name'], 'add_x:0')\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n input_details['x']['dtype'])\n self.assertTrue(([1] == input_details['x']['shape']).all())\n if inference_input_output_type == dtypes.float32:\n self.assertEqual((0.0, 0), input_details['x']['quantization'])\n\n sub_signature_runner = interpreter.get_signature_runner('sub')\n sub_output = sub_signature_runner(x=input_data)\n self.assertIsNotNone(sub_output['output_0'])\n output_details = sub_signature_runner.get_output_details()\n self.assertLen(output_details, 1)\n self.assertStartsWith(output_details['output_0']['name'],\n 'StatefulPartitionedCall:0')\n self.assertEqual(inference_input_output_type.as_numpy_dtype,\n output_details['output_0']['dtype'])\n self.assertTrue(([1] == output_details['output_0']['shape']).all())\n if inference_input_output_type == dtypes.float32:\n self.assertEqual((0.0, 0), output_details['output_0']['quantization'])\n\n @test_util.run_v2_only\n def testMultipleFunctionModelWithSharedWeight(self):\n \"\"\"Convert multiple functions with the shared weight.\"\"\"\n root = self._getMultiFunctionModelWithSharedWeight()\n input_data = tf.constant(1., shape=[1])\n add_func = root.add.get_concrete_function(input_data)\n sub_func = root.sub.get_concrete_function(input_data)\n mul_func = root.mul.get_concrete_function(input_data)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, {'add': add_func, 'sub': sub_func, 'mul': mul_func})\n\n # Try converting multiple functions.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Make sure that the weight tensors are shared.\n self.assertLess(len(tflite_model), 1100000)\n\n # TODO(b/184696047): Write down the test codes for multiple signature\n # runners once the Python API is ready to use.\n interpreter = tf.lite.Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n self.assertLen(signature_defs, 3)\n add_signature_runner = interpreter.get_signature_runner('add')\n sub_signature_runner = interpreter.get_signature_runner('sub')\n mul_signature_runner = interpreter.get_signature_runner('mul')\n self.assertIsNotNone(add_signature_runner)\n self.assertIsNotNone(sub_signature_runner)\n self.assertIsNotNone(mul_signature_runner)\n\n @test_util.run_v2_only\n def testNoConcreteFunctionModel(self):\n root = self._getMultiFunctionModel()\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir)\n\n with self.assertRaises(ValueError) as error:\n _ = 
lite.TFLiteConverterV2.from_saved_model(save_dir)\n self.assertIn('Only support at least one signature key.',\n str(error.exception))\n\n @test_util.run_v2_only\n def testKerasSequentialModel(self):\n \"\"\"Test a simple sequential tf.Keras model.\"\"\"\n input_data = tf.constant(1., shape=[1, 1])\n\n x = np.array([[1.], [2.]])\n y = np.array([[2.], [4.]])\n\n model = tf.keras.models.Sequential([\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(1),\n ])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(x, y, epochs=1)\n\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(model, save_dir)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value, actual_value)\n\n @test_util.run_v2_only\n def testGraphDebugInfo(self):\n \"\"\"Test a SavedModel has debug info captured.\"\"\"\n input_data = tf.constant(1., shape=[1])\n root = tracking.AutoTrackable()\n root.f = tf.function(lambda x: 2. * x)\n to_save = root.f.get_concrete_function(input_data)\n options = save_options.SaveOptions(save_debug_info=True)\n save_dir = os.path.join(self.get_temp_dir(), 'saved_model')\n save(root, save_dir, to_save, options)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_saved_model(save_dir)\n converter.convert()\n self._assertValidDebugInfo(converter._debug_info)\n\n @test_util.run_v2_only\n def testNonStatefulConvLSTM2D(self):\n \"\"\"Test saved model with non stateful ConvLSTM2D keras layer.\"\"\"\n # Create keras model\n model = tf.keras.Sequential([\n tf.keras.layers.ConvLSTM2D(\n 32, (3, 3),\n padding='same',\n return_sequences=True,\n stateful=False,\n batch_input_shape=(1, 1, 10, 10, 1))\n ])\n model.compile()\n\n # Export the keras model to saved model.\n saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_lstm_2d')\n model.save(saved_model_dir, save_format='tf', include_optimizer=False)\n\n converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n @test_util.run_v2_only\n def testKerasConvLSTM2DWithMoreThanOneDilationRate(self):\n input_tensor = tf.keras.layers.Input(\n batch_size=8,\n shape=[9, 10, 11, 12],\n name='input_tensor',\n dtype=tf.float32)\n\n output = tf.keras.layers.ConvLSTM2D(\n filters=3,\n kernel_size=3,\n strides=1,\n padding='VALID',\n dilation_rate=2,\n use_bias=False,\n bias_initializer='ones',\n data_format='channels_last')(\n input_tensor)\n\n model = tf.keras.Model(inputs=[input_tensor], outputs=output)\n model.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n # Export the keras model to saved model.\n saved_model_dir = os.path.join(self.get_temp_dir(),\n 'conv_lstm_2d_with_dilation_rate')\n model.save(saved_model_dir, save_format='tf', include_optimizer=False)\n\n converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n def _createUnknownInputShapeModel(self):\n \"\"\"Create a 
simple SavedModel with unknown input.\"\"\"\n saved_model_dir = os.path.join(self.get_temp_dir(), 'unknown_input_shape')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n unknown_shape = tf.TensorShape(None)\n in_tensor = tf.compat.v1.placeholder(\n shape=unknown_shape, dtype=tf.float32, name='input')\n out_tensor = in_tensor + in_tensor\n inputs = {'input': in_tensor}\n outputs = {'output': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n @test_util.run_v2_only\n def testUnknownInputShapeModel(self):\n \"\"\"Test a SavedModel with an unknown input shape.\"\"\"\n saved_model_dir = self._createUnknownInputShapeModel()\n\n converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n input_data = np.array([1., 2., 3.], dtype=np.float32)\n interpreter.resize_tensor_input(\n input_details[0]['index'], [3], strict=False)\n interpreter.allocate_tensors()\n\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual([2., 4., 6.], list(actual_value))\n\n @parameterized.named_parameters(\n ('_PerChannelQuant', False, False),\n ('_PerChannelMlirQuant', False, True),\n ('_PerTensorQuant', True, False),\n ('_PerTensorMlirQuant', True, True),\n ('_PerChannelDynamicRange', False, False, True),\n ('_PerTensorDynamicRange', True, False, True))\n @test_util.run_v2_only\n def testDisablePerChannelQuantization(self,\n disable_per_channel=False,\n enable_mlir_quantizer=False,\n representative_dataset=True):\n # Dynamic range quant requires total num elements of filters > 1024.\n k_num_filters = 38\n model = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(k_num_filters, (3, 3), activation='relu')\n ])\n model.build(input_shape=(1, 5, 5, 3))\n saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_saved_model')\n save(model, saved_model_dir)\n k_conv_name = 'sequential/conv2d/Conv2D1'\n quantized_converter = tf.lite.TFLiteConverter.from_saved_model(\n saved_model_dir)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n if representative_dataset:\n def calib_gen():\n for _ in range(5):\n yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]\n quantized_converter.representative_dataset = calib_gen\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS\n ]\n quantized_converter.experimental_new_quantizer = enable_mlir_quantizer\n if disable_per_channel:\n quantized_converter._experimental_disable_per_channel = (\n disable_per_channel)\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n detail = next((d for d in interpreter.get_tensor_details()\n if d['name'] == k_conv_name))\n quant_params = detail['quantization_parameters']\n expected_num_params = k_num_filters\n if disable_per_channel:\n expected_num_params = 1\n self.assertLen(quant_params['scales'], expected_num_params)\n self.assertLen(quant_params['zero_points'], expected_num_params)\n\n @parameterized.named_parameters(\n ('_INT8Quant_INT32Bias', False, False, dtypes.int32, 
True),\n ('_INT16Quant_INT64Bias', True, False, dtypes.int64, True),\n ('_INT8Quant_INT32Bias_Set', False, True, dtypes.int32, True),\n ('_INT8Quant_INT64Bias_Set', False, True, dtypes.int64, False),\n ('_INT16Quant_INT32Bias_Set', True, True, dtypes.int32, True),\n ('_INT16Quant_INT64Bias_Set', True, True, dtypes.int64, True),\n ('_INT16Quant_FLOAT32Bias_Set', True, True, dtypes.float32, False),\n )\n @test_util.run_v2_only\n def testBiasQuantization(self, is_int16_quantize, explicitly_set_bias,\n bias_type, is_valid_bias_type):\n model = tf.keras.models.Sequential([\n tf.keras.layers.Dense(\n 1024, input_shape=[1024], activation=None, bias_initializer='ones')\n ])\n saved_model_dir = os.path.join(self.get_temp_dir(), 'dense_saved_model')\n save(model, saved_model_dir)\n k_dense_bias_name = 'dense/bias'\n quantized_converter = tf.lite.TFLiteConverter.from_saved_model(\n saved_model_dir)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n\n if explicitly_set_bias:\n quantized_converter._experimental_full_integer_quantization_bias_type = bias_type\n\n if is_int16_quantize:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet\n .EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8\n ]\n else:\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8\n ]\n\n def calibration_gen():\n for _ in range(5):\n yield [np.random.randn(1, 1024).astype(np.float32)]\n\n quantized_converter.representative_dataset = calibration_gen\n\n if not is_valid_bias_type:\n with self.assertRaisesRegex(ValueError, 'Expected bias type to be'):\n quantized_converter.convert()\n return\n\n quantized_tflite_model = quantized_converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n dense_bias = next((d for d in interpreter.get_tensor_details()\n if d['name'] == k_dense_bias_name))\n self.assertEqual(bias_type, dense_bias['dtype'])\n\n @parameterized.named_parameters(\n ('_Int8PerChannelMlirDynamicRangeQuant', True, False, False),\n ('_Int8PerChannelTocoDynamicRangeQuant', False, False, False),\n ('_Int8PerTensorMlirDynamicRangeQuant', True, True, False),\n ('_Int8PerTensorTocoDynamicRangeQuant', False, True, False),\n ('_Float16DynamicRangeQuant', True, False, True))\n @test_util.run_v2_only\n def testMlirDynamicRangeQuantization(self, enable_new_dynamic_range_quantizer,\n disable_per_channel, test_float16):\n num_filters = 1024\n conv_name = 'sequential/conv2d/Conv2D1'\n model = tf.keras.models.Sequential(\n [tf.keras.layers.Conv2D(num_filters, (3, 3), activation='relu')])\n model.build(input_shape=(1, 32, 32, 3))\n saved_model_dir = self.create_tempdir()\n save(model, saved_model_dir.full_path)\n\n converter = tf.lite.TFLiteConverter.from_saved_model(\n saved_model_dir.full_path)\n converter.optimizations = [lite.Optimize.DEFAULT]\n converter._experimental_new_dynamic_range_quantizer = (\n enable_new_dynamic_range_quantizer)\n converter._experimental_disable_per_channel = disable_per_channel\n if test_float16:\n converter.target_spec.supported_types = [tf.float16]\n quantized_tflite_model = converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n quantized_weight = next(\n d for d in interpreter.get_tensor_details() if d['name'] == conv_name)\n quant_params = quantized_weight['quantization_parameters']\n\n if test_float16:\n 
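# For float16 quantization the weights carry no integer quantization\n      # parameters, so no scales or zero points are expected.\n      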
expected_num_params = 0\n else:\n expected_num_params = 1 if disable_per_channel else num_filters\n self.assertLen(quant_params['scales'], expected_num_params)\n self.assertLen(quant_params['zero_points'], expected_num_params)\n\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n if test_float16:\n self.assertEqual(np.float16, quantized_weight['dtype'])\n else:\n self.assertEqual(np.int8, quantized_weight['dtype'])\n\n\nclass FromKerasModelTest(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testSequentialModel(self):\n \"\"\"Test a simple sequential tf.Keras model.\"\"\"\n input_data = tf.constant(1., shape=[1, 1])\n\n # Create a simple Keras model.\n x = np.array([[1.], [2.]])\n y = np.array([[2.], [4.]])\n\n model = tf.keras.models.Sequential([\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(units=1, input_shape=[1])\n ])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(x, y, epochs=1)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n # Check the conversion metadata.\n metadata = get_conversion_metadata(tflite_model)\n self.assertIsNotNone(metadata)\n self.assertEqual(metadata.environment.modelType,\n metadata_fb.ModelType.KERAS_MODEL)\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertEqual(expected_value, actual_value)\n\n @test_util.run_v2_only\n def testSequentialMultiInputOutputModel(self):\n \"\"\"Test a tf.Keras model with multiple inputs and outputs.\"\"\"\n left_input_data = tf.constant(1., shape=[1, 3])\n right_input_data = tf.constant(1., shape=[1, 3])\n\n # Create a simple Keras model.\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 3))\n output_c_np = np.random.random((10, 3))\n output_d_np = np.random.random((10, 2))\n\n input_a = tf.keras.layers.Input(shape=(3,), name='input_a')\n input_b = tf.keras.layers.Input(shape=(3,), name='input_b')\n\n dense = tf.keras.layers.Dense(8, name='dense_1')\n interm_a = dense(input_a)\n interm_b = dense(input_b)\n merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge')\n\n output_c = tf.keras.layers.Dense(\n 3, activation='softmax', name='dense_2')(\n merged)\n output_d = tf.keras.layers.Dense(\n 2, activation='softmax', name='dense_3')(\n merged)\n\n model = tf.keras.models.Model(\n inputs=[input_a, input_b], outputs=[output_c, output_d])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n input_data = [left_input_data, right_input_data]\n expected_value = model.predict(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, input_data)\n for tf_result, tflite_result in zip(expected_value, actual_value):\n self.assertAllClose(tf_result, tflite_result, atol=1e-05)\n\n @test_util.run_v2_only\n def testGraphDebugInfo(self):\n \"\"\"Test a tf.Keras model has debug info captured.\"\"\"\n # Create a simple Keras model.\n x = [-1, 0, 1, 2, 3, 4]\n y = [-3, -1, 1, 3, 5, 7]\n model = 
tf.keras.models.Sequential(\n [tf.keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(x, y, epochs=1)\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n converter.convert()\n self._assertValidDebugInfo(converter._debug_info)\n\n @test_util.run_v2_only\n def testKerasFallbackPath(self):\n \"\"\"Test keras model which failed when exporting to the saved model.\"\"\"\n input_data = tf.constant(\n np.array(np.random.random_sample((20)), dtype=np.float32))\n\n class Model(tf.keras.Model):\n\n def __init__(self):\n super(Model, self).__init__()\n # A None name will cause a failure in exporting to a saved model.\n self.shared_weights = self.add_weight(\n name=None,\n shape=(20, 1),\n dtype=tf.float32,\n initializer=tf.random_normal_initializer(\n mean=0.0, stddev=300**(-0.5)))\n\n def call(self, x):\n return tf.add(self.shared_weights, x)\n\n # Building the model.\n model = Model()\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(input_data, input_data, epochs=1)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n @test_util.run_v2_only\n def testSignatureDefs(self):\n \"\"\"Test converting SignatureDef is correct and uses SignatureDef API.\"\"\"\n keras_model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(\n 32,\n kernel_size=3,\n padding='same',\n activation='relu',\n input_shape=(32, 32, 3),\n name='tensor'),\n tf.keras.layers.Dense(10, name='output_tensor')\n ])\n\n converter = lite.TFLiteConverterV2.from_keras_model(keras_model)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n input_data = tf.constant(\n np.random.uniform(-1, 1, size=(1, 32, 32, 3)).astype(np.float32))\n expected_value = keras_model(input_data)\n interpreter = Interpreter(model_content=tflite_model)\n signature_defs = interpreter.get_signature_list()\n results = self._evaluateTFLiteModelUsingSignatureDef(\n tflite_model, 'serving_default', {'tensor_input': input_data})\n self.assertEqual(list(results.keys()), ['output_tensor'])\n self.assertAllClose(expected_value.numpy(), results['output_tensor'])\n\n # Verify the SignatureDef structure returned is as expected.\n self.assertEqual(len(signature_defs), 1)\n self.assertEqual(list(signature_defs.keys()), ['serving_default'])\n self.assertEqual(len(signature_defs.values()), 1)\n self.assertEqual(\n list(signature_defs['serving_default'].keys()), ['inputs', 'outputs'])\n self.assertCountEqual(signature_defs['serving_default']['inputs'],\n ['tensor_input'])\n self.assertEqual(\n list(signature_defs['serving_default']['outputs']), ['output_tensor'])\n\n @parameterized.named_parameters(\n ('_PerChannelMlirDynamicRangeQuant', True, False, False),\n ('_PerChannelTocoDynamicRangeQuant', False, False, False),\n ('_PerTensorMlirDynamicRangeQuant', True, True, False),\n ('_PerTensorTocoDynamicRangeQuant', False, True, False),\n ('_Float16DynamicRangeQuant', True, False, True))\n @test_util.run_v2_only\n def testMlirDynamicRangeQuantization(self,\n enable_new_dynamic_range_quantizer,\n disable_per_channel, test_float16):\n num_filters = 1024\n conv_name = 'sequential/conv2d/Conv2D1'\n model = tf.keras.models.Sequential(\n [tf.keras.Input(shape=(32, 32, 3)),\n tf.keras.layers.Conv2D(num_filters, (3, 3), activation='relu')])\n model.build()\n\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n converter.optimizations = [lite.Optimize.DEFAULT]\n 
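# Note: the _experimental_* attributes set below are private converter knobs\n    # whose names may change between TF releases; one opts into the MLIR-based\n    # dynamic range quantizer and the other forces per-tensor rather than\n    # per-channel weight quantization.\n    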
converter._experimental_new_dynamic_range_quantizer = (\n enable_new_dynamic_range_quantizer)\n converter._experimental_disable_per_channel = disable_per_channel\n if test_float16:\n converter.target_spec.supported_types = [tf.float16]\n quantized_tflite_model = converter.convert()\n self.assertIsNotNone(quantized_tflite_model)\n\n interpreter = Interpreter(model_content=quantized_tflite_model)\n interpreter.allocate_tensors()\n quantized_weight = next(\n d for d in interpreter.get_tensor_details() if d['name'] == conv_name)\n quant_params = quantized_weight['quantization_parameters']\n\n if test_float16:\n expected_num_params = 0\n else:\n expected_num_params = 1 if disable_per_channel else num_filters\n self.assertLen(quant_params['scales'], expected_num_params)\n self.assertLen(quant_params['zero_points'], expected_num_params)\n\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n if test_float16:\n self.assertEqual(np.float16, quantized_weight['dtype'])\n else:\n self.assertEqual(np.int8, quantized_weight['dtype'])\n\n @parameterized.named_parameters([\n ('{}BitWeightOnly={}LowBit={}'.format(num_bits, weight_only, low_bit),\n num_bits, weight_only, low_bit) for num_bits, weight_only, low_bit\n in itertools.product((2, 4, 6), (True, False), (True, False))])\n @test_util.run_v2_only\n def testQATLowBitKerasModel(self, num_bits, weight_only, low_bit):\n bit_max = (1 << (num_bits - 1)) - 1\n bit_min = -bit_max\n tf_input_shape = (5, 5, 3)\n tflite_input_shape = (1,) + tf_input_shape\n model, input_name, output_name = (self._createV2QATLowBitKerasModel(\n tf_input_shape, weight_only, num_bits, bit_min, bit_max))\n input_data = np.linspace(\n 0, 6, np.prod(tflite_input_shape)).reshape(tflite_input_shape)\n tf_result = model(input_data)\n\n converter = tf.lite.TFLiteConverter.from_keras_model(model)\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n if low_bit:\n converter._experimental_low_bit_qat = True\n tflite_model = converter.convert()\n result = self._evaluateTFLiteModelUsingSignatureDef(\n tflite_model, 'serving_default',\n {input_name: input_data.astype(np.float32)})[output_name]\n self.assertAllClose(\n [np.linalg.norm(result - tf_result.numpy().astype(np.float32))], [0.0])\n interpreter = tf.lite.Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n num_8bit_activations = 0\n num_8bit_weights = 0\n kernel_name = ('model/conv_wrapper/Conv2D;model/conv_wrapper/'\n 'FakeQuantWithMinMaxVarsPerChannel')\n\n for detail in interpreter.get_tensor_details():\n if (detail['dtype'] == np.int8 and detail['name'] and\n detail['name'] == kernel_name):\n num_8bit_weights += 1\n weights = interpreter.get_tensor(detail['index'])\n if low_bit:\n self.assertFalse((bit_min > weights).any() or\n (weights > bit_max).any())\n else:\n self.assertTrue((bit_min > weights).any() or\n (weights > bit_max).any())\n self.assertIn('scales', detail['quantization_parameters'])\n if low_bit and detail['quantization_parameters']['scales']:\n self.assertAllClose(\n detail['quantization_parameters']['scales'], [1.0])\n elif detail['dtype'] == np.int8 and detail['name']:\n self.assertFalse(weight_only)\n self.assertIn('scales', detail['quantization_parameters'])\n if detail['quantization_parameters']['scales']:\n self.assertAllClose(\n detail['quantization_parameters']['scales'], [6/255])\n num_8bit_activations += 1\n\n 
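# Per the loop above, the conv kernel should appear as exactly one int8\n    # weight tensor, except in the weight-only case without the low-bit path.\n    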
self.assertEqual(num_8bit_weights, 0 if weight_only and not low_bit else 1)\n    # 3 activations with full integer: conv_input, conv_output, reshape_output\n    self.assertEqual(num_8bit_activations, 0 if weight_only else 3)\n\n\nclass FromJaxModelTest(lite_v2_test_util.ModelTest):\n\n  @test_util.run_v2_only\n  def testInvalidInputsModel(self):\n    if DISABLE_JAX_TEST:\n      return\n\n    def simple_model(input1, input2):\n      return jnp.sin(input1) + jnp.cos(input2)\n\n    input_tensor = jnp.zeros([10, 10])\n    # Invalid case: no serving_func is specified.\n    converter = lite.TFLiteConverterV2.experimental_from_jax(\n        None, [{\n            'input1': input_tensor\n        }])\n    with self.assertRaisesRegex(ValueError, 'No serving func is specified.'):\n      converter.convert()\n\n    # Invalid case: no inputs are specified.\n    converter = lite.TFLiteConverterV2.experimental_from_jax([simple_model],\n                                                             None)\n    with self.assertRaisesRegex(ValueError, 'Input tensors are not specified.'):\n      converter.convert()\n\n    converter = lite.TFLiteConverterV2.experimental_from_jax([simple_model], [])\n    with self.assertRaisesRegex(ValueError, 'Input tensors are not specified.'):\n      converter.convert()\n\n    # Invalid case: input_tensor is not wrapped in a list.\n    converter = lite.TFLiteConverterV2.experimental_from_jax([simple_model],\n                                                             input_tensor)\n    with self.assertRaisesRegex(\n        ValueError,\n        'The truth value of an array with more than one element is ambiguous.'):\n      converter.convert()\n\n    # Invalid case: only partial inputs are provided.\n    converter = lite.TFLiteConverterV2.experimental_from_jax(\n        [simple_model], [[('input1', input_tensor)]])\n    with self.assertRaisesRegex(\n        ValueError, 'Failed to convert the given Jax function to hlo.'):\n      converter.convert()\n\n    # Invalid case: serving function count does not match the input mapping.\n    converter = lite.TFLiteConverterV2.experimental_from_jax(\n        [simple_model, simple_model], [[\n            ('input1', input_tensor),\n            ('input2', input_tensor),\n        ]])\n    with self.assertRaisesRegex(\n        ValueError,\n        'Input tensor mapping len 1 does not match serving func len 2.'):\n      converter.convert()\n\n    # Invalid case: multiple serving functions are provided.\n    converter = lite.TFLiteConverterV2.experimental_from_jax(\n        [simple_model, simple_model], [[\n            ('input1', input_tensor),\n            ('input2', input_tensor),\n        ], [\n            ('input1', input_tensor),\n            ('input2', input_tensor),\n        ]])\n    with self.assertRaisesRegex(\n        ValueError, 'Currently only support single serving function.'):\n      converter.convert()\n\n  @test_util.run_v2_only\n  def testSingleInputModel(self):\n    if DISABLE_JAX_TEST:\n      return\n\n    def single_input(input_tensor):\n      return jnp.sin(input_tensor)\n\n    # Convert model.\n    input_tensor = jnp.zeros([10, 10])\n    converter = lite.TFLiteConverterV2.experimental_from_jax(\n        [single_input], [[('input_tensor', input_tensor)]])\n    tflite_model = converter.convert()\n    # Check the conversion metadata.\n    metadata = get_conversion_metadata(tflite_model)\n    self.assertIsNotNone(metadata)\n    self.assertEqual(metadata.environment.modelType, metadata_fb.ModelType.JAX)\n\n    # Check values from converted_model\n    input_data = np.random.random_sample((10, 10))\n    tf_input_data = tf.constant(input_data, dtype=np.float32)\n    actual_value = self._evaluateTFLiteModel(tflite_model, [tf_input_data])[0]\n    expected_value = single_input(input_data)\n    self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n  @test_util.run_v2_only\n  def testMultipleInputsModel(self):\n    if DISABLE_JAX_TEST:\n      return\n\n    def multiple_inputs(input1, input2):\n      return input1 + input2\n\n    # Convert 
model.\n input1 = jnp.zeros([10, 10])\n input2 = jnp.zeros([10, 1])\n converter = lite.TFLiteConverterV2.experimental_from_jax(\n [multiple_inputs], [[('input1', input1), ('input2', input2)]])\n tflite_model = converter.convert()\n\n # Check values from converted_model\n input1_data = np.random.random_sample((10, 10))\n tf_input1_data = tf.constant(input1_data, dtype=np.float32)\n input2_data = np.random.random_sample((10, 1))\n tf_input2_data = tf.constant(input2_data, dtype=np.float32)\n actual_value = self._evaluateTFLiteModel(\n tflite_model, [tf_input1_data, tf_input2_data])[0]\n expected_value = multiple_inputs(input1_data, input2_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n @test_util.run_v2_only\n def testInputSignaturesModel(self):\n if DISABLE_JAX_TEST:\n return\n\n def multiple_inputs(input1, input2):\n return input1 + input2\n\n # Convert model.\n input1 = jnp.zeros([10, 10])\n input2 = jnp.zeros([10, 1])\n converter = lite.TFLiteConverterV2.experimental_from_jax(\n [multiple_inputs], [[('input1', input1), ('input2', input2)]])\n tflite_model = converter.convert()\n\n # Check values from converted_model\n input1_data = np.random.random_sample((10, 10))\n tf_input1_data = tf.constant(input1_data, dtype=np.float32)\n input2_data = np.random.random_sample((10, 1))\n tf_input2_data = tf.constant(input2_data, dtype=np.float32)\n actual_value = self._evaluateTFLiteModel(\n tflite_model, [tf_input1_data, tf_input2_data])[0]\n expected_value = multiple_inputs(input1_data, input2_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n @test_util.run_v2_only\n def testModelWithParams(self):\n if DISABLE_JAX_TEST:\n return\n\n def model(inputs, weights):\n return jnp.matmul(weights, inputs)\n\n weights = np.random.random_sample((10, 10))\n serving_func = functools.partial(model, weights=weights)\n\n # Convert model\n input_tensor = jnp.zeros([10, 10])\n converter = lite.TFLiteConverterV2.experimental_from_jax(\n [serving_func], [[('inputs', input_tensor)]])\n tflite_model = converter.convert()\n\n # Check values from converted_model\n input_data = np.random.random_sample((10, 10))\n tf_input_data = tf.constant(input_data, dtype=np.float32)\n actual_value = self._evaluateTFLiteModel(tflite_model, [tf_input_data])[0]\n expected_value = serving_func(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n @test_util.run_v2_only\n def testWhileLoop(self):\n if DISABLE_JAX_TEST:\n return\n\n def condition(x):\n return jnp.sum(x, keepdims=False) < 100\n\n def body(x):\n return jnp.add(x, 2.0)\n\n def model(x):\n result = jax.lax.while_loop(condition, body, x)\n return result[0]\n\n # Convert model.\n input_tensor = jnp.zeros([3, 3])\n converter = lite.TFLiteConverterV2.experimental_from_jax(\n [model], [[('x', input_tensor)]])\n tflite_model = converter.convert()\n\n # Check values from converted_model\n input_data = np.random.random_sample((3, 3))\n tf_input_data = tf.constant(input_data, dtype=np.float32)\n actual_value = self._evaluateTFLiteModel(tflite_model, [tf_input_data])[0]\n expected_value = model(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n\nclass ControlFlowTest(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testCond(self):\n input_data = {\n 'x': tf.constant([1., 2.], shape=[1, 2]),\n 'b': tf.constant(True)\n }\n\n weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)\n\n def true_fn(x):\n return tf.matmul(x, weights)\n\n def false_fn(x):\n return 
tf.add(x, weights)\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[1, 2], dtype=tf.float32),\n tf.TensorSpec(shape=(), dtype=tf.bool)\n ])\n def model(x, b):\n return tf.cond(\n b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))\n\n concrete_func = model.get_concrete_function()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n model)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(**input_data)\n actual_value = self._evaluateTFLiteModel(\n tflite_model, [input_data['x'], input_data['b']])[0]\n self.assertAllClose(expected_value, actual_value)\n\n @test_util.run_v2_only\n def testConverterErrorOnControlFlowV1Ops(self):\n filename = resource_loader.get_path_to_datafile(\n 'testdata/control_flow_v1_saved_model')\n converter = lite.TFLiteConverterV2.from_saved_model(filename)\n with self.assertRaises(convert.ConverterError) as error:\n converter.convert()\n self.assertIn(\n 'Failed to functionalize Control Flow V1 ops. Consider using Control '\n 'Flow V2 ops instead. See https://www.tensorflow.org/api_docs/python/'\n 'tf/compat/v1/enable_control_flow_v2.', str(error.exception))\n\n @test_util.run_v2_only\n def testStaticRnn(self):\n input_data = tf.constant(\n np.array(np.random.random_sample((3, 10)), dtype=np.float32))\n\n cell = tf.keras.layers.LSTMCell(10)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[3, 10], dtype=tf.float32)])\n def model(x):\n seq = tf.split(x, 3, 0)\n return rnn.static_rnn(cell, seq, dtype=tf.float32, sequence_length=[1])\n\n concrete_func = model.get_concrete_function()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n model)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(input_data)[0]\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])\n for expected, actual in zip(expected_value, actual_value):\n self.assertAllClose(expected, actual)\n\n @test_util.run_v2_only\n def testWhileLoop(self):\n input_data = tf.constant([1., 2., 3., 4.], shape=[2, 2])\n\n weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)\n\n def condition(x):\n return tf.reduce_sum(x) < 100\n\n def body(x):\n return tf.add(x, weights)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[2, 2], dtype=tf.float32)])\n def model(x):\n return tf.while_loop(condition, body, [x])\n\n concrete_func = model.get_concrete_function()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n model)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(input_data)[0]\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n self.assertAllClose(expected_value, actual_value)\n\n @test_util.run_v2_only\n def testDynamicRnn(self):\n input_data = tf.constant(\n np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32))\n\n cell = tf.keras.layers.LSTMCell(10)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[3, 10, 10], dtype=tf.float32)])\n def model(x):\n rnn_layer = tf.keras.layers.RNN([cell], return_sequences=True)\n return rnn_layer(x)\n\n concrete_func = model.get_concrete_function()\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n model)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = 
concrete_func(input_data)\n lite_outputs = self._evaluateTFLiteModel(tflite_model, [input_data])\n self.assertLen(lite_outputs, 1)\n actual_value = lite_outputs[0]\n for expected, actual in zip(expected_value, actual_value):\n self.assertAllClose(expected, actual)\n\n @parameterized.named_parameters(\n ('LSTMBatchSizeOne', tf.keras.layers.LSTM, True),\n ('LSTM', tf.keras.layers.LSTM, False),\n ('SimpleRNNBatchSizeOne', tf.keras.layers.SimpleRNN, True),\n ('SimpleRNN', tf.keras.layers.SimpleRNN, False),\n ('GRUBatchSizeOne', tf.keras.layers.GRU, True),\n ('GRU', tf.keras.layers.GRU, False))\n @test_util.run_v2_only\n def testKerasRNN(self, rnn_layer, default_to_single_batch):\n input_data = tf.constant(\n np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))\n rnn_obj = rnn_layer(units=10, input_shape=(10, 10))\n model = tf.keras.models.Sequential([\n tf.keras.layers.Input(shape=(10, 10), name='input'),\n rnn_obj,\n ])\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n converter._experimental_default_to_single_batch_in_tensor_list_ops = default_to_single_batch\n if not default_to_single_batch:\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n @parameterized.named_parameters(('LSTM', tf.keras.layers.LSTM),\n ('SimpleRNN', tf.keras.layers.SimpleRNN),\n ('GRU', tf.keras.layers.GRU))\n @test_util.run_v2_only\n def testKerasRNNMultiBatches(self, rnn_layer):\n input_data = tf.constant(\n np.array(np.random.random_sample((4, 10, 10)), dtype=np.float32))\n # Specify a fixed batch size(4) for the test model.\n x = tf.keras.layers.Input(batch_shape=(4, 10, 10))\n y = rnn_layer(units=10, input_shape=(10, 10))(x)\n model = tf.keras.Model(inputs=[x], outputs=[y])\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n @parameterized.named_parameters(('ForceToUseBatchSizeOne', True),\n ('DontForceToUseBatchSizeOne', False))\n @test_util.run_v2_only\n def testKerasBidirectionalRNNReturnSequence(self, default_to_single_batch):\n input_data = tf.constant(\n np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Input(shape=(10, 10), name='input'))\n model.add(\n tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(units=10, return_sequences=True),\n input_shape=(10, 10)))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(5))\n model.add(tf.keras.layers.Activation('softmax'))\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n converter._experimental_default_to_single_batch_in_tensor_list_ops = default_to_single_batch\n if not default_to_single_batch:\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n\n # Check values from converted model.\n expected_value = 
model.predict(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n @parameterized.named_parameters(('ForceToUseBatchSizeOne', True),\n ('DontForceToUseBatchSizeOne', False))\n @test_util.run_v2_only\n def testKerasBidirectionalRNN(self, default_to_single_batch):\n input_data = tf.constant(\n np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Input(shape=(10, 10), name='input'))\n model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=10)))\n model.add(tf.keras.layers.Dense(5))\n model.add(tf.keras.layers.Activation('softmax'))\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_keras_model(model)\n converter._experimental_default_to_single_batch_in_tensor_list_ops = default_to_single_batch\n if not default_to_single_batch:\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n\n # Check values from converted model.\n expected_value = model.predict(input_data)\n self.assertAllClose(expected_value, actual_value, atol=1e-05)\n\n\nclass GrapplerTest(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testConstantFolding(self):\n # Constant folding handles the tf.broadcast_to operation which was not\n # supported by the TFLite at the time this test was added.\n input_data = tf.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.], shape=[3, 3])\n\n @tf.function\n def func(x):\n y_const = tf.constant([1., 2., 3.])\n y_broadcast = tf.broadcast_to(y_const, [3, 3])\n return tf.matmul(x, y_broadcast)\n\n root = tracking.AutoTrackable()\n root.f = func\n concrete_func = root.f.get_concrete_function(input_data)\n\n # Convert model.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n root)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = root.f(input_data)\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n self.assertAllClose(expected_value, actual_value)\n\n # Enable hybrid quantization, same result\n converter.optimizations = [lite.Optimize.DEFAULT]\n tflite_model = converter.convert()\n actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]\n self.assertAllClose(expected_value, actual_value)\n\n\nclass UnknownShapes(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testMatMul(self):\n input_data = tf.constant(\n np.array(np.random.random_sample((10, 4)), dtype=np.float32))\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[None, 4], dtype=tf.float32)])\n def model(in_tensor):\n shape = tf.shape(in_tensor)\n fill = tf.transpose(tf.fill(shape, 1.))\n return tf.matmul(fill, in_tensor)\n\n concrete_func = model.get_concrete_function()\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n model)\n tflite_model = converter.convert()\n\n # Check values from converted model.\n expected_value = concrete_func(input_data)\n actual_value = self._evaluateTFLiteModel(\n tflite_model, [input_data], input_shapes=[([-1, 4], [10, 4])])[0]\n self.assertAllClose(expected_value, actual_value, atol=1e-06)\n\n def _getIntegerQuantizeModelWithUnknownShapes(self):\n np.random.seed(0)\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[None, 33], dtype=tf.float32)])\n def model(input_tensor):\n \"\"\"Define a model with tf.MatMul and unknown shapes.\"\"\"\n # 
We need the tensor to have more than 1024 elements for quantize_weights\n # to kick in. Thus, the [33, 33] shape.\n const_tensor = tf.constant(\n np.random.uniform(low=-10., high=10., size=[33, 33]),\n shape=[33, 33],\n dtype=tf.float32,\n name='inputB')\n\n shape = tf.shape(input_tensor)\n fill = tf.transpose(tf.fill(shape, 1.))\n mult = tf.matmul(fill, input_tensor)\n return tf.matmul(mult, const_tensor)\n\n root = tracking.AutoTrackable()\n root.f = model\n concrete_func = root.f.get_concrete_function()\n\n def calibration_gen():\n for batch in range(5, 20, 5):\n for _ in range(5):\n yield [np.random.uniform(-1, 1, size=(batch, 33)).astype(np.float32)]\n\n return root, concrete_func, calibration_gen\n\n @test_util.run_v2_only\n def testMatMulQuantize(self):\n root, concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes()\n float_converter = lite.TFLiteConverterV2.from_concrete_functions(\n [concrete_func], root)\n float_tflite_model = float_converter.convert()\n\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(\n [concrete_func], root)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_tflite_model = quantized_converter.convert()\n\n # The default input and output types should be float.\n quantized_interpreter = Interpreter(model_content=quantized_tflite_model)\n quantized_interpreter.allocate_tensors()\n input_details = quantized_interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n @test_util.run_v2_only\n def testMatMulCalibrateAndQuantize(self):\n root, concrete_func, calibration_gen = (\n self._getIntegerQuantizeModelWithUnknownShapes())\n float_converter = lite.TFLiteConverterV2.from_concrete_functions(\n [concrete_func], root)\n float_tflite_model = float_converter.convert()\n\n quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(\n [concrete_func], root)\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n quantized_tflite_model = quantized_converter.convert()\n\n # The default input and output types should be float.\n quantized_interpreter = Interpreter(model_content=quantized_tflite_model)\n quantized_interpreter.allocate_tensors()\n input_details = quantized_interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite_model), len(float_tflite_model))\n\n def testBatchMatMul(self):\n input_data_1 = tf.constant(\n np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))\n input_data_2 = tf.constant(\n np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32),\n tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32)\n ])\n def model(in_tensor_1, in_tensor_2):\n return tf.matmul(in_tensor_1, in_tensor_2)\n\n concrete_func = model.get_concrete_function()\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n model)\n tflite_model = converter.convert()\n\n # Check values from converted 
model.\n expected_value = concrete_func(input_data_1, input_data_2)\n actual_value = self._evaluateTFLiteModel(\n tflite_model, [input_data_1, input_data_2],\n input_shapes=[([-1, 256, 256], [1, 256, 256])])[0]\n self.assertAllClose(expected_value, actual_value, atol=4)\n\n def testSizeInvalid(self):\n\n @tf.function(input_signature=[\n tf.TensorSpec(shape=[1, None, 16, 3], dtype=tf.float32)\n ])\n def model(in_tensor):\n return in_tensor + in_tensor\n\n concrete_func = model.get_concrete_function()\n\n # Test invalid shape. None after 1st dimension. Run with TOCO in order to\n # invoke shape checking code.\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n model)\n converter.experimental_new_converter = False\n with self.assertRaises(ValueError) as error:\n converter.convert()\n self.assertEqual(\n 'None is only supported in the 1st dimension. Tensor '\n '\\'in_tensor\\' has invalid shape \\'[1, None, 16, 3]\\'.',\n str(error.exception))\n\n\nclass ResourceAndVariantTypes(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testVariants(self):\n\n @tf.function(input_signature=[tf.TensorSpec(shape=[1], dtype=tf.float32)])\n def model(v):\n m = map_ops.empty_tensor_map()\n k = tf.constant(1.0)\n p = tf.add(k, v)\n with ops.control_dependencies([m]):\n m2 = map_ops.tensor_map_insert(m, p, v)\n with ops.control_dependencies([m2]):\n return map_ops.tensor_map_size(m2)\n\n concrete_func = model.get_concrete_function()\n\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n model)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n interpreter.allocate_tensors()\n\n input_data = np.array([1.0], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(1, actual_value)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(1, actual_value)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(1, actual_value)\n\n @test_util.run_v2_only\n def testVariantsWithCond(self):\n\n def create_v1_saved_model():\n saved_model_dir = os.path.join(self.get_temp_dir(), 'variants_with_cond')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n m = map_ops.empty_tensor_map()\n\n def body(i, m):\n m = map_ops.tensor_map_insert(m, i, i)\n return i + 1, m\n\n in_tensor = tf.compat.v1.placeholder(\n shape=[1], dtype=tf.int32, name='input')\n _, result_m = tf.cond(in_tensor < 10, lambda: body(in_tensor, m),\n lambda: body(in_tensor + 1, m))\n out_tensor = in_tensor + map_ops.tensor_map_size(result_m)\n\n inputs = {'x': in_tensor}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n saved_model_dir = create_v1_saved_model()\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Check 
values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n interpreter.allocate_tensors()\n\n input_data = np.array([0], dtype=np.int32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n expected_value = np.array([1], dtype=np.int32)\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(expected_value, actual_value)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(expected_value, actual_value)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(expected_value, actual_value)\n\n @test_util.run_v2_only\n def testVariantsWithWhile(self):\n\n def create_v1_saved_model():\n saved_model_dir = os.path.join(self.get_temp_dir(), 'variants_with_while')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n m = map_ops.empty_tensor_map()\n\n def cond(i, m):\n del m\n return i < 10\n\n def body(i, m):\n m = map_ops.tensor_map_insert(m, i, i)\n return i + 1, m\n\n _, result_m = tf.while_loop(cond, body, [0, m])\n in_tensor = tf.compat.v1.placeholder(\n shape=[1], dtype=tf.int32, name='input')\n out_tensor = in_tensor + map_ops.tensor_map_size(result_m)\n\n inputs = {'x': in_tensor}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n saved_model_dir = create_v1_saved_model()\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n interpreter.allocate_tensors()\n\n input_data = np.array([0], dtype=np.int32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(10, actual_value)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(10, actual_value)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(10, actual_value)\n\n @test_util.run_v2_only\n def testResources(self):\n\n def create_v1_saved_model():\n saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_resources')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n in_tensor = tf.compat.v1.placeholder(\n shape=[1], dtype=tf.float32, name='input')\n\n stack = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)\n w = tf.raw_ops.StackPushV2(handle=stack, elem=in_tensor)\n with ops.control_dependencies([w]):\n a = in_tensor + in_tensor\n with ops.control_dependencies([a]):\n out_tensor = a + tf.raw_ops.StackPopV2(\n handle=stack, elem_type=tf.float32)\n\n inputs = {'x': in_tensor}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n saved_model_dir = create_v1_saved_model()\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = [\n 
tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n interpreter.allocate_tensors()\n\n input_data = np.array([1.0], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(3.0, actual_value)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(3.0, actual_value)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(3.0, actual_value)\n\n @test_util.run_v2_only\n def testResourcesWithCond(self):\n\n def create_v1_saved_model():\n saved_model_dir = os.path.join(self.get_temp_dir(), 'resources_with_cond')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n in_tensor = tf.compat.v1.placeholder(\n shape=[1], dtype=tf.float32, name='input')\n\n def body(i, arr):\n n = tf.raw_ops.StackPushV2(\n handle=arr, elem=tf.cast(i, dtype=tf.float32))\n return n, arr\n\n arr = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)\n n, result_arr = tf.cond(in_tensor < 10, lambda: body(0, arr),\n lambda: body(1, arr))\n\n with ops.control_dependencies([result_arr, n]):\n out_tensor = tf.raw_ops.StackPopV2(\n handle=result_arr, elem_type=tf.float32)\n\n inputs = {'x': in_tensor}\n outputs = {'a': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n saved_model_dir = create_v1_saved_model()\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n interpreter.allocate_tensors()\n\n input_data = np.array([1.0], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(0.0, actual_value)\n\n @test_util.run_v2_only\n def testResourcesWithWhile(self):\n\n def create_v1_saved_model():\n saved_model_dir = os.path.join(self.get_temp_dir(),\n 'resources_with_while')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n in_tensor = tf.compat.v1.placeholder(\n shape=[1], dtype=tf.float32, name='input')\n\n def cond(i, arr, m):\n del arr\n del m\n return i < 10\n\n def body(i, arr, m):\n del m\n n = tf.raw_ops.StackPushV2(\n handle=arr, elem=tf.cast(i, dtype=tf.float32))\n return i + 1, arr, n\n\n arr = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)\n _, result_arr, n = tf.while_loop(cond, body, [0, arr, 0.0])\n\n with ops.control_dependencies([result_arr, n]):\n out_tensor = tf.raw_ops.StackPopV2(\n handle=result_arr, elem_type=tf.float32)\n\n inputs = {'x': in_tensor}\n outputs = {'a': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n saved_model_dir = create_v1_saved_model()\n\n converter = 
lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n interpreter.allocate_tensors()\n\n input_data = np.array([1.0], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(9.0, actual_value)\n\n @parameterized.named_parameters(('EnableLoweringTensorListOps', True),\n ('DisableLoweringTensorListOps', False))\n @test_util.run_v2_only\n def testTensorListWithStaticSize(self, lower_tensor_list_ops):\n\n def create_v1_saved_model():\n saved_model_dir = os.path.join(self.get_temp_dir(),\n 'simple_mutable_variable')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n in_tensor = tf.compat.v1.placeholder(\n shape=[1], dtype=tf.float32, name='input')\n\n ta = tf.TensorArray(\n tf.float32, size=3, dynamic_size=False, clear_after_read=False)\n ta = ta.write(0, 10.0)\n ta = ta.write(1, 20.0)\n ta = ta.write(2, 30.0)\n\n out_tensor = ta.read(0) + ta.read(2)\n\n inputs = {'x': in_tensor}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n saved_model_dir = create_v1_saved_model()\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n if not lower_tensor_list_ops:\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n converter._experimental_lower_tensor_list_ops = lower_tensor_list_ops\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n interpreter.allocate_tensors()\n\n input_data = np.array([1.0], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(40.0, actual_value)\n\n @parameterized.named_parameters(('EnableLoweringTensorListOps', True),\n ('DisableLoweringTensorListOps', False))\n @test_util.run_v2_only\n def testTensorListWithDynamicSize(self, lower_tensor_list_ops):\n\n def create_v1_saved_model():\n saved_model_dir = os.path.join(self.get_temp_dir(),\n 'simple_mutable_variable')\n with tf.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n in_tensor = tf.compat.v1.placeholder(\n shape=[1], dtype=tf.float32, name='input')\n\n ta = tf.TensorArray(\n tf.float32, size=0, dynamic_size=True, clear_after_read=False)\n ta = ta.write(0, 10.0)\n ta = ta.write(1, 20.0)\n ta = ta.write(2, 30.0)\n\n out_tensor = ta.read(0) + ta.read(2)\n\n inputs = {'x': in_tensor}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n saved_model_dir = create_v1_saved_model()\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n if lower_tensor_list_ops:\n with self.assertRaises(convert.ConverterError) as error:\n converter.convert()\n self.assertIn(\n 'Lowering tensor list 
ops is failed. Please consider using Select '\n 'TF ops and disabling `_experimental_lower_tensor_list_ops` flag in '\n 'the TFLite converter object.', str(error.exception))\n\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n interpreter.allocate_tensors()\n\n input_data = np.array([1.0], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(40.0, actual_value)\n\n\nclass CalibrateAndQuantizeWithCustomOpTest(lite_v2_test_util.ModelTest):\n\n def _createGraphWithCustomOp(self):\n # Create a graph that has one double op.\n np.random.seed(0)\n\n saved_model_dir = os.path.join(self.get_temp_dir(), 'double_model')\n with ops.Graph().as_default():\n with tf.compat.v1.Session() as sess:\n in_tensor = tf.compat.v1.placeholder(\n shape=[1, 4], dtype=dtypes.float32, name='input')\n out_tensor = double_op.double(in_tensor)\n inputs = {'x': in_tensor}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n\n def calibration_gen():\n for _ in range(100):\n yield [np.random.uniform(-1, 1, size=(1, 4)).astype(np.float32)]\n\n return (saved_model_dir, calibration_gen)\n\n def testCustomOpRegistererByName(self):\n \"\"\"Test a calibration with custom op registered by name.\"\"\"\n saved_model_dir, calibration_gen = self._createGraphWithCustomOp()\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.optimizations = [lite.Optimize.DEFAULT]\n converter.representative_dataset = calibration_gen\n converter.allow_custom_ops = True\n converter.target_spec._experimental_custom_op_registerers = [\n 'TF_TestRegisterer'\n ]\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n self.assertGreater(test_registerer.get_num_test_registerer_calls(), 0)\n self.assertIn('Double', tflite_test_util.get_ops_list(tflite_model))\n # Check the conversion metadata.\n metadata = get_conversion_metadata(tflite_model)\n self.assertIsNotNone(metadata)\n self.assertEqual(metadata.options.allowCustomOps, True)\n\n # Check the model works with custom ops.\n interpreter = InterpreterWithCustomOps(\n model_content=tflite_model, custom_op_registerers=['TF_TestRegisterer'])\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n test_input = np.array([[0.0, 0.1, 0.2, 0.3]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], test_input)\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n expected_output = np.array([[0.0, 0.2, 0.4, 0.6]], dtype=np.float32)\n output_data = interpreter.get_tensor(output_details[0]['index'])\n self.assertArrayNear(expected_output[0], output_data[0], err=1e-2)\n\n def testCustomOpRegistererByFunc(self):\n \"\"\"Test a calibration with custom op registered by function.\"\"\"\n saved_model_dir, calibration_gen = self._createGraphWithCustomOp()\n\n converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n converter.optimizations = [lite.Optimize.DEFAULT]\n converter.representative_dataset = calibration_gen\n converter.allow_custom_ops = True\n 
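# Unlike the by-name variant above, the registerer here is passed as a\n    # Python callable rather than a symbol name to be looked up.\n    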
converter.target_spec._experimental_custom_op_registerers = [\n        test_registerer.TF_TestRegisterer\n    ]\n    tflite_model = converter.convert()\n    self.assertTrue(tflite_model)\n    self.assertGreater(test_registerer.get_num_test_registerer_calls(), 0)\n    self.assertIn('Double', tflite_test_util.get_ops_list(tflite_model))\n\n    # Check the model works with custom ops.\n    interpreter = InterpreterWithCustomOps(\n        model_content=tflite_model,\n        custom_op_registerers=[test_registerer.TF_TestRegisterer])\n    interpreter.allocate_tensors()\n    input_details = interpreter.get_input_details()\n    test_input = np.array([[0.0, 0.1, 0.2, 0.3]], dtype=np.float32)\n    interpreter.set_tensor(input_details[0]['index'], test_input)\n    interpreter.invoke()\n\n    output_details = interpreter.get_output_details()\n    expected_output = np.array([[0.0, 0.2, 0.4, 0.6]], dtype=np.float32)\n    output_data = interpreter.get_tensor(output_details[0]['index'])\n    self.assertArrayNear(expected_output[0], output_data[0], err=1e-2)\n\n  def testCustomOpRegistererFailure(self):\n    """Test a calibration with a wrong custom op registerer."""\n    saved_model_dir, calibration_gen = self._createGraphWithCustomOp()\n\n    bogus_name = 'CompletelyBogusRegistererName'\n\n    converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)\n    converter.optimizations = [lite.Optimize.DEFAULT]\n    converter.representative_dataset = calibration_gen\n    converter.allow_custom_ops = True\n    converter.target_spec._experimental_custom_op_registerers = [bogus_name]\n\n    with self.assertRaisesRegex(\n        ValueError, 'Looking up symbol \'' + bogus_name + '\' failed'):\n      converter.convert()\n\n\nclass IntermediatesTest(lite_v2_test_util.ModelTest):\n\n  def _run(self, experimental_preserve_all_tensors):\n\n    @tf.function\n    def f(x):\n      y = tf.add(x, x, name='y')\n      z = tf.add(y, y, name='z')\n      w = tf.add(z, z, name='w')\n      return w\n\n    # NOTE: this is exactly representable as a float, as are the intermediates\n    # of f, so direct comparison is ok below.\n\n    input_data = np.array(2.0, np.float32)\n    concrete_func = f.get_concrete_function(input_data)\n    converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n                                                               f)\n    tflite_model = converter.convert()\n    interpreter = Interpreter(\n        model_content=tflite_model,\n        experimental_preserve_all_tensors=experimental_preserve_all_tensors)\n    interpreter.allocate_tensors()\n    interpreter.set_tensor(interpreter.get_input_details()[0]['index'],\n                           input_data)\n    interpreter.invoke()\n    out = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])\n    tensors = {}\n    for t in interpreter.get_tensor_details():\n      # With the TensorFlow Lite default delegate applied to the model graph,\n      # accessing the original tensors of a delegated op can raise a ValueError\n      # (i.e. 'Tensor data is null. Run allocate_tensors() first') because the\n      # tensor memory isn't allocated at all.\n      val = None\n      try:\n        val = interpreter.get_tensor(t['index'])\n      except ValueError:\n        pass\n      tensors.update({t['name']: val})\n    return (tensors, out)\n\n  def testPreserve(self):\n    tensors, result = self._run(experimental_preserve_all_tensors=True)\n    # All intermediates should be preserved, and the result should be correct.\n    self.assertAllClose(tensors['x'], 2.0)\n    self.assertAllClose(tensors['y'], 4.0)\n    self.assertAllClose(tensors['z'], 8.0)\n    self.assertAllClose(result, 16.0)\n\n  def testNoPreserve(self):\n    tensors, result = self._run(experimental_preserve_all_tensors=False)\n    # One of them should be wrong if preserve is not true, but result should be\n    # ok. 
Input should still be ok for repeated invocation.\n self.assertAllClose(tensors['x'], 2.0)\n self.assertTrue(tensors['y'] != 4.0 or tensors['z'] != 8.0)\n self.assertAllClose(result, 16.0)\n\n\nclass DatasetOpsTest(lite_v2_test_util.ModelTest):\n\n @test_util.run_v2_only\n def testReduceDataset(self):\n\n @tf.function\n def model():\n dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4])\n output = dataset.reduce(np.int32(0), lambda x, y: x + y)\n return output\n\n concrete_func = model.get_concrete_function()\n converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func],\n model)\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS\n ]\n tflite_model = converter.convert()\n self.assertIsNotNone(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n output_details = interpreter.get_output_details()\n\n interpreter.allocate_tensors()\n\n interpreter.invoke()\n actual_value = interpreter.get_tensor(output_details[0]['index'])\n self.assertEqual(10, actual_value)\n\n\nclass SparsityTest(lite_v2_test_util.ModelTest):\n\n def _getSparsificableModel(self, matrix_b_values):\n np.random.seed(0)\n root = tracking.AutoTrackable()\n\n @tf.function(\n input_signature=[tf.TensorSpec(shape=[16, 4], dtype=tf.float32)])\n def func(inp):\n matrix_b = tf.constant(matrix_b_values, dtype=tf.float32)\n matrix_b = tf.reshape(matrix_b, [4, 8])\n matmul = tf.matmul(inp, matrix_b, transpose_a=False, transpose_b=False)\n output = tf.nn.relu(matmul, name='output')\n return output\n\n root.f = func\n to_save = root.f.get_concrete_function()\n return (root, to_save)\n\n def testRandomSparsity(self):\n matrix_b_values = [\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 1\n ]\n root, func = self._getSparsificableModel(matrix_b_values)\n float_converter = lite.TFLiteConverterV2.from_concrete_functions([func],\n root)\n float_converter.optimizations = [lite.Optimize.EXPERIMENTAL_SPARSITY]\n float_tflite_model = float_converter.convert()\n self.assertIsNotNone(float_tflite_model)\n # Check the conversion metadata.\n metadata = get_conversion_metadata(float_tflite_model)\n self.assertIsNotNone(metadata)\n self.assertAllEqual([metadata_fb.ModelOptimizationMode.RANDOM_SPARSITY],\n metadata.options.modelOptimizationModes)\n\n def testBlockSparsity(self):\n matrix_b_values = [\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0\n ]\n root, func = self._getSparsificableModel(matrix_b_values)\n float_converter = lite.TFLiteConverterV2.from_concrete_functions([func],\n root)\n float_converter.optimizations = [lite.Optimize.EXPERIMENTAL_SPARSITY]\n float_tflite_model = float_converter.convert()\n self.assertIsNotNone(float_tflite_model)\n # Check the conversion metadata.\n metadata = get_conversion_metadata(float_tflite_model)\n self.assertIsNotNone(metadata)\n self.assertAllEqual([metadata_fb.ModelOptimizationMode.BLOCK_SPARSITY],\n metadata.options.modelOptimizationModes)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.lite.python.lite.TFLiteConverterV2.from_keras_model", "tensorflow.ones", "tensorflow.keras.layers.Reshape", "tensorflow.lite.python.test_util.get_ops_list", "tensorflow.lite.python.lite.TFLiteConverterV2.from_concrete_functions", "numpy.random.random", "tensorflow.python.platform.test.main", "tensorflow.python.platform.resource_loader.get_path_to_datafile", "tensorflow.python.saved_model.loader_impl.parse_saved_model", "tensorflow.compat.v1.placeholder", "tensorflow.python.ops.map_ops.tensor_map_size", "tensorflow.random_normal_initializer", "tensorflow.lite.TFLiteConverter.from_concrete_functions", "tensorflow.TensorShape", "tensorflow.Variable", "tensorflow.constant", "numpy.prod", "tensorflow.add", "tensorflow.keras.Input", "tensorflow.python.framework.ops.Graph", "tensorflow.fill", "tensorflow.random.uniform", "numpy.random.random_sample", "tensorflow.lookup.KeyValueTensorInitializer", "tensorflow.compat.v1.saved_model.signature_def_utils.build_signature_def", "tensorflow.keras.initializers.Constant", "tensorflow.lite.python.lite.TFLiteConverterV2.from_saved_model", "tensorflow.keras.layers.Flatten", "tensorflow.python.ops.map_ops.tensor_map_insert", "tensorflow.keras.layers.Activation", "numpy.mean", "tensorflow.lite.python.lite.TFLiteConverterV2", "tensorflow.compat.v1.reset_default_graph", "tensorflow.python.ops.rnn.static_rnn", "tensorflow.compat.v1.initializers.global_variables", "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.ConvLSTM2D", "tensorflow.nn.tanh", "tensorflow.broadcast_to", "tensorflow.compat.v1.tables_initializer", "tensorflow.shape", "tensorflow.python.saved_model.saved_model.simple_save", "tensorflow.raw_ops.MutableHashTableV2", "tensorflow.lite.python.interpreter.InterpreterWithCustomOps", "tensorflow.python.ops.map_ops.empty_tensor_map", "tensorflow.TensorArray", "numpy.array", "tensorflow.lite.Interpreter", "numpy.random.randn", "tensorflow.keras.layers.Dropout", "tensorflow.python.saved_model.save_options.SaveOptions", "tensorflow.reduce_sum", "tensorflow.keras.layers.RNN", "numpy.random.seed", "tensorflow.Graph", "numpy.ones", "tensorflow.compat.v1.image.resize_bilinear", "tensorflow.gather", "tensorflow.quantization.fake_quant_with_min_max_vars", "tensorflow.compat.v1.disable_eager_execution", "tensorflow.python.training.tracking.tracking.AutoTrackable", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.math.erf", "tensorflow.keras.models.Model", "tensorflow.math.ceil", "tensorflow.reshape", "tensorflow.keras.layers.LSTM", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.compat.v1.saved_model.builder.SavedModelBuilder", "tensorflow.cast", "tensorflow.compat.v1.saved_model.utils.build_tensor_info", "tensorflow.while_loop", "tensorflow.raw_ops.LookupTableFindV2", "tensorflow.keras.layers.Conv2D", "tensorflow.lite.python.schema_py_generated.Model.GetRootAsModel", "tensorflow.raw_ops.StackV2", "tensorflow.nn.relu", "tensorflow.compat.v1.Session", "tensorflow.lite.python.util.get_conversion_metadata", "tensorflow.python.lib.io.file_io.FileIO", "tensorflow.python.saved_model.save.save", "tensorflow.math.tanh", "tensorflow.lite.TFLiteConverter.from_saved_model", "tensorflow.matmul", "tensorflow.function", "tensorflow.keras.layers.Dense", "tensorflow.keras.Model", "tensorflow.lite.python.schema_py_generated.ModelT.InitFromObj", "tensorflow.raw_ops.StackPopV2", "tensorflow.lite.python.convert.mlir_quantize", "tensorflow.control_dependencies", "tensorflow.keras.layers.LSTMCell", 
"tensorflow.quantization.fake_quant_with_min_max_args", "numpy.full", "tensorflow.compat.v1.variables_initializer", "tensorflow.lite.python.lite.TFLiteConverterV2.experimental_from_jax", "tensorflow.lite.python.interpreter.Interpreter", "tensorflow.raw_ops.LookupTableInsertV2", "tensorflow.split", "tensorflow.lite.python.testdata.double_op.double", "numpy.int32", "tensorflow.math.add", "tensorflow.raw_ops.StackPushV2", "tensorflow.lite.python.util._convert_model_from_bytearray_to_object", "tensorflow.lite.python.testdata._pywrap_test_registerer.get_num_test_registerer_calls", "tensorflow.TensorSpec", "tensorflow.keras.layers.Input", "tensorflow.lite.TFLiteConverter.from_keras_model", "numpy.random.uniform", "tensorflow.compat.v1.enable_eager_execution", "tensorflow.keras.layers.concatenate" ] ]
cy7533/Chinese-Text-Classification-Pytorch
[ "50fe721e779c03905e674656f85d13cd0adf7b86" ]
[ "models/HAN/word_att_model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom models.HAN.utils import matrix_mul, element_wise_mul\n\n\"\"\"\nHAN 词维度\n\"\"\"\n\n\nclass WordAttNet(nn.Module):\n def __init__(self, dict_len, embed_size, word_hidden_size):\n \"\"\"\n 初始化 HAN 词维度\n :param dict_len: 词典总的词数\n :param embed_size: embedding后的向量维度\n :param word_hidden_size: 当前层编码的词向量的隐向量维度\n \"\"\"\n super(WordAttNet, self).__init__()\n\n self.word_weight = nn.Parameter(torch.zeros(2 * word_hidden_size, 2 * word_hidden_size))\n self.word_bias = nn.Parameter(torch.zeros(1, 2 * word_hidden_size))\n self.context_weight = nn.Parameter(torch.zeros(2 * word_hidden_size, 1))\n\n self.lookup = nn.Embedding(num_embeddings=dict_len, embedding_dim=embed_size)\n self.gru = nn.GRU(embed_size, word_hidden_size, bidirectional=True)\n\n self._create_weights(mean=0.0, std=0.05)\n\n def _create_weights(self, mean=0.0, std=0.05):\n self.word_weight.data.normal_(mean, std)\n self.word_bias.data.normal_(mean, std)\n self.context_weight.data.normal_(mean, std)\n\n def forward(self, x, hidden_state):\n \"\"\"\n param x: [seq_len, batch]\n hidden_state: [1 * 2, batch, word_hidden_size]\n \"\"\"\n # print('x:', torch.isnan(x).int().sum())\n # output: [seq_len, batch, embed_size]\n output = self.lookup(x)\n # print('self.lookup:', torch.isnan(output).int().sum())\n\n\n # f_output: [seq_len, batch, 2 * word_hidden_size]\n # h_output: [1 * 2, batch, word_hidden_size]\n f_output, h_output = self.gru(output.float(), hidden_state) # feature output and hidden state output\n # print('self.gru:', torch.isnan(f_output).int().sum())\n\n # context vector\n # output: [seq_len, batch, 2 * word_hidden_size]\n output = matrix_mul(f_output, self.word_weight, self.word_bias)\n # print('matrix_mul1:', torch.isnan(output).int().sum())\n # output: [seq_len, batch] => [batch, seq_len]\n output = matrix_mul(output, self.context_weight).squeeze(2).permute(1, 0)\n # print('matrix_mul2:', torch.isnan(output).int().sum())\n output = F.softmax(output, dim=1)\n # print('F.softmax:', torch.isnan(output).int().sum())\n # output: [1, batch, 2 * word_hidden_size]\n output = element_wise_mul(f_output, output.permute(1, 0))\n\n return output, h_output\n\n\nif __name__ == \"__main__\":\n abc = WordAttNet(dict_len=1000, embed_size=300, word_hidden_size=128)\n print(abc)\n" ]
[ [ "torch.zeros", "torch.nn.Embedding", "torch.nn.GRU", "torch.nn.functional.softmax" ] ]
hknozturk/yarll
[ "c5293e6455e3debe6e4d4d21f713937a24a654f3" ]
[ "yarll/agents/tf2/sac.py" ]
[ "\"\"\"\nSoft Actor-Critic\n\nBased on SAC implementation from https://github.com/rail-berkeley/softlearning\n\"\"\"\nfrom copy import deepcopy\nimport csv\nfrom pathlib import Path\nfrom typing import Union\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense\nimport tensorflow_probability as tfp\n\nfrom yarll.agents.agent import Agent\nfrom yarll.agents.env_runner import EnvRunner\nfrom yarll.memory.prealloc_memory import PreAllocMemory\nfrom yarll.misc.network_ops import Softplus, Split\nfrom yarll.misc.utils import hard_update, soft_update\nfrom yarll.misc import summary_writer\n\n# TODO: put this in separate file\nclass DeterministicPolicy:\n def __init__(self, env, policy_fn):\n self.env = env\n self.policy_fn = policy_fn\n self.initial_features = None\n\n self.action_low = env.action_space.low\n self.action_high = env.action_space.high\n\n def choose_action(self, state, features):\n res = self.policy_fn(state[None, :])[0].numpy()[0]\n return {\"action\": res}\n\n def get_env_action(self, action):\n return self.action_low + (action + 1.0) * 0.5 * (self.action_high - self.action_low)\n\n def new_trajectory(self):\n pass\n\nclass SAC(Agent):\n def __init__(self, env, monitor_path: Union[Path, str], **usercfg) -> None:\n super().__init__(**usercfg)\n self.env = env\n self.monitor_path = Path(monitor_path)\n self.monitor_path.mkdir(parents=True, exist_ok=True)\n\n self.config.update(\n max_steps=100000,\n actor_learning_rate=1e-4,\n softq_learning_rate=1e-4,\n alpha_learning_rate=1e-4,\n n_softqs=2,\n reward_scale=1.0,\n n_hidden_layers=2,\n n_hidden_units=1024,\n gamma=0.99,\n batch_size=1024,\n tau=0.005,\n init_log_alpha=0.1,\n actor_update_frequency=1,\n critic_target_update_frequency=2,\n log_scale_bounds=(-5, 2),\n target_entropy=None,\n logprob_epsilon=1e-6, # For numerical stability when computing tf.log\n n_train_steps=1, # Number of parameter update steps per iteration\n replay_buffer_size=1e6,\n replay_start_size=1000, # Required number of replay buffer entries to start training\n gradient_clip_value=1.0,\n hidden_layer_activation=\"relu\",\n normalize_inputs=False,\n summaries=True,\n checkpoints=True,\n checkpoint_every_episodes=10,\n checkpoints_max_to_keep=None,\n save_model=True,\n test_frequency=0,\n n_test_episodes=5,\n write_train_rewards=False\n )\n self.config.update(usercfg)\n\n self.state_shape: list = list(env.observation_space.shape)\n self.n_actions: int = env.action_space.shape[0]\n self.action_low = self.env.action_space.low\n self.action_high = self.env.action_space.high\n\n self.target_entropy = self.config[\"target_entropy\"]\n if self.target_entropy is None:\n self.target_entropy = -np.prod(env.action_space.shape)\n\n # Make networks\n # action_output are the squashed actions and action_original those straight from the normal distribution\n input_dim = self.state_shape[0]\n self.actor_network = ActorNetwork(input_dim,\n self.config[\"n_hidden_layers\"],\n self.config[\"n_hidden_units\"],\n self.n_actions,\n self.config[\"logprob_epsilon\"],\n self.config[\"hidden_layer_activation\"],\n self.config[\"log_scale_bounds\"])\n self.softq_networks = [make_softq_network((env.observation_space.shape, env.action_space.shape),\n self.config[\"n_hidden_layers\"],\n self.config[\"n_hidden_units\"],\n self.config[\"hidden_layer_activation\"])\n for _ in range(self.config[\"n_softqs\"])]\n self.target_softq_networks = [tf.keras.models.clone_model(net) for net in self.softq_networks]\n\n 
dummy_input_states = tf.random.uniform((1, *self.state_shape))\n dummy_input_actions = tf.random.uniform((1, *self.env.action_space.shape))\n for net, target_net in zip(self.softq_networks, self.target_softq_networks):\n net((dummy_input_states, dummy_input_actions))\n target_net((dummy_input_states, dummy_input_actions))\n hard_update(net.variables, target_net.variables)\n\n self._log_alpha = tf.Variable(self.config[\"init_log_alpha\"], name=\"log_alpha\")\n self._alpha = tfp.util.DeferredTensor(self._log_alpha, tf.exp, name=\"alpha\")\n\n\n # Make train ops\n self.actor_optimizer = tf.optimizers.Adam(learning_rate=self.config[\"actor_learning_rate\"],\n clipnorm=self.config[\"gradient_clip_value\"])\n self.softq_optimizers = [tf.optimizers.Adam(learning_rate=self.config[\"softq_learning_rate\"],\n clipnorm=self.config[\"gradient_clip_value\"])\n for _ in self.softq_networks]\n self.alpha_optimizer = tf.optimizers.Adam(learning_rate=self.config[\"alpha_learning_rate\"],\n clipnorm=self.config[\"gradient_clip_value\"])\n\n self.replay_buffer = PreAllocMemory(int(self.config[\"replay_buffer_size\"]), self.state_shape, env.action_space.shape)\n self.n_updates = 0\n self.total_steps = 0\n self.total_episodes = 0\n if self.config[\"summaries\"]:\n self.summary_writer = tf.summary.create_file_writer(str(self.monitor_path))\n summary_writer.set(self.summary_writer)\n else:\n self.summary_writer = tf.summary.create_noop_writer()\n\n self.env_runner = EnvRunner(self.env,\n self,\n usercfg,\n scale_states=self.config[\"normalize_inputs\"],\n summaries=self.config[\"summaries\"],\n episode_rewards_file=(\n self.monitor_path / \"train_rewards.txt\" if self.config[\"write_train_rewards\"] else None)\n )\n\n if self.config[\"checkpoints\"]:\n checkpoint_directory = self.monitor_path / \"checkpoints\"\n self.ckpt = tf.train.Checkpoint(net=self.actor_network)\n self.ckpt_manager = tf.train.CheckpointManager(self.ckpt,\n checkpoint_directory,\n max_to_keep=self.config[\"checkpoints_max_to_keep\"])\n\n if self.config[\"test_frequency\"] > 0 and self.config[\"n_test_episodes\"] > 0:\n test_env = deepcopy(env)\n unw = test_env.unwrapped\n if hasattr(unw, \"summaries\"):\n unw.summaries = False\n if hasattr(unw, \"log_data\"):\n unw.log_data = False\n deterministic_policy = DeterministicPolicy(test_env, self.actor_network.deterministic_actions)\n self.test_env_runner = EnvRunner(test_env,\n deterministic_policy,\n usercfg,\n scale_states=self.config[\"normalize_inputs\"],\n summaries=False,\n episode_rewards_file=(\n self.monitor_path / \"test_rewards.txt\")\n )\n header = [\"\"] # (epoch) id has no name in header\n header += [f\"rew_{i}\" for i in range(self.config[\"n_test_episodes\"])]\n header += [\"rew_mean\", \"rew_std\"]\n self.test_results_file = self.monitor_path / \"test_results.csv\"\n with open(self.test_results_file, \"w\") as f:\n writer = csv.writer(f)\n writer.writerow(header)\n\n self.total_rewards = np.empty((self.config[\"n_test_episodes\"],), dtype=np.float32)\n\n def deterministic_actions(self, states: np.ndarray) -> np.ndarray:\n \"\"\"Get the actions for a batch of states.\"\"\"\n return self.actor_network.deterministic_actions(tf.convert_to_tensor(states))\n\n def action(self, state: np.ndarray) -> np.ndarray:\n \"\"\"Get the action for a single state.\"\"\"\n return self.actor_network(tf.convert_to_tensor(state[None, :]))[0].numpy()[0]\n\n @tf.function\n def train_critics(self, state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch):\n # Calculate critic targets\n 
next_action_batch, next_logprob_batch = self.actor_network(state1_batch)\n next_qs_values = [net((state1_batch, next_action_batch)) for net in self.target_softq_networks]\n next_q_values = tf.reduce_min(next_qs_values, axis=0)\n next_values = next_q_values - self._alpha * next_logprob_batch\n next_values = (1.0 - terminal1_batch) * next_values\n softq_targets = self.config[\"reward_scale\"] * reward_batch + self.config[\"gamma\"] * next_values\n softq_targets = tf.stop_gradient(softq_targets)\n\n # Update critics\n softqs_losses = []\n softqs_values = []\n for net, optimizer in zip(self.softq_networks, self.softq_optimizers):\n with tf.GradientTape() as tape:\n softq_values = net((state0_batch, action_batch))\n softq_losses = 0.5 * tf.losses.MSE(y_true=softq_targets,\n y_pred=softq_values)\n softq_loss = tf.nn.compute_average_loss(softq_losses)\n softqs_losses.append(tf.stop_gradient(softq_losses))\n softqs_values.append(tf.stop_gradient(softq_values))\n softq_gradients = tape.gradient(softq_loss, net.trainable_weights)\n optimizer.apply_gradients(zip(softq_gradients, net.trainable_weights))\n\n tf.debugging.assert_shapes((\n (action_batch, (\"B\", \"nA\")),\n (next_action_batch, (\"B\", \"nA\")),\n (next_logprob_batch, (\"B\", 1)),\n (reward_batch, (\"B\", 1)),\n (next_q_values, (\"B\", 1)),\n (next_values, (\"B\", 1)),\n (softq_values, (\"B\", 1)),\n (softq_losses, (\"B\")),\n (softq_loss, (1,)),\n (softq_targets, (\"B\", 1))\n ))\n softq = tf.concat(softqs_values, axis=0)\n softq_mean, softq_variance = tf.nn.moments(softq, axes=[0])\n return softq_mean[0], tf.sqrt(softq_variance[0]), softq_targets, tf.reduce_mean(softq_losses)\n\n @tf.function\n def train_actor(self, state0_batch):\n with tf.GradientTape() as tape:\n actions, action_logprob = self.actor_network(state0_batch)\n softqs_pred = [net((state0_batch, actions)) for net in self.softq_networks]\n min_softq_pred = tf.reduce_min(softqs_pred, axis=0)\n actor_losses = self._alpha * action_logprob - min_softq_pred\n actor_loss = tf.nn.compute_average_loss(actor_losses)\n tf.debugging.assert_shapes((\n (actions, (\"B\", \"nA\")),\n (action_logprob, (\"B\", 1)),\n (min_softq_pred, (\"B\", 1)),\n (actor_losses, (\"B\", 1))\n ))\n actor_gradients = tape.gradient(actor_loss, self.actor_network.trainable_weights)\n self.actor_optimizer.apply_gradients(zip(actor_gradients, self.actor_network.trainable_weights))\n return tf.reduce_mean(actor_loss), tf.reduce_mean(action_logprob)\n\n @tf.function\n def train_alpha(self, state0_batch):\n _, action_logprob = self.actor_network(state0_batch)\n with tf.GradientTape() as tape:\n alpha_losses = -1.0 * self._alpha * tf.stop_gradient(action_logprob + self.target_entropy) # For batch\n alpha_loss = tf.nn.compute_average_loss(alpha_losses)\n alpha_gradients = tape.gradient(alpha_loss, [self._log_alpha])\n self.alpha_optimizer.apply_gradients(zip(alpha_gradients, [self._log_alpha]))\n\n return alpha_loss\n\n def do_test_episodes(self, step) -> None:\n for i in range(self.config[\"n_test_episodes\"]):\n test_trajectory = self.test_env_runner.get_trajectory(stop_at_trajectory_end=True)\n self.total_rewards[i] = np.sum(test_trajectory.rewards)\n test_rewards_mean = np.mean(self.total_rewards)\n test_rewards_std = np.std(self.total_rewards)\n to_write = [step] + self.total_rewards.tolist() + [test_rewards_mean, test_rewards_std]\n with open(self.test_results_file, \"a\") as f:\n writer = csv.writer(f)\n writer.writerow(to_write)\n\n def learn(self):\n # Arrays to keep results from train function over 
different train steps in\n softq_means = np.empty((self.config[\"n_train_steps\"],), np.float32)\n softq_stds = np.empty((self.config[\"n_train_steps\"],), np.float32)\n softq_losses = np.empty((self.config[\"n_train_steps\"],), np.float32)\n actor_losses = np.empty((self.config[\"n_train_steps\"],), np.float32)\n alpha_losses = np.empty((self.config[\"n_train_steps\"],), np.float32)\n action_logprob_means = np.empty((self.config[\"n_train_steps\"],), np.float32)\n total_episodes = 0\n summary_writer.start()\n for step in range(self.config[\"max_steps\"]):\n if self.config[\"test_frequency\"] > 0 and (step % self.config[\"test_frequency\"]) == 0 and self.config[\"n_test_episodes\"] > 0:\n self.do_test_episodes(step)\n experience = self.env_runner.get_steps(1)[0]\n self.total_steps += 1\n self.replay_buffer.add(experience.state, experience.action, experience.reward,\n experience.next_state, experience.terminal)\n if self.replay_buffer.n_entries > self.config[\"replay_start_size\"]:\n for i in range(self.config[\"n_train_steps\"]):\n sample = self.replay_buffer.get_batch(self.config[\"batch_size\"])\n states0 = tf.convert_to_tensor(sample[\"states0\"])\n softq_mean, softq_std, softq_targets, softq_loss = self.train_critics(\n states0,\n np.resize(sample[\"actions\"], [self.config[\"batch_size\"],\n self.n_actions]), # for n_actions == 1\n sample[\"rewards\"],\n sample[\"states1\"],\n sample[\"terminals1\"])\n if (step % self.config[\"actor_update_frequency\"]) == 0:\n actor_loss, action_logprob_mean = self.train_actor(states0)\n alpha_loss = self.train_alpha(states0)\n actor_losses[i] = actor_loss\n alpha_losses[i] = alpha_loss\n action_logprob_means[i] = action_logprob_mean\n else:\n print(\"WARNING: ACTOR NOT UPDATED\")\n softq_means[i] = softq_mean\n softq_stds[i] = softq_std\n softq_losses[i] = softq_loss\n # Update the target networks\n if (step % self.config[\"critic_target_update_frequency\"]) == 0:\n for net, target_net in zip(self.softq_networks, self.target_softq_networks):\n soft_update(net.variables,\n target_net.variables,\n self.config[\"tau\"])\n if self.config[\"summaries\"]:\n summary_writer.add_scalar(\"model/predicted_softq_mean\", np.mean(softq_means), self.total_steps)\n summary_writer.add_scalar(\"model/predicted_softq_std\", np.mean(softq_stds), self.total_steps)\n summary_writer.add_scalar(\"model/softq_targets\", np.mean(softq_targets), self.total_steps)\n summary_writer.add_scalar(\"model/softq_loss\", np.mean(softq_losses), self.total_steps)\n if (step % self.config[\"actor_update_frequency\"]) == 0:\n summary_writer.add_scalar(\"model/actor_loss\", np.mean(actor_losses), self.total_steps)\n summary_writer.add_scalar(\"model/alpha_loss\", np.mean(alpha_losses), self.total_steps)\n summary_writer.add_scalar(\"model/alpha\", tf.exp(self._log_alpha), self.total_steps)\n summary_writer.add_scalar(\"model/action_logprob_mean\", np.mean(action_logprob_means), self.total_steps)\n self.n_updates += 1\n if experience.terminal:\n if self.config[\"checkpoints\"] and (total_episodes % self.config[\"checkpoint_every_episodes\"]) == 0:\n self.ckpt_manager.save(total_episodes)\n total_episodes += 1\n summary_writer.flush()\n summary_writer.stop()\n if self.config[\"save_model\"]:\n self.actor_network.save_weights(str(self.monitor_path / \"actor_weights\"))\n self.softq_networks[0].save_weights(str(self.monitor_path / \"softq_weights\"))\n\n def choose_action(self, state, features):\n if self.total_steps < self.config[\"replay_start_size\"]:\n action = np.random.uniform(-1.0, 1.0, 
self.env.action_space.shape)\n else:\n action = self.action(state)\n return {\"action\": action}\n\n def get_env_action(self, action):\n \"\"\"\n Converts an action from self.choose_action to an action to be given to the environment.\n \"\"\"\n return self.action_low + (action + 1.0) * 0.5 * (self.action_high - self.action_low)\n\nclass ActorNetwork(Model):\n def __init__(self, input_dim, n_hidden_layers, n_hidden_units, n_actions, logprob_epsilon, hidden_layer_activation=\"relu\", log_scale_bounds=(-5, 2)):\n super().__init__()\n self.n_actions = n_actions\n self.logprob_epsilon = logprob_epsilon\n inp = tf.keras.Input((input_dim,))\n out = inp\n for i in range(n_hidden_layers):\n out = Dense(n_hidden_units,\n activation=hidden_layer_activation,\n name=f\"hidden_{i}\")(out)\n out = Dense(n_actions * 2, name=\"mean_log_scale\")(out)\n mean, scale = Split(num_or_size_splits=2, axis=-1)(out)\n\n scale = Softplus()(scale)\n self.mean_scale_model = tf.keras.Model(inputs=inp, outputs=(mean, scale))\n\n @tf.function\n def call(self, inputs, training=None, mask=None):\n mean, scale = self.mean_scale_model(inputs)\n dist = tfp.distributions.Normal(mean, scale)\n actions = dist.sample()\n log_prob = tf.reduce_sum(dist.log_prob(actions), axis=-1, keepdims=True)\n log_prob = log_prob - tf.reduce_sum(2 * (tf.math.log(2.0) - actions - tf.math.softplus(-2 * actions)), axis=-1, keepdims=True)\n squashed_actions = tf.tanh(actions)\n return squashed_actions, log_prob\n\n @tf.function\n def deterministic_actions(self, inp):\n outputs = self.mean_scale_model(inp)\n return tf.tanh(outputs[0])\n\ndef make_softq_network(input_shapes, n_hidden_layers, n_hidden_units, hidden_layer_activation=\"relu\"):\n inputs = [tf.keras.layers.Input(x, dtype=\"float32\") for x in input_shapes]\n\n out = tf.keras.layers.concatenate(inputs, axis=-1)\n\n for _ in range(n_hidden_layers):\n out = Dense(n_hidden_units, activation=hidden_layer_activation)(out)\n out = Dense(1)(out)\n return tf.keras.Model(inputs, out, name=\"SoftQ\")\n" ]
[ [ "tensorflow.exp", "tensorflow.reduce_min", "tensorflow.debugging.assert_shapes", "tensorflow.nn.moments", "numpy.mean", "tensorflow.keras.layers.Dense", "tensorflow.sqrt", "tensorflow.keras.Model", "numpy.resize", "tensorflow.keras.models.clone_model", "tensorflow.tanh", "numpy.empty", "tensorflow.concat", "tensorflow.train.CheckpointManager", "tensorflow.GradientTape", "tensorflow.Variable", "tensorflow.math.log", "numpy.prod", "tensorflow.keras.Input", "tensorflow.summary.create_noop_writer", "tensorflow.losses.MSE", "tensorflow.random.uniform", "numpy.std", "tensorflow.train.Checkpoint", "tensorflow.nn.compute_average_loss", "tensorflow.convert_to_tensor", "tensorflow.keras.layers.Input", "numpy.sum", "tensorflow.optimizers.Adam", "tensorflow.math.softplus", "numpy.random.uniform", "tensorflow.reduce_mean", "tensorflow.stop_gradient", "tensorflow.keras.layers.concatenate" ] ]
yingli2009/dtnn
[ "30ef997f69f5293ae1eee03ec24716d4f0f3ce18" ]
[ "dtnn/utils.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\n\ndef shape(x):\n if isinstance(x, tf.Tensor):\n return x.get_shape().as_list()\n return np.shape(x)\n" ]
[ [ "numpy.shape" ] ]
cocomoff/SimpleRRNG
[ "35ce2c61e2d9cb917d7df74ecdbb21c9f25760ad" ]
[ "example.py" ]
[ "import numpy as np\r\nfrom rrng.generator import generate\r\nfrom rrng.visualizer import visualize\r\nfrom rrng.util import clean_up, write_to_csv\r\n\r\nif __name__ == '__main__':\r\n np.random.seed(20201222)\r\n Dx, Dy = 50, 50\r\n N = 30\r\n coeff = 1.3\r\n G, pos = generate(Dx, Dy, N, coeff)\r\n G, pos = clean_up(G)\r\n visualize(G, pos, name=\"example.png\", dir=\"./output\", label=False)\r\n write_to_csv(G, pos)\r\n" ]
[ [ "numpy.random.seed" ] ]
rossimattia/depth-refinement-and-normal-estimation
[ "daafd1658ebbddb38dda373b6f8a6c2cf81636f9" ]
[ "refinement.py" ]
[ "# Copyright (c) 2020,\n# ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland,\n# Laboratoire de Traitement des Signaux 4 (LTS4).\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# Author: Mattia Rossi (rossi-mattia-at-gmail-com)\n\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as fun\nfrom torch import device as dev\nimport numpy as np\nfrom misc import resize_map, space2plane_normal, plane2space_normal, depth2normal\nfrom filters import gradient_filter\nfrom losses import DepthConsistencyL1, NormalConsistencyL1, PieceWisePlanarRegularization\nfrom cv2 import cvtColor, COLOR_RGB2GRAY\nfrom transforms import depth2depth_inv, depth_inv2depth, depth_range2depth_inv_range\nfrom logger import Logger\nfrom typing import Tuple, List, Dict\n\n\nclass Loss(nn.Module):\n \"\"\"It creates a loss function consisting of an inverse depth map consistency loss, a 2D normal map consistency loss\n and a joint inverse depth map and normal map regularization. The 2D normal map is 2D vector field capturing the\n orientation of the inverse depth map.\n\n The independent variables of this loss are `self.idepth` and `self.inormal`.\n \"\"\"\n\n def __init__(self,\n image: np.array, idepth: np.array, idepth_range: Tuple[float],\n loss_param: Dict[str, float],\n idepth_confidence: np.array = None,\n inormal: np.array = None,\n idepth_init: np.array = None,\n inormal_init: np.array = None,\n device: torch.device = torch.device('cpu')) -> None:\n \"\"\"`Loss` constructor. 
It considers the inverse depth map and the corresponding 2D normal map.\n\n        Args:\n            image: reference image, arranged as an `(H, W)` or `(H, W, C)` array.\n            idepth: inverse depth map to refine, arranged as an `(H, W)` array.\n            idepth_range: inverse depth values must belong to the interval `[idepth_range[0], idepth_range[1]]`.\n            loss_param: dictionary containing the loss parameters.\n            idepth_confidence: confidence map associated to the inverse depth map to refine.\n                It must have entries in `[0, 1]`.\n            inormal: 2D normal map associated to the depth map to refine, arranged as an `(H, W, 2)` array.\n                It is ignored if the normal consistency loss is off.\n            idepth_init: initial guess for the refined inverse depth map.\n            inormal_init: initial guess for the 2D normal map associated to the refined inverse depth map.\n            device: device on which the computation will take place.\n        \"\"\"\n\n        # Call the parent constructor.\n        super(Loss, self).__init__()\n\n        # Convert the input data from `np.array` to `torch.Tensor`. In particular, arrays are converted into 4D tensors\n        # of size `(1, C, H, W)` with `H`, `W` and `C` representing the height, width and channel number, respectively.\n\n        # Check the inverse depth range and register it.\n        if idepth_range[0] <= 0 or idepth_range[1] == float('inf') or idepth_range[0] > idepth_range[1]:\n            raise ValueError('Invalid inverse depth range.')\n        self.idepth_min = idepth_range[0]\n        self.idepth_max = idepth_range[1]\n\n        # Register the first optimization variable, i.e., the refined inverse depth map, and initialize it.\n        if idepth_init is not None:\n            aux = torch.as_tensor(idepth_init[None, None, ])\n        else:\n            aux = torch.as_tensor(idepth[None, None, ])\n        self.idepth = nn.Parameter(aux.clone(), requires_grad=True)\n        # Note that the data passed to `self.idepth` is copied in order to avoid shared data between different tensors.\n\n        # Register the second optimization variable, i.e., the normal map, and initialize it.\n        if inormal_init is not None:\n            aux = torch.as_tensor((np.transpose(inormal_init, (2, 0, 1))[None, ]).copy())\n        elif inormal is not None:\n            aux = torch.as_tensor((np.transpose(inormal, (2, 0, 1))[None, ]).copy())\n        else:\n            with torch.no_grad():\n                filter_size = 5\n                filter_sigma = 5.0\n                grad_filter = gradient_filter(filter_size, filter_sigma)\n                pad = tuple([int((filter_size - 1) / 2)] * 4)\n                aux = fun.conv2d(\n                    fun.pad(self.idepth, pad, mode='replicate'),\n                    grad_filter.to(self.idepth))\n        self.inormal = nn.Parameter(aux.clone(), requires_grad=True)\n        # The `torch.no_grad()` block prevents PyTorch from tracking the operation.\n\n        # Create the depth consistency loss.\n        self.idepth_consistency_loss = DepthConsistencyL1(\n            idepth, idepth_range,\n            depth_confidence=idepth_confidence,\n            multiplier=loss_param['lambda_depth_consistency'])\n\n        # Create the 2D normal consistency loss.\n        if loss_param['lambda_normal_consistency'] > 0:\n\n            assert inormal is not None, 'Cannot activate the normal consistency term with no input normal map.'\n\n            self.inormal_consistency_loss = NormalConsistencyL1(\n                inormal,\n                normal_confidence=idepth_confidence,\n                multiplier=loss_param['lambda_normal_consistency'])\n\n        else:\n            self.inormal_consistency_loss = None\n\n        # Create the depth regularization loss.\n        self.regularization_loss = PieceWisePlanarRegularization(\n            image,\n            loss_param['gamma_regularization'],\n            window_size=loss_param['window_size'],\n            patch_size=loss_param['patch_size'],\n            sigma_intensity=loss_param['sigma_intensity'],\n            sigma_spatial=loss_param['sigma_spatial'],\n            
degree_max=loss_param['degree_max'],\n version=loss_param['regularization'],\n multiplier=loss_param['lambda_regularization'],\n device=device)\n\n def forward(self) -> Tuple[torch.Tensor, float, float, float]:\n \"\"\"It evaluates the loss function at (`self.idepth`, `self.inormal`).\n\n Returns:\n the loss function value, and the value of its two terms, at (`self.idepth`, `self.inormal`).\n \"\"\"\n\n # Inverse depth consistency loss.\n idepth_consistency_loss = self.idepth_consistency_loss(self.idepth)\n\n # 2D normal consistency loss.\n if self.inormal_consistency_loss is not None:\n inormal_consistency_loss = self.inormal_consistency_loss(self.inormal)\n else:\n inormal_consistency_loss = self.idepth.new_zeros(1, requires_grad=True)\n\n # Regularization loss.\n regularization_loss = self.regularization_loss(self.idepth, self.inormal)\n\n # Assemble the full loss.\n loss = idepth_consistency_loss + inormal_consistency_loss + regularization_loss\n\n return loss, idepth_consistency_loss.item(), inormal_consistency_loss.item(), regularization_loss.item()\n\n\ndef refine_depth(image: np.array, depth: np.array, depth_range: Tuple[float, float],\n camera_param: Dict[str, float], loss_param: List[Dict], opt_param: List[Dict],\n depth_confidence: np.array = None,\n normal: np.array = None,\n depth_init: np.array = None,\n normal_init: np.array = None,\n depth_gt: np.array = None,\n logger: Logger = None,\n device: dev = dev('cpu')) -> Tuple[np.array, np.array]:\n \"\"\"It refines the input depth map and estimates the corresponding normal map in a multi-scale fashion.\n\n It refines the input depth map and estimate the corresponding normal map according to the method described\n in the following article:\n\n Mattia Rossi, Mireille El Gheche, Andreas Kuhn, Pascal Frossard,\n \"Joint Graph-based Depth Refinement and Normal Estimation\",\n in IEEE Computer Vision and Pattern Recognition Conference (CVPR), Seattle, WA, USA, 2020.\n\n If the input depth map comes together with a normal map, the latter can be refined as well (rather than estimated)\n by activating the normal consistency term (not described in the article).\n\n The `loss_param` input parameter contains a list of dictionaries, one for each scale. Each dictionary must contain\n the following keys:\n - lambda_depth_consistency: depth consistency term multiplier.\n - lambda_normal_consistency: normal consistency term multiplier.\n - lambda_regularization: depth regularization term multiplier.\n - gamma_regularization: depth regularization term internal multiplier.\n - window_size: search window size (window_size x window_size) to be used in the graph construction.\n - patch_size: patch size (patch_size x patch_size) to be used in the graph construction.\n - sigma_intensity: color difference standard deviation for patch comparison in the graph construction.\n - sigma_spatial: euclidean distance standard deviation for patch comparison in the graph construction.\n - degree_max: maximum number of per pixel neighbors in the graph.\n - regularization: regularization type (0 for NLTGV, 1 for our regularization).\n\n The `opt_param` input parameter contains a list of dictionaries, one for each scale. 
Each dictionary must contain\n the following keys:\n - iter_max: maximum number of iterations.\n - eps_stop: minimum relative change between the current and the previous iteration depth maps.\n - attempt_max: maximum number of iterations without improving the loss.\n - learning_rate: dictionary containing the following keys:\n - lr_start: initial learning rate.\n - lr_slot_nb: number of partitions; each partition adopts a learning rate which is 1/10 of those employed at\n the previous partition; 0 excludes the relative depth map change stopping criterium.\n - plotting_step: number of steps between two plot updates of the logger.\n - depth_error_threshold: error threshold (in meters) to be used in the evaluation against the ground truth.\n\n Args:\n image: reference image, arranged as an `(H, W)` or `(H, W, C)` array.\n depth: depth map to refine, arranged as an `(H, W)` array.\n depth_range: depth values must belong to the interval `[depth_range[0], depth_range[1]]`.\n camera_param: dictionary containing `f_x`, `f_y`, `c_x`, `c_y`.\n loss_param: list of dictionaries, each one containing the loss parameters for a given scale.\n opt_param: list of dictionaries, each one containing the solver parameters for a given scale.\n depth_confidence: confidence map associated to the depth map to refine. It must have entries in `[0, 1]`.\n normal: 3D normal map to refine, arranged as an `(H, W, 3)` array. It is ignored if the normal consistency loss is off.\n depth_init: initial guess for the refined depth map.\n normal_init: initial guess for the 3D normal map associated to the refined depth map.\n depth_gt: ground truth depth map, arranged as an `(H, W)` array.\n logger: logger to plot visual results and statistics at runtime.\n device: device on which the computation will take place.\n\n Returns:\n The refined depth map and the corresponding normal map.\n \"\"\"\n\n # Number of scales in the multi-scale pyramid.\n scale_nb = len(opt_param)\n\n # Allocate the multi-scale pyramid.\n scale_pyramid = [None] * scale_nb\n camera_param_pyramid = [None] * scale_nb\n image_pyramid = [None] * scale_nb\n depth_pyramid = [None] * scale_nb\n depth_confidence_pyramid = [None] * scale_nb\n normal_pyramid = [None] * scale_nb\n depth_init_pyramid = [None] * scale_nb\n normal_init_pyramid = [None] * scale_nb\n depth_gt_pyramid = [None] * scale_nb\n\n # Build the multi-scale pyramid.\n for i in range(scale_nb):\n\n if i > 0:\n\n # Compute the image dimensions for the current scale.\n height = int(round(scale_pyramid[i - 1][0] / 2.0))\n width = int(round(scale_pyramid[i - 1][1] / 2.0))\n scale_pyramid[i] = (height, width)\n\n # Compute the camera parameters for the current scale.\n x_ratio = scale_pyramid[i][1] / scale_pyramid[i - 1][1]\n y_ratio = scale_pyramid[i][0] / scale_pyramid[i - 1][0]\n camera_param_pyramid[i] = {'f_x': camera_param_pyramid[i - 1]['f_x'] * x_ratio,\n 'f_y': camera_param_pyramid[i - 1]['f_y'] * y_ratio,\n 'c_x': camera_param_pyramid[i - 1]['c_x'] * x_ratio,\n 'c_y': camera_param_pyramid[i - 1]['c_y'] * y_ratio}\n\n # Downscale the image.\n image_pyramid[i] = resize_map(image_pyramid[i - 1], scale_pyramid[i], order=1)\n\n # Downscale the noisy/incomplete depth map.\n depth_pyramid[i] = resize_map(depth_pyramid[i - 1], scale_pyramid[i], order=0)\n\n # Downscale the noisy/incomplete depth map confidence.\n if depth_confidence_pyramid[i - 1] is not None:\n depth_confidence_pyramid[i] = resize_map(depth_confidence_pyramid[i - 1], scale_pyramid[i], order=0)\n else:\n depth_confidence_pyramid[i] = 
None\n\n # Downscale the noisy/incomplete normal map.\n if normal_pyramid[i - 1] is not None:\n normal_pyramid[i] = resize_map(normal_pyramid[i - 1], scale_pyramid[i], order=0)\n\n else:\n normal_pyramid[i] = None\n\n # Downscale the initial depth map estimate (we need only the lowest scale).\n if depth_init_pyramid[i - 1] is not None:\n depth_init_pyramid[i] = resize_map(depth_init_pyramid[i - 1], scale_pyramid[i], order=0)\n depth_init_pyramid[i - 1] = None\n else:\n depth_init_pyramid[i] = None\n\n # Downscale the initial normal map estimate (we need only the lowest scale).\n if normal_init_pyramid[i - 1] is not None:\n normal_init_pyramid[i] = resize_map(normal_init_pyramid[i - 1], scale_pyramid[i], order=0)\n normal_init_pyramid[i - 1] = None\n else:\n normal_init_pyramid[i] = None\n\n # Downscale the ground truth depth map.\n if depth_gt_pyramid[i - 1] is not None:\n depth_gt_pyramid[i] = resize_map(depth_gt_pyramid[i - 1], scale_pyramid[i], order=0)\n else:\n depth_gt_pyramid[i] = None\n\n else:\n\n # Store the original image dimensions.\n scale_pyramid[i] = (image.shape[0], image.shape[1])\n\n # Store the original camera parameters.\n camera_param_pyramid[i] = camera_param\n\n # The lowest scale hosts the original data.\n image_pyramid[i] = image\n depth_pyramid[i] = depth\n depth_confidence_pyramid[i] = depth_confidence\n normal_pyramid[i] = normal\n depth_init_pyramid[i] = depth_init\n normal_init_pyramid[i] = normal_init\n depth_gt_pyramid[i] = depth_gt\n\n # Reverse the multi-scale pyramid.\n scale_pyramid.reverse()\n camera_param_pyramid.reverse()\n image_pyramid.reverse()\n depth_pyramid.reverse()\n depth_confidence_pyramid.reverse()\n normal_pyramid.reverse()\n depth_init_pyramid.reverse() # It contains only the lowest scale.\n normal_init_pyramid.reverse() # It contains only the lowest scale.\n depth_gt_pyramid.reverse()\n\n # Perform the multi-scale depth refinement.\n scale_name_pyramid = [None] * scale_nb\n depth_refined_pyramid = [None] * scale_nb\n normal_refined_pyramid = [None] * scale_nb\n for i in range(scale_nb):\n\n scale_name_pyramid[i] = ('{} ({}x{})'.format(i, scale_pyramid[i][0], scale_pyramid[i][1]))\n print('Processing scale {}'.format(scale_name_pyramid[i]))\n\n # Setup a new plotting environment.\n if logger is not None:\n\n if depth_gt_pyramid[i] is not None:\n depth_plotting_range = (np.min(depth_gt_pyramid[i]).item(), np.max(depth_gt_pyramid[i]).item())\n else:\n depth_plotting_range = np.percentile(depth, [5, 95])\n logger.setup(env_name=scale_name_pyramid[i], depth_range=depth_plotting_range)\n\n # Initialize the next scale with the refined depth map and the corresponding normal map from the previous scale.\n # The two maps are up-sampled first.\n if i > 0:\n depth_init_pyramid[i] = resize_map(depth_refined_pyramid[i - 1], scale_pyramid[i], order=0)\n if normal_refined_pyramid[i - 1] is not None:\n normal_init_pyramid[i] = resize_map(normal_refined_pyramid[i - 1], scale_pyramid[i], order=0)\n\n # Refine the depth map of the current scale.\n depth_refined, normal_refined = refine(\n image_pyramid[i], depth_pyramid[i], depth_range,\n camera_param_pyramid[i], loss_param[i], opt_param[i],\n depth_confidence=depth_confidence_pyramid[i],\n depth_init=depth_init_pyramid[i],\n normal=normal_pyramid[i],\n normal_init=normal_init_pyramid[i],\n depth_gt=depth_gt_pyramid[i],\n logger=logger,\n device=device)\n\n depth_refined_pyramid[i] = depth_refined\n normal_refined_pyramid[i] = normal_refined\n\n # Extract the refined depth map and the corresponding normal 
map.\n depth_refined = depth_refined_pyramid[-1]\n normal_refined = normal_refined_pyramid[-1]\n\n # Delete all the plotting environments.\n if logger is not None:\n for i in range(scale_nb):\n logger.vis.delete_env(scale_name_pyramid[i])\n\n return depth_refined, normal_refined\n\n\ndef refine(image: np.array, depth: np.array, depth_range: Tuple[float, float],\n camera_param: Dict[str, float], loss_param: Dict, opt_param: Dict,\n depth_confidence: np.array = None,\n normal: np.array = None,\n depth_init: np.array = None,\n normal_init: np.array = None,\n depth_gt: np.array = None,\n logger: Logger = None,\n device: dev = dev('cpu')) -> Tuple[np.array, np.array]:\n \"\"\"It implements one scale of the multi-scale pyramid of the function `refine_depth`.\n\n Args:\n image: reference image, arranged as an `(H, W)` or `(H, W, C)` array.\n depth: depth map to refine, arranged as an `(H, W)` array.\n depth_range: depth values must belong to the interval `[depth_range[0], depth_range[1]]`.\n camera_param: dictionary containing `f_x`, `f_y`, `c_x`, `c_y`.\n loss_param: dictionary containing the loss parameters.\n opt_param: dictionary containing the solver parameters.\n depth_confidence: confidence map associated to the depth map to refine. It must have entries in `[0, 1]`.\n normal: 3D normal map to refine, arranged as an `(H, W, 3)` array. It is ignored if the normal consistency loss is off.\n depth_init: initial guess for the refined depth map.\n normal_init: initial guess for the 3D normal map associated to the refined depth map.\n depth_gt: ground truth depth map, arranged as an `(H, W)` array.\n logger: logger to plot visual results and statistics at runtime.\n device: device on which the computation will take place.\n\n Returns:\n The refined depth map and the corresponding normal map.\n \"\"\"\n\n # Check that the input maps have the same height and width of the input reference image.\n height = image.shape[0]\n width = image.shape[1]\n assert depth.shape == (height, width),\\\n 'Input depth map size not compatible with the reference image one.'\n if depth_confidence is not None:\n assert depth_confidence.shape == (height, width),\\\n 'Input depth map confidence size not compatible with the reference image one.'\n if normal is not None:\n assert normal.shape == (height, width, 3),\\\n 'Input normal map size not compatible with the reference image one.'\n if depth_init is not None:\n assert depth_init.shape == (height, width),\\\n 'Input initial depth map size not compatible with the reference image one.'\n if normal_init is not None:\n assert normal_init.shape == (height, width, 3),\\\n 'Input initial normal map size not compatible with the reference image one.'\n if depth_gt is not None:\n assert depth_gt.shape == (height, width),\\\n 'Ground truth depth size not compatible with the reference image one.'\n\n # Check the depth map data type.\n if depth.dtype == np.float32:\n depth_dtype = torch.float\n elif depth.dtype == np.float64:\n depth_dtype = torch.double\n else:\n raise TypeError('The input depth map must be either of type double or float.')\n\n # Convert the reference image to gray scale.\n image_gray = image\n if image_gray.ndim == 3:\n image_gray = cvtColor(image_gray.astype(np.float32), COLOR_RGB2GRAY)\n image_gray = image_gray.astype(image.dtype)\n # The function `cvtColor` requires an input image of type uint8, uint16 or float32. 
Therefore, `image_gray` is\n # first converted to float32 (to minimize the precision loss) and then back to its original data type.\n\n # Plot.\n if logger is not None:\n logger.plot(\n texture=image,\n depth=depth,\n depth_init=depth_init,\n depth_gt=depth_gt,\n normal=normal,\n normal_init=normal_init)\n\n # Convert the depth maps.\n idepth = depth2depth_inv(depth)\n idepth_init = depth2depth_inv(depth_init) if depth_init is not None else None\n idepth_range = depth_range2depth_inv_range(depth_range)\n\n # Convert the normal maps.\n inormal = None\n inormal_init = None\n if normal is not None:\n inormal = space2plane_normal(\n depth,\n normal,\n (camera_param['f_x'], camera_param['f_y']),\n (camera_param['c_x'], camera_param['c_y']))\n if normal_init is not None:\n inormal_init = space2plane_normal(\n depth_init if depth_init is not None else depth,\n normal_init,\n (camera_param['f_x'], camera_param['f_y']),\n (camera_param['c_x'], camera_param['c_y']))\n\n # Create the loss object.\n loss = Loss(image_gray, idepth, idepth_range,\n loss_param,\n idepth_confidence=depth_confidence,\n inormal=inormal,\n idepth_init=idepth_init,\n inormal_init=inormal_init,\n device=device).to(device=device, dtype=depth_dtype)\n\n # Set the maximum number of iterations.\n assert 'iter_max' in opt_param, 'Missing \\'iter_max\\' in `opt_param`.'\n iter_max = opt_param['iter_max']\n\n # Set the learning rate and define the optimization policy (i.e., with oir without scheduler).\n assert 'learning_rate' in opt_param, 'Missing \\'learning_rate\\' in `opt_param.'\n assert 'lr_start' in opt_param['learning_rate'], 'Missing \\'lr\\' in `opt_param[\\'learning_rate\\']`.'\n assert 'lr_slot_nb' in opt_param['learning_rate'], 'Missing \\'slot_nb\\' in `opt_param[\\'learning_rate\\']`.'\n learning_rate_start = opt_param['learning_rate']['lr_start']\n learning_rate_slot_nb = opt_param['learning_rate']['lr_slot_nb']\n\n # Define stopping condition.\n if learning_rate_slot_nb < 1:\n\n # The learning rate is kept constant.\n\n # The optimization terminates in one of the following event occurs:\n # - the relative depth change is smaller than `eps_stop`,\n # - the loss is not improved for more than `attempt_max` consecutive iterations,\n # - `iter_max` iterations have been performed.\n\n assert 'eps_stop' in opt_param, 'Missing \\'eps_stop\\' in `opt_param.'\n assert 'attempt_max' in opt_param, 'Missing \\'attempt_max\\' in `opt_param.'\n\n eps_stop = opt_param['eps_stop']\n attempt_max = opt_param['attempt_max']\n scheduler_step_size = iter_max * 2\n\n else:\n\n # The learning rate is dynamically updated.\n\n # The optimization terminates only when `iter_max` iterations have been performed.\n # However, in this scenario the learning rate is progressively decreased:\n # - the learning rate starts at `learning_rate_start`,\n # - it is decreased `learning_rate_slot_nb - 1` times by a factor `10`.\n\n eps_stop = 0.0\n attempt_max = float('inf')\n scheduler_step_size = int(math.ceil(float(iter_max) / float(learning_rate_slot_nb)))\n\n # Set the plotting step.\n assert 'plotting_step' in opt_param, 'Missing \\'plotting_step\\' in `opt_param.'\n plotting_step = opt_param['plotting_step']\n\n # Allocate an array to store the loss function values.\n loss_history = np.zeros(iter_max + 1)\n idepth_consistency_history = np.zeros(iter_max + 1)\n inormal_consistency_history = np.zeros(iter_max + 1) if loss_param['lambda_normal_consistency'] > 0 else None\n regularization_history = np.zeros(iter_max + 1)\n\n # Create an ADAM optimizer.\n 
optimizer = torch.optim.Adam(loss.parameters(), lr=learning_rate_start)\n\n # Create a learning rate scheduler.\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, scheduler_step_size, gamma=0.1)\n\n ####################################################################################################################\n ################################################# OPTIMIZATION #####################################################\n ####################################################################################################################\n\n # Lowest minimum value of the loss encountered during the optimization.\n loss_value_min = float('inf')\n\n # Number of consecutive iterations without improving `loss_value_min`.\n attempt_counter = 0\n\n # Relative change of the depth map between two consecutive iterations.\n relative_depth_change = float('inf')\n\n ################################################# CASE `i == 0` ####################################################\n\n # Evaluate the loss function.\n optimizer.zero_grad()\n loss_value, idepth_consistency_value, inormal_consistency_value, regularization_value = loss.forward()\n\n # Log operations.\n with torch.no_grad():\n\n # Store the current value of the loss.\n idepth_consistency_history[0] = idepth_consistency_value\n if inormal_consistency_history is not None:\n inormal_consistency_history[0] = inormal_consistency_value\n regularization_history[0] = regularization_value\n loss_history[0] = loss_value.item()\n\n # Log the optimization status to the standard output.\n print('Iteration: {:6}, Fails: {:3}, Rel. depth change: {:.6f}, Loss: {:.6f}'.format(\n 0, attempt_counter, relative_depth_change, loss_history[0]), flush=True)\n\n # Plot the optimization status.\n indexes = np.arange(0, 1)\n if logger is not None:\n depth_aux = depth_inv2depth(\n loss.idepth.data.to('cpu').squeeze().numpy(), depth_range)\n normal_aux = plane2space_normal(\n depth_aux,\n np.transpose(loss.inormal.data.to('cpu').squeeze().numpy(), (1, 2, 0)),\n (camera_param['f_x'], camera_param['f_y']),\n (camera_param['c_x'], camera_param['c_y']))\n logger.plot(\n depth_refined=depth_aux,\n normal_refined=normal_aux,\n idepth_consistency_loss=(indexes, idepth_consistency_history[indexes]),\n inormal_consistency_loss=((indexes, inormal_consistency_history[indexes])\n if inormal_consistency_history is not None else None),\n regularization_loss=(indexes, regularization_history[indexes]),\n global_loss=(indexes, loss_history[indexes]))\n\n ################################################# CASE `i > 0` #####################################################\n\n for i in range(1, iter_max + 1):\n\n # Compute the gradient of each parameter of the loss (i.e., the depth map and the normal maps).\n loss_value.backward()\n\n # Store a copy of the old depth map.\n idepth_old = loss.idepth.clone().detach()\n\n # Update the old depth map.\n optimizer.step()\n\n # Update the optimizer learning rate.\n scheduler.step()\n\n # Without PyTorch tracking, project the new depth map into the specified depth range.\n with torch.no_grad():\n loss.idepth.data = loss.idepth.data.clamp(idepth_range[0], idepth_range[1])\n\n # Evaluate the loss function at the new depth map and normal map.\n optimizer.zero_grad()\n loss_value, idepth_consistency_value, inormal_consistency_value, regularization_value = loss.forward()\n\n # Without PyTorch tracking, perform some routines.\n with torch.no_grad():\n\n # Store the value of the loss evaluated at the new depth map.\n 
idepth_consistency_history[i] = idepth_consistency_value\n if inormal_consistency_history is not None:\n inormal_consistency_history[i] = inormal_consistency_value\n regularization_history[i] = regularization_value\n loss_history[i] = loss_value.item()\n\n # Compute the relative depth map change.\n relative_depth_change = torch.norm(\n (idepth_old - loss.idepth).view(-1, 1)) / torch.norm(idepth_old.view(-1, 1))\n\n # Update the lowest encountered minimum.\n if loss_history[i] >= loss_value_min:\n attempt_counter = attempt_counter + 1\n else:\n attempt_counter = 0\n loss_value_min = loss_history[i]\n\n # Evaluate the stopping condition.\n stop_now = (relative_depth_change <= eps_stop) or (attempt_counter >= attempt_max)\n\n if (i % plotting_step == 0) or stop_now or ((i + 1) > iter_max):\n\n # Log the optimization status to the standard output.\n print('Iteration: {:6}, Fails: {:3}, Rel. depth change: {:.6f}, Loss: {:.6f}'.format(\n i, attempt_counter, relative_depth_change, loss_history[i]), flush=True)\n\n # Plot the optimization status.\n indexes = np.arange(i - (plotting_step - 1), i + 1) # The index `i` is included.\n if logger is not None:\n depth_aux = depth_inv2depth(\n loss.idepth.data.to('cpu').squeeze().numpy(), depth_range)\n normal_aux = plane2space_normal(\n depth_aux,\n np.transpose(loss.inormal.data.to('cpu').squeeze().numpy(), (1, 2, 0)),\n (camera_param['f_x'], camera_param['f_y']),\n (camera_param['c_x'], camera_param['c_y']))\n logger.plot(\n depth_refined=depth_aux,\n normal_refined=normal_aux,\n idepth_consistency_loss=(indexes, idepth_consistency_history[indexes]),\n inormal_consistency_loss=((indexes, inormal_consistency_history[indexes])\n if inormal_consistency_history is not None else None),\n regularization_loss=(indexes, regularization_history[indexes]),\n global_loss=(indexes, loss_history[indexes]))\n\n # If the stopping condition is met, terminate.\n if stop_now:\n break\n\n ####################################################################################################################\n ####################################################################################################################\n ####################################################################################################################\n\n # Extract the refined depth map.\n depth_refined = depth_inv2depth(\n loss.idepth.detach().to('cpu').numpy().squeeze(), depth_range)\n\n # Extract the normal map associated to the refined depth map.\n normal_refined = plane2space_normal(\n depth_refined,\n np.transpose(loss.inormal.detach().to('cpu').numpy().squeeze(), (1, 2, 0)),\n (camera_param['f_x'], camera_param['f_y']),\n (camera_param['c_x'], camera_param['c_y']))\n\n return depth_refined, normal_refined\n" ]
[ [ "numpy.max", "torch.device", "torch.optim.lr_scheduler.StepLR", "numpy.zeros", "numpy.percentile", "torch.no_grad", "numpy.min", "numpy.arange", "numpy.transpose", "torch.as_tensor", "torch.nn.functional.pad" ] ]
suryaavala/datasets
[ "bf04ec1ad4b889d05c1dc22bc8eebc2b1ce8ea47" ]
[ "tensorflow_datasets/core/utils/py_utils.py" ]
[ "# coding=utf-8\n# Copyright 2021 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Some python utils function and classes.\"\"\"\n\nimport base64\nimport contextlib\nimport functools\nimport io\nimport itertools\nimport logging\nimport os\nimport random\nimport shutil\nimport string\nimport sys\nimport textwrap\nimport threading\nimport typing\nfrom typing import Any, Callable, Iterator, List, NoReturn, Optional, Tuple, Type, TypeVar, Union\nimport uuid\n\nfrom six.moves import urllib\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_datasets.core import constants\nfrom tensorflow_datasets.core import file_adapters\nfrom tensorflow_datasets.core.utils import type_utils\n\nTree = type_utils.Tree\n\n# NOTE: When used on an instance method, the cache is shared across all\n# instances and IS NOT per-instance.\n# See\n# https://stackoverflow.com/questions/14946264/python-lru-cache-decorator-per-instance\n# For @property methods, use @memoized_property below.\nmemoize = functools.lru_cache\n\nT = TypeVar('T')\n\nFn = TypeVar('Fn', bound=Callable[..., Any])\n\n\ndef is_notebook():\n \"\"\"Returns True if running in a notebook (Colab, Jupyter) environment.\"\"\"\n # Inspired from the tqdm autonotebook code\n try:\n # Use sys.module as we do not want to trigger import\n IPython = sys.modules['IPython'] # pylint: disable=invalid-name\n if 'IPKernelApp' not in IPython.get_ipython().config:\n return False # Run in a IPython terminal\n except: # pylint: disable=bare-except\n return False\n else:\n return True\n\n\[email protected]\ndef temporary_assignment(obj, attr, value):\n \"\"\"Temporarily assign obj.attr to value.\"\"\"\n original = getattr(obj, attr)\n setattr(obj, attr, value)\n try:\n yield\n finally:\n setattr(obj, attr, original)\n\n\ndef zip_dict(*dicts):\n \"\"\"Iterate over items of dictionaries grouped by their keys.\"\"\"\n for key in set(itertools.chain(*dicts)): # set merge all keys\n # Will raise KeyError if the dict don't have the same keys\n yield key, tuple(d[key] for d in dicts)\n\n\[email protected]\ndef disable_logging():\n \"\"\"Temporarily disable the logging.\"\"\"\n logger = logging.getLogger()\n logger_disabled = logger.disabled\n logger.disabled = True\n try:\n yield\n finally:\n logger.disabled = logger_disabled\n\n\nclass NonMutableDict(dict):\n \"\"\"Dict where keys can only be added but not modified.\n\n Will raise an error if the user try to overwrite one key. The error message\n can be customized during construction. 
It will be formatted using {key} for\n the overwritten key.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._error_msg = kwargs.pop(\n 'error_msg',\n 'Try to overwrite existing key: {key}',\n )\n if kwargs:\n raise ValueError('NonMutableDict cannot be initialized with kwargs.')\n super(NonMutableDict, self).__init__(*args, **kwargs)\n\n def __setitem__(self, key, value):\n if key in self:\n raise ValueError(self._error_msg.format(key=key))\n return super(NonMutableDict, self).__setitem__(key, value)\n\n def update(self, other):\n if any(k in self for k in other):\n raise ValueError(self._error_msg.format(key=set(self) & set(other)))\n return super(NonMutableDict, self).update(other)\n\n\nclass classproperty(property): # pylint: disable=invalid-name\n \"\"\"Descriptor to be used as decorator for @classmethods.\"\"\"\n\n def __get__(self, obj, objtype=None):\n return self.fget.__get__(None, objtype)() # pytype: disable=attribute-error\n\n\nclass memoized_property(property): # pylint: disable=invalid-name\n \"\"\"Descriptor that mimics @property but caches output in member variable.\"\"\"\n\n def __get__(self, obj, objtype=None):\n # See https://docs.python.org/3/howto/descriptor.html#properties\n if obj is None:\n return self\n if self.fget is None: # pytype: disable=attribute-error\n raise AttributeError('unreadable attribute')\n attr = '__cached_' + self.fget.__name__ # pytype: disable=attribute-error\n cached = getattr(obj, attr, None)\n if cached is None:\n cached = self.fget(obj) # pytype: disable=attribute-error\n setattr(obj, attr, cached)\n return cached\n\n\nif typing.TYPE_CHECKING:\n # TODO(b/171883689): There is likelly better way to annotate descriptors\n\n def classproperty(fn: Callable[[Type[Any]], T]) -> T: # pylint: disable=function-redefined\n return fn(type(None))\n\n def memoized_property(fn: Callable[[Any], T]) -> T: # pylint: disable=function-redefined\n return fn(None)\n\n\ndef map_nested(function, data_struct, dict_only=False, map_tuple=False):\n \"\"\"Apply a function recursively to each element of a nested data struct.\"\"\"\n\n # Could add support for more exotic data_struct, like OrderedDict\n if isinstance(data_struct, dict):\n return {\n k: map_nested(function, v, dict_only, map_tuple)\n for k, v in data_struct.items()\n }\n elif not dict_only:\n types_ = [list]\n if map_tuple:\n types_.append(tuple)\n if isinstance(data_struct, tuple(types_)):\n mapped = [\n map_nested(function, v, dict_only, map_tuple) for v in data_struct\n ]\n if isinstance(data_struct, list):\n return mapped\n else:\n return tuple(mapped)\n # Singleton\n return function(data_struct)\n\n\ndef zip_nested(arg0, *args, **kwargs):\n \"\"\"Zip data struct together and return a data struct with the same shape.\"\"\"\n # Python 2 do not support kwargs only arguments\n dict_only = kwargs.pop('dict_only', False)\n assert not kwargs\n\n # Could add support for more exotic data_struct, like OrderedDict\n if isinstance(arg0, dict):\n return {\n k: zip_nested(*a, dict_only=dict_only)\n for k, a in zip_dict(arg0, *args)\n }\n elif not dict_only:\n if isinstance(arg0, list):\n return [zip_nested(*a, dict_only=dict_only) for a in zip(arg0, *args)]\n # Singleton\n return (arg0,) + args\n\n\ndef flatten_nest_dict(d):\n \"\"\"Return the dict with all nested keys flattened joined with '/'.\"\"\"\n # Use NonMutableDict to ensure there is no collision between features keys\n flat_dict = NonMutableDict()\n for k, v in d.items():\n if isinstance(v, dict):\n flat_dict.update({\n '{}/{}'.format(k, k2): v2 for 
k2, v2 in flatten_nest_dict(v).items()\n })\n else:\n flat_dict[k] = v\n return flat_dict\n\n\n# Note: Could use `tree.flatten_with_path` instead, but makes it harder for\n# users to compile from source.\ndef flatten_with_path(\n structure: Tree[T],\n) -> Iterator[Tuple[Tuple[Union[str, int], ...], T]]: # pytype: disable=invalid-annotation\n \"\"\"Convert a TreeDict into a flat list of paths and their values.\n\n ```py\n flatten_with_path({'a': {'b': v}}) == [(('a', 'b'), v)]\n ```\n\n Args:\n structure: Nested input structure\n\n Yields:\n The `(path, value)` tuple, with path being the tuple of `dict` keys and\n `list` indexes\n \"\"\"\n if isinstance(structure, dict):\n key_struct_generator = sorted(structure.items())\n elif isinstance(structure, (list, tuple)):\n key_struct_generator = enumerate(structure)\n else:\n key_struct_generator = None # End of recursion\n\n if key_struct_generator is not None:\n for key, sub_structure in key_struct_generator:\n # Recurse into sub-structures\n for sub_path, sub_value in flatten_with_path(sub_structure):\n yield (key,) + sub_path, sub_value\n else:\n yield (), structure # Leaf, yield value\n\n\ndef dedent(text):\n \"\"\"Wrapper around `textwrap.dedent` which also `strip()`s and handles `None`.\"\"\"\n return textwrap.dedent(text).strip() if text else text\n\n\ndef indent(text: str, indent: str) -> str: # pylint: disable=redefined-outer-name\n text = dedent(text)\n return text.replace('\\n', '\\n' + indent)\n\n\ndef pack_as_nest_dict(flat_d, nest_d):\n \"\"\"Pack a 1-lvl dict into a nested dict with the same structure as `nest_d`.\"\"\"\n nest_out_d = {}\n for k, v in nest_d.items():\n if isinstance(v, dict):\n v_flat = flatten_nest_dict(v)\n sub_d = {\n k2: flat_d.pop('{}/{}'.format(k, k2)) for k2, _ in v_flat.items()\n }\n # Recursively pack the dictionary\n nest_out_d[k] = pack_as_nest_dict(sub_d, v)\n else:\n nest_out_d[k] = flat_d.pop(k)\n if flat_d: # At the end, flat_d should be empty\n raise ValueError(\n 'Flat dict structure does not match the nested dict. 
Extra keys: '\n '{}'.format(list(flat_d.keys())))\n return nest_out_d\n\n\[email protected]\ndef nullcontext(enter_result: T = None) -> Iterator[T]:\n \"\"\"Backport of `contextlib.nullcontext`.\"\"\"\n yield enter_result\n\n\ndef _get_incomplete_path(filename):\n \"\"\"Returns a temporary filename based on filename.\"\"\"\n random_suffix = ''.join(\n random.choice(string.ascii_uppercase + string.digits) for _ in range(6))\n return filename + '.incomplete' + random_suffix\n\n\[email protected]\ndef incomplete_dir(dirname: type_utils.PathLike) -> Iterator[str]:\n \"\"\"Create temporary dir for dirname and rename on exit.\"\"\"\n dirname = os.fspath(dirname)\n tmp_dir = _get_incomplete_path(dirname)\n tf.io.gfile.makedirs(tmp_dir)\n try:\n yield tmp_dir\n tf.io.gfile.rename(tmp_dir, dirname)\n finally:\n if tf.io.gfile.exists(tmp_dir):\n tf.io.gfile.rmtree(tmp_dir)\n\n\[email protected]\ndef incomplete_file(\n path: type_utils.ReadWritePath,) -> Iterator[type_utils.ReadWritePath]:\n \"\"\"Writes to path atomically, by writing to temp file and renaming it.\"\"\"\n tmp_path = path.parent / f'{path.name}.incomplete.{uuid.uuid4().hex}'\n try:\n yield tmp_path\n tmp_path.replace(path)\n finally:\n # Eventually delete the tmp_path if exception was raised\n tmp_path.unlink(missing_ok=True)\n\n\[email protected]\ndef atomic_write(path, mode):\n \"\"\"Writes to path atomically, by writing to temp file and renaming it.\"\"\"\n tmp_path = '%s%s_%s' % (path, constants.INCOMPLETE_SUFFIX, uuid.uuid4().hex)\n with tf.io.gfile.GFile(tmp_path, mode) as file_:\n yield file_\n tf.io.gfile.rename(tmp_path, path, overwrite=True)\n\n\ndef reraise(\n e: Exception,\n prefix: Optional[str] = None,\n suffix: Optional[str] = None,\n) -> NoReturn:\n \"\"\"Reraise an exception with an additional message.\"\"\"\n prefix = prefix or ''\n suffix = '\\n' + suffix if suffix else ''\n\n # If unsure about modifying the function inplace, create a new exception\n # and stack it in the chain.\n if (\n # Exceptions with custom error message\n type(e).__str__ is not BaseException.__str__\n # This should never happens unless the user plays with Exception\n # internals\n or not hasattr(e, 'args') or not isinstance(e.args, tuple)):\n msg = f'{prefix}{e}{suffix}'\n # Could try to dynamically create a\n # `type(type(e).__name__, (ReraisedError, type(e)), {})`, but should be\n # carefull when nesting `reraise` as well as compatibility with external\n # code.\n # Some base exception class (ImportError, OSError) and subclasses (\n # ModuleNotFoundError, FileNotFoundError) have custom `__str__` error\n # message. We re-raise those with same type to allow except in caller code.\n if isinstance(e, (ImportError, OSError)):\n exception = type(e)(msg)\n else:\n exception = RuntimeError(f'{type(e).__name__}: {msg}')\n raise exception from e\n # Otherwise, modify the exception in-place\n elif len(e.args) <= 1:\n exception_msg = e.args[0] if e.args else ''\n e.args = (f'{prefix}{exception_msg}{suffix}',)\n raise # pylint: disable=misplaced-bare-raise\n # If there is more than 1 args, concatenate the message with other args\n else:\n e.args = tuple(\n p for p in (prefix,) + e.args + (suffix,)\n if not isinstance(p, str) or p)\n raise # pylint: disable=misplaced-bare-raise\n\n\[email protected]\ndef try_reraise(*args, **kwargs):\n \"\"\"Context manager which reraise exceptions with an additional message.\n\n Contrary to `raise ... 
from ...` and `raise Exception().with_traceback(tb)`,\n this function tries to modify the original exception, to avoid nested\n `During handling of the above exception, another exception occurred:`\n stacktraces.\n\n Args:\n *args: Prefix to add to the exception message\n **kwargs: Prefix to add to the exception message\n\n Yields:\n None\n \"\"\"\n try:\n yield\n except Exception as e: # pylint: disable=broad-except\n reraise(e, *args, **kwargs)\n\n\ndef rgetattr(obj, attr, *args):\n \"\"\"Get attr that handles dots in attr name.\"\"\"\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split('.'))\n\n\ndef has_sufficient_disk_space(needed_bytes, directory='.'):\n try:\n free_bytes = shutil.disk_usage(os.path.abspath(directory)).free\n except OSError:\n return True\n return needed_bytes < free_bytes\n\n\ndef get_class_path(cls, use_tfds_prefix=True):\n \"\"\"Returns path of given class or object. Eg: `tfds.image.cifar.Cifar10`.\"\"\"\n if not isinstance(cls, type):\n cls = cls.__class__\n module_path = cls.__module__\n if use_tfds_prefix and module_path.startswith('tensorflow_datasets'):\n module_path = 'tfds' + module_path[len('tensorflow_datasets'):]\n return '.'.join([module_path, cls.__name__])\n\n\ndef get_class_url(cls):\n \"\"\"Returns URL of given class or object.\"\"\"\n cls_path = get_class_path(cls, use_tfds_prefix=False)\n module_path, unused_class_name = cls_path.rsplit('.', 1)\n module_path = module_path.replace('.', '/')\n return constants.SRC_BASE_URL + module_path + '.py'\n\n\ndef build_synchronize_decorator() -> Callable[[Fn], Fn]:\n \"\"\"Returns a decorator which prevents concurrent calls to functions.\n\n Usage:\n synchronized = build_synchronize_decorator()\n\n @synchronized\n def read_value():\n ...\n\n @synchronized\n def write_value(x):\n ...\n\n Returns:\n make_threadsafe (fct): The decorator which lock all functions to which it\n is applied under a same lock\n \"\"\"\n lock = threading.Lock()\n\n def lock_decorator(fn: Fn) -> Fn:\n\n @functools.wraps(fn)\n def lock_decorated(*args, **kwargs):\n with lock:\n return fn(*args, **kwargs)\n\n return lock_decorated\n\n return lock_decorator\n\n\ndef basename_from_url(url: str) -> str:\n \"\"\"Returns file name of file at given url.\"\"\"\n filename = urllib.parse.urlparse(url).path\n filename = os.path.basename(filename)\n # Replace `%2F` (html code for `/`) by `_`.\n # This is consistent with how Chrome rename downloaded files.\n filename = filename.replace('%2F', '_')\n return filename or 'unknown_name'\n\n\ndef list_info_files(dir_path: type_utils.PathLike) -> List[str]:\n \"\"\"Returns name of info files within dir_path.\"\"\"\n path = os.fspath(dir_path)\n return [\n fname for fname in tf.io.gfile.listdir(path)\n if not tf.io.gfile.isdir(os.path.join(path, fname)) and\n not file_adapters.is_example_file(fname)\n ]\n\n\ndef get_base64(write_fn: Union[bytes, Callable[[io.BytesIO], None]],) -> str:\n \"\"\"Extracts the base64 string of an object by writing into a tmp buffer.\"\"\"\n if isinstance(write_fn, bytes): # Value already encoded\n bytes_value = write_fn\n else:\n buffer = io.BytesIO()\n write_fn(buffer)\n bytes_value = buffer.getvalue()\n return base64.b64encode(bytes_value).decode('ascii') # pytype: disable=bad-return-type\n\n\[email protected]\ndef add_sys_path(path: type_utils.PathLike) -> Iterator[None]:\n \"\"\"Temporary add given path to `sys.path`.\"\"\"\n path = os.fspath(path)\n try:\n sys.path.insert(0, path)\n yield\n finally:\n 
sys.path.remove(path)\n" ]
[ [ "tensorflow.compat.v2.io.gfile.makedirs", "tensorflow.compat.v2.io.gfile.exists", "tensorflow.compat.v2.io.gfile.rename", "tensorflow.compat.v2.io.gfile.listdir", "tensorflow.compat.v2.io.gfile.rmtree", "tensorflow.compat.v2.io.gfile.GFile" ] ]
vishalbelsare/crosscat
[ "1f2ac5a43a50ebd7aaa89f0c5ac3815a170848c5" ]
[ "scripts/cython_code/test_multinomial_impute.py" ]
[ "#\n# Copyright (c) 2010-2016, MIT Probabilistic Computing Project\n#\n# Lead Developers: Dan Lovell and Jay Baxter\n# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka\n# Research Leads: Vikash Mansinghka, Patrick Shafto\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport argparse\nimport sys\nfrom collections import Counter\n#\nimport numpy\nimport pylab\npylab.ion()\npylab.show()\n#\nimport crosscat.utils.file_utils as fu\nimport crosscat.utils.sample_utils as su\n\n\n# parse some arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('pkl_name', type=str)\nparser.add_argument('--inf_seed', default=0, type=int)\nargs = parser.parse_args(['/usr/local/crosscat/cython_code/iter_90_pickled_state.pkl.gz'])\npkl_name = args.pkl_name\ninf_seed = args.inf_seed\n\nrandom_state = numpy.random.RandomState(inf_seed)\n# FIXME: getting weird error on conversion to int: too large from inside pyx\ndef get_next_seed(max_val=32767): # sys.maxint):\n return random_state.randint(max_val)\n\n# resume from saved name\nsave_dict = fu.unpickle(pkl_name)\nM_c = save_dict['M_c']\nX_L = save_dict['X_L']\nX_D = save_dict['X_D']\nT = save_dict['T']\nnum_cols = len(X_L['column_partition']['assignments'])\nrow_idx = 205\ncol_idx = 13\nQ = [(row_idx, col_idx)]\nimputed, confidence = su.impute_and_confidence(\n M_c, X_L, X_D, Y=None, Q=Q, n=400, get_next_seed=get_next_seed)\n\nT_array = numpy.array(T)\nwhich_view_idx = X_L['column_partition']['assignments'][col_idx]\nX_D_i = numpy.array(X_D[which_view_idx])\nwhich_cluster_idx = X_D_i[row_idx]\nwhich_rows_match_indices = numpy.nonzero(X_D_i==which_cluster_idx)[0]\ncluster_vals = T_array[which_rows_match_indices, col_idx]\nall_vals = T_array[:, col_idx]\ncluster_counter = Counter(cluster_vals)\ncluster_ratio = float(cluster_counter[imputed]) / sum(cluster_counter.values())\nall_counter = Counter(all_vals)\nall_ratio = float(all_counter[imputed]) / sum(all_counter.values())\nprint()\nprint('imputed: %s' % imputed)\nprint('all_ratio: %s' % all_ratio)\nprint('cluster_ratio: %s' % cluster_ratio)\nprint('confidence: %s' % confidence)\n" ]
[ [ "numpy.array", "numpy.nonzero", "numpy.random.RandomState" ] ]
safinsingh/manim-old
[ "ba403c02853118a7fbb3f17852a158ebef95a97a" ]
[ "manimlib/animation/transform.py" ]
[ "import inspect\n\nimport numpy as np\n\nfrom manimlib.animation.animation import Animation\nfrom manimlib.constants import DEFAULT_POINTWISE_FUNCTION_RUN_TIME\nfrom manimlib.constants import OUT\nfrom manimlib.constants import DEGREES\nfrom manimlib.mobject.mobject import Group\nfrom manimlib.mobject.mobject import Mobject\nfrom manimlib.utils.config_ops import digest_config\nfrom manimlib.utils.paths import path_along_arc\nfrom manimlib.utils.paths import straight_path\nfrom manimlib.utils.rate_functions import smooth\nfrom manimlib.utils.rate_functions import squish_rate_func\n\n\nclass Transform(Animation):\n CONFIG = {\n \"path_arc\": 0,\n \"path_arc_axis\": OUT,\n \"path_func\": None,\n \"replace_mobject_with_target_in_scene\": False,\n }\n\n def __init__(self, mobject, target_mobject=None, **kwargs):\n super().__init__(mobject, **kwargs)\n self.target_mobject = target_mobject\n self.init_path_func()\n\n def init_path_func(self):\n if self.path_func is not None:\n return\n elif self.path_arc == 0:\n self.path_func = straight_path\n else:\n self.path_func = path_along_arc(\n self.path_arc,\n self.path_arc_axis,\n )\n\n def begin(self):\n # Use a copy of target_mobject for the align_data\n # call so that the actual target_mobject stays\n # preserved.\n self.target_mobject = self.create_target()\n self.check_target_mobject_validity()\n self.target_copy = self.target_mobject.copy()\n # Note, this potentially changes the structure\n # of both mobject and target_mobject\n self.mobject.align_data(self.target_copy)\n super().begin()\n\n def create_target(self):\n # Has no meaningful effect here, but may be useful\n # in subclasses\n return self.target_mobject\n\n def check_target_mobject_validity(self):\n if self.target_mobject is None:\n message = \"{}.create_target not properly implemented\"\n raise Exception(\n message.format(self.__class__.__name__)\n )\n\n def clean_up_from_scene(self, scene):\n super().clean_up_from_scene(scene)\n if self.replace_mobject_with_target_in_scene:\n scene.remove(self.mobject)\n scene.add(self.target_mobject)\n\n def update_config(self, **kwargs):\n Animation.update_config(self, **kwargs)\n if \"path_arc\" in kwargs:\n self.path_func = path_along_arc(\n kwargs[\"path_arc\"],\n kwargs.get(\"path_arc_axis\", OUT)\n )\n\n def get_all_mobjects(self):\n return [\n self.mobject,\n self.starting_mobject,\n self.target_mobject,\n self.target_copy,\n ]\n\n def get_all_families_zipped(self):\n return zip(*[\n mob.family_members_with_points()\n for mob in [\n self.mobject,\n self.starting_mobject,\n self.target_copy,\n ]\n ])\n\n def interpolate_submobject(self, submob, start, target_copy, alpha):\n submob.interpolate(\n start, target_copy,\n alpha, self.path_func\n )\n return self\n\n\nclass ReplacementTransform(Transform):\n CONFIG = {\n \"replace_mobject_with_target_in_scene\": True,\n }\n\n\nclass TransformFromCopy(Transform):\n \"\"\"\n Performs a reversed Transform\n \"\"\"\n\n def __init__(self, mobject, target_mobject, **kwargs):\n super().__init__(target_mobject, mobject, **kwargs)\n\n def interpolate(self, alpha):\n super().interpolate(1 - alpha)\n\n\nclass ClockwiseTransform(Transform):\n CONFIG = {\n \"path_arc\": -np.pi\n }\n\n\nclass CounterclockwiseTransform(Transform):\n CONFIG = {\n \"path_arc\": np.pi\n }\n\nclass ReplaceClockwiseTransform(ClockwiseTransform):\n CONFIG = {\n \"replace_mobject_with_target_in_scene\": True,\n }\n\n\nclass ReplaceCounterclockwiseTransform(CounterclockwiseTransform):\n CONFIG = {\n 
\"replace_mobject_with_target_in_scene\": True,\n }\n\nclass MoveToTarget(Transform):\n def __init__(self, mobject, **kwargs):\n self.check_validity_of_input(mobject)\n super().__init__(mobject, mobject.target, **kwargs)\n\n def check_validity_of_input(self, mobject):\n if not hasattr(mobject, \"target\"):\n raise Exception(\n \"MoveToTarget called on mobject\"\n \"without attribute 'target'\"\n )\n\n\nclass ApplyMethod(Transform):\n def __init__(self, method, *args, **kwargs):\n \"\"\"\n method is a method of Mobject, *args are arguments for\n that method. Key word arguments should be passed in\n as the last arg, as a dict, since **kwargs is for\n configuration of the transform itslef\n\n Relies on the fact that mobject methods return the mobject\n \"\"\"\n self.check_validity_of_input(method)\n self.method = method\n self.method_args = args\n super().__init__(method.__self__, **kwargs)\n\n def check_validity_of_input(self, method):\n if not inspect.ismethod(method):\n raise Exception(\n \"Whoops, looks like you accidentally invoked \"\n \"the method you want to animate\"\n )\n assert(isinstance(method.__self__, Mobject))\n\n def create_target(self):\n method = self.method\n # Make sure it's a list so that args.pop() works\n args = list(self.method_args)\n\n if len(args) > 0 and isinstance(args[-1], dict):\n method_kwargs = args.pop()\n else:\n method_kwargs = {}\n target = method.__self__.copy()\n method.__func__(target, *args, **method_kwargs)\n return target\n\n\nclass ApplyPointwiseFunction(ApplyMethod):\n CONFIG = {\n \"run_time\": DEFAULT_POINTWISE_FUNCTION_RUN_TIME\n }\n\n def __init__(self, function, mobject, **kwargs):\n super().__init__(mobject.apply_function, function, **kwargs)\n\n\nclass ApplyPointwiseFunctionToCenter(ApplyPointwiseFunction):\n def __init__(self, function, mobject, **kwargs):\n self.function = function\n super().__init__(mobject.move_to, **kwargs)\n\n def begin(self):\n self.method_args = [\n self.function(self.mobject.get_center())\n ]\n super().begin()\n\n\nclass FadeToColor(ApplyMethod):\n def __init__(self, mobject, color, **kwargs):\n super().__init__(mobject.set_color, color, **kwargs)\n\n\nclass ScaleInPlace(ApplyMethod):\n def __init__(self, mobject, scale_factor, **kwargs):\n super().__init__(mobject.scale, scale_factor, **kwargs)\n\n\nclass ShrinkToCenter(ScaleInPlace):\n def __init__(self, mobject, **kwargs):\n super().__init__(mobject, 0, **kwargs)\n\n\nclass Restore(ApplyMethod):\n def __init__(self, mobject, **kwargs):\n super().__init__(mobject.restore, **kwargs)\n\n\nclass ApplyFunction(Transform):\n def __init__(self, function, mobject, **kwargs):\n self.function = function\n super().__init__(mobject, **kwargs)\n\n def create_target(self):\n target = self.function(self.mobject.copy())\n if not isinstance(target, Mobject):\n raise Exception(\"Functions passed to ApplyFunction must return object of type Mobject\")\n return target\n\n\nclass ApplyMatrix(ApplyPointwiseFunction):\n def __init__(self, matrix, mobject, **kwargs):\n matrix = self.initialize_matrix(matrix)\n\n def func(p):\n return np.dot(p, matrix.T)\n\n super().__init__(func, mobject, **kwargs)\n\n def initialize_matrix(self, matrix):\n matrix = np.array(matrix)\n if matrix.shape == (2, 2):\n new_matrix = np.identity(3)\n new_matrix[:2, :2] = matrix\n matrix = new_matrix\n elif matrix.shape != (3, 3):\n raise Exception(\"Matrix has bad dimensions\")\n return matrix\n\n\nclass ApplyComplexFunction(ApplyMethod):\n def __init__(self, function, mobject, **kwargs):\n self.function = 
function\n method = mobject.apply_complex_function\n super().__init__(method, function, **kwargs)\n\n def init_path_func(self):\n func1 = self.function(complex(1))\n self.path_arc = np.log(func1).imag\n super().init_path_func()\n\n###\n\n\nclass CyclicReplace(Transform):\n CONFIG = {\n \"path_arc\": 90 * DEGREES,\n }\n\n def __init__(self, *mobjects, **kwargs):\n self.group = Group(*mobjects)\n super().__init__(self.group, **kwargs)\n\n def create_target(self):\n target = self.group.copy()\n cycled_targets = [target[-1], *target[:-1]]\n for m1, m2 in zip(cycled_targets, self.group):\n m1.move_to(m2)\n return target\n\n\nclass Swap(CyclicReplace):\n pass # Renaming, more understandable for two entries\n\n\n# TODO, this may be depricated...worth reimplementing?\nclass TransformAnimations(Transform):\n CONFIG = {\n \"rate_func\": squish_rate_func(smooth)\n }\n\n def __init__(self, start_anim, end_anim, **kwargs):\n digest_config(self, kwargs, locals())\n if \"run_time\" in kwargs:\n self.run_time = kwargs.pop(\"run_time\")\n else:\n self.run_time = max(start_anim.run_time, end_anim.run_time)\n for anim in start_anim, end_anim:\n anim.set_run_time(self.run_time)\n\n if start_anim.starting_mobject.get_num_points() != end_anim.starting_mobject.get_num_points():\n start_anim.starting_mobject.align_data(end_anim.starting_mobject)\n for anim in start_anim, end_anim:\n if hasattr(anim, \"target_mobject\"):\n anim.starting_mobject.align_data(anim.target_mobject)\n\n Transform.__init__(self, start_anim.mobject,\n end_anim.mobject, **kwargs)\n # Rewire starting and ending mobjects\n start_anim.mobject = self.starting_mobject\n end_anim.mobject = self.target_mobject\n\n def interpolate(self, alpha):\n self.start_anim.interpolate(alpha)\n self.end_anim.interpolate(alpha)\n Transform.interpolate(self, alpha)\n" ]
[ [ "numpy.identity", "numpy.array", "numpy.dot", "numpy.log" ] ]
zhjpqq/Detectron.pytorch
[ "8315af319cd29b8884a7c0382c4700a96bf35bbc" ]
[ "lib/modeling/FPN.py" ]
[ "import collections\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\nfrom core.config import cfg\nimport utils.net as net_utils\nimport modeling.ResNet as ResNet\nfrom modeling.generate_anchors import generate_anchors\nfrom modeling.generate_proposals import GenerateProposalsOp\nfrom modeling.collect_and_distribute_fpn_rpn_proposals import CollectAndDistributeFpnRpnProposalsOp\nimport nn as mynn\n\n# Lowest and highest pyramid levels in the backbone network. For FPN, we assume\n# that all networks have 5 spatial reductions, each by a factor of 2. Level 1\n# would correspond to the input image, hence it does not make sense to use it.\nLOWEST_BACKBONE_LVL = 2 # E.g., \"conv2\"-like level\nHIGHEST_BACKBONE_LVL = 5 # E.g., \"conv5\"-like level\n\n\n# ---------------------------------------------------------------------------- #\n# FPN with ResNet\n# ---------------------------------------------------------------------------- #\n\ndef fpn_ResNet50_conv5_body():\n return fpn(\n ResNet.ResNet50_conv5_body, fpn_level_info_ResNet50_conv5()\n )\n\n\ndef fpn_ResNet50_conv5_P2only_body():\n return fpn(\n ResNet.ResNet50_conv5_body,\n fpn_level_info_ResNet50_conv5(),\n P2only=True\n )\n\n\ndef fpn_ResNet101_conv5_body():\n return fpn(\n ResNet.ResNet101_conv5_body, fpn_level_info_ResNet101_conv5()\n )\n\n\ndef fpn_ResNet101_conv5_P2only_body():\n return fpn(\n ResNet.ResNet101_conv5_body,\n fpn_level_info_ResNet101_conv5(),\n P2only=True\n )\n\n\ndef fpn_ResNet152_conv5_body():\n return fpn(\n ResNet.ResNet152_conv5_body, fpn_level_info_ResNet152_conv5()\n )\n\n\ndef fpn_ResNet152_conv5_P2only_body():\n return fpn(\n ResNet.ResNet152_conv5_body,\n fpn_level_info_ResNet152_conv5(),\n P2only=True\n )\n\n\n# ---------------------------------------------------------------------------- #\n# Functions for bolting FPN onto a backbone architectures\n# ---------------------------------------------------------------------------- #\nclass fpn(nn.Module):\n \"\"\"Add FPN connections based on the model described in the FPN paper.\n\n fpn_output_blobs is in reversed order: e.g [fpn5, fpn4, fpn3, fpn2]\n similarly for fpn_level_info.dims: e.g [2048, 1024, 512, 256]\n similarly for spatial_scale: e.g [1/32, 1/16, 1/8, 1/4]\n \"\"\"\n def __init__(self, conv_body_func, fpn_level_info, P2only=False):\n super().__init__()\n self.fpn_level_info = fpn_level_info\n self.P2only = P2only\n\n self.dim_out = fpn_dim = cfg.FPN.DIM\n min_level, max_level = get_min_max_levels()\n self.num_backbone_stages = len(fpn_level_info.blobs) - (min_level - LOWEST_BACKBONE_LVL)\n fpn_dim_lateral = fpn_level_info.dims\n self.spatial_scale = [] # a list of scales for FPN outputs\n\n #\n # Step 1: recursively build down starting from the coarsest backbone level\n #\n # For the coarest backbone level: 1x1 conv only seeds recursion\n self.conv_top = nn.Conv2d(fpn_dim_lateral[0], fpn_dim, 1, 1, 0)\n if cfg.FPN.USE_GN:\n self.conv_top = nn.Sequential(\n nn.Conv2d(fpn_dim_lateral[0], fpn_dim, 1, 1, 0, bias=False),\n nn.GroupNorm(net_utils.get_group_gn(fpn_dim), fpn_dim,\n eps=cfg.GROUP_NORM.EPSILON)\n )\n else:\n self.conv_top = nn.Conv2d(fpn_dim_lateral[0], fpn_dim, 1, 1, 0)\n self.topdown_lateral_modules = nn.ModuleList()\n self.posthoc_modules = nn.ModuleList()\n\n # For other levels add top-down and lateral connections\n for i in range(self.num_backbone_stages - 1):\n self.topdown_lateral_modules.append(\n topdown_lateral_module(fpn_dim, fpn_dim_lateral[i+1])\n )\n\n # 
Post-hoc scale-specific 3x3 convs\n for i in range(self.num_backbone_stages):\n if cfg.FPN.USE_GN:\n self.posthoc_modules.append(nn.Sequential(\n nn.Conv2d(fpn_dim, fpn_dim, 3, 1, 1, bias=False),\n nn.GroupNorm(net_utils.get_group_gn(fpn_dim), fpn_dim,\n eps=cfg.GROUP_NORM.EPSILON)\n ))\n else:\n self.posthoc_modules.append(\n nn.Conv2d(fpn_dim, fpn_dim, 3, 1, 1)\n )\n\n self.spatial_scale.append(fpn_level_info.spatial_scales[i])\n\n #\n # Step 2: build up starting from the coarsest backbone level\n #\n # Check if we need the P6 feature map\n if not cfg.FPN.EXTRA_CONV_LEVELS and max_level == HIGHEST_BACKBONE_LVL + 1:\n # Original FPN P6 level implementation from our CVPR'17 FPN paper\n # Use max pooling to simulate stride 2 subsampling\n self.maxpool_p6 = nn.MaxPool2d(kernel_size=1, stride=2, padding=0)\n self.spatial_scale.insert(0, self.spatial_scale[0] * 0.5)\n\n # Coarser FPN levels introduced for RetinaNet\n if cfg.FPN.EXTRA_CONV_LEVELS and max_level > HIGHEST_BACKBONE_LVL:\n self.extra_pyramid_modules = nn.ModuleList()\n dim_in = fpn_level_info.dims[0]\n for i in range(HIGHEST_BACKBONE_LVL + 1, max_level + 1):\n self.extra_pyramid_modules.append(\n nn.Conv2d(dim_in, fpn_dim, 3, 2, 1)\n )\n dim_in = fpn_dim\n self.spatial_scale.insert(0, self.spatial_scale[0] * 0.5)\n\n if self.P2only:\n # use only the finest level\n self.spatial_scale = self.spatial_scale[-1]\n\n self._init_weights()\n\n # Deliberately add conv_body after _init_weights.\n # conv_body has its own _init_weights function\n self.conv_body = conv_body_func() # e.g. resnet\n\n def _init_weights(self):\n def init_func(m):\n if isinstance(m, nn.Conv2d):\n mynn.init.XavierFill(m.weight)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n for child_m in self.children():\n if (not isinstance(child_m, nn.ModuleList) or\n not isinstance(child_m[0], topdown_lateral_module)):\n # topdown_lateral_module has its own init method\n child_m.apply(init_func)\n\n def detectron_weight_mapping(self):\n conv_body_mapping, orphan_in_detectron = self.conv_body.detectron_weight_mapping()\n mapping_to_detectron = {}\n for key, value in conv_body_mapping.items():\n mapping_to_detectron['conv_body.'+key] = value\n\n d_prefix = 'fpn_inner_' + self.fpn_level_info.blobs[0]\n if cfg.FPN.USE_GN:\n mapping_to_detectron['conv_top.0.weight'] = d_prefix + '_w'\n mapping_to_detectron['conv_top.1.weight'] = d_prefix + '_gn_s'\n mapping_to_detectron['conv_top.1.bias'] = d_prefix + '_gn_b'\n else:\n mapping_to_detectron['conv_top.weight'] = d_prefix + '_w'\n mapping_to_detectron['conv_top.bias'] = d_prefix + '_b'\n for i in range(self.num_backbone_stages - 1):\n p_prefix = 'topdown_lateral_modules.%d.conv_lateral' % i\n d_prefix = 'fpn_inner_' + self.fpn_level_info.blobs[i+1] + '_lateral'\n if cfg.FPN.USE_GN:\n mapping_to_detectron.update({\n p_prefix + '.0.weight' : d_prefix + '_w',\n p_prefix + '.1.weight' : d_prefix + '_gn_s',\n p_prefix + '.1.bias': d_prefix + '_gn_b'\n })\n else:\n mapping_to_detectron.update({\n p_prefix + '.weight' : d_prefix + '_w',\n p_prefix + '.bias': d_prefix + '_b'\n })\n\n for i in range(self.num_backbone_stages):\n p_prefix = 'posthoc_modules.%d' % i\n d_prefix = 'fpn_' + self.fpn_level_info.blobs[i]\n if cfg.FPN.USE_GN:\n mapping_to_detectron.update({\n p_prefix + '.0.weight': d_prefix + '_w',\n p_prefix + '.1.weight': d_prefix + '_gn_s',\n p_prefix + '.1.bias': d_prefix + '_gn_b'\n })\n else:\n mapping_to_detectron.update({\n p_prefix + '.weight': d_prefix + '_w',\n p_prefix + '.bias': d_prefix + '_b'\n })\n\n if hasattr(self, 
'extra_pyramid_modules'):\n for i in range(len(self.extra_pyramid_modules)):\n p_prefix = 'extra_pyramid_modules.%d' % i\n d_prefix = 'fpn_%d' % (HIGHEST_BACKBONE_LVL + 1 + i)\n mapping_to_detectron.update({\n p_prefix + '.weight': d_prefix + '_w',\n p_prefix + '.bias': d_prefix + '_b'\n })\n\n return mapping_to_detectron, orphan_in_detectron\n\n def forward(self, x):\n conv_body_blobs = [self.conv_body.res1(x)]\n for i in range(1, self.conv_body.convX):\n conv_body_blobs.append(\n getattr(self.conv_body, 'res%d' % (i+1))(conv_body_blobs[-1])\n )\n fpn_inner_blobs = [self.conv_top(conv_body_blobs[-1])]\n for i in range(self.num_backbone_stages - 1):\n fpn_inner_blobs.append(\n self.topdown_lateral_modules[i](fpn_inner_blobs[-1], conv_body_blobs[-(i+2)])\n )\n fpn_output_blobs = []\n for i in range(self.num_backbone_stages):\n fpn_output_blobs.append(\n self.posthoc_modules[i](fpn_inner_blobs[i])\n )\n\n if hasattr(self, 'maxpool_p6'):\n fpn_output_blobs.insert(0, self.maxpool_p6(fpn_output_blobs[0]))\n\n if hasattr(self, 'extra_pyramid_modules'):\n blob_in = conv_body_blobs[-1]\n fpn_output_blobs.insert(0, self.extra_pyramid_modules[0](blob_in))\n for module in self.extra_pyramid_modules[1:]:\n fpn_output_blobs.insert(0, module(F.relu(fpn_output_blobs[0], inplace=True)))\n\n if self.P2only:\n # use only the finest level\n return fpn_output_blobs[-1]\n else:\n # use all levels\n return fpn_output_blobs\n\n\nclass topdown_lateral_module(nn.Module):\n \"\"\"Add a top-down lateral module.\"\"\"\n def __init__(self, dim_in_top, dim_in_lateral):\n super().__init__()\n self.dim_in_top = dim_in_top\n self.dim_in_lateral = dim_in_lateral\n self.dim_out = dim_in_top\n if cfg.FPN.USE_GN:\n self.conv_lateral = nn.Sequential(\n nn.Conv2d(dim_in_lateral, self.dim_out, 1, 1, 0, bias=False),\n nn.GroupNorm(net_utils.get_group_gn(self.dim_out), self.dim_out,\n eps=cfg.GROUP_NORM.EPSILON)\n )\n else:\n self.conv_lateral = nn.Conv2d(dim_in_lateral, self.dim_out, 1, 1, 0)\n\n self._init_weights()\n\n def _init_weights(self):\n if cfg.FPN.USE_GN:\n conv = self.conv_lateral[0]\n else:\n conv = self.conv_lateral\n\n if cfg.FPN.ZERO_INIT_LATERAL:\n init.constant_(conv.weight, 0)\n else:\n mynn.init.XavierFill(conv.weight)\n if conv.bias is not None:\n init.constant_(conv.bias, 0)\n\n def forward(self, top_blob, lateral_blob):\n # Lateral 1x1 conv\n lat = self.conv_lateral(lateral_blob)\n # Top-down 2x upsampling\n # td = F.upsample(top_blob, size=lat.size()[2:], mode='bilinear')\n td = F.upsample(top_blob, scale_factor=2, mode='nearest')\n # Sum lateral and top-down\n return lat + td\n\n\ndef get_min_max_levels():\n \"\"\"The min and max FPN levels required for supporting RPN and/or RoI\n transform operations on multiple FPN levels.\n \"\"\"\n min_level = LOWEST_BACKBONE_LVL\n max_level = HIGHEST_BACKBONE_LVL\n if cfg.FPN.MULTILEVEL_RPN and not cfg.FPN.MULTILEVEL_ROIS:\n max_level = cfg.FPN.RPN_MAX_LEVEL\n min_level = cfg.FPN.RPN_MIN_LEVEL\n if not cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS:\n max_level = cfg.FPN.ROI_MAX_LEVEL\n min_level = cfg.FPN.ROI_MIN_LEVEL\n if cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS:\n max_level = max(cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.ROI_MAX_LEVEL)\n min_level = min(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.ROI_MIN_LEVEL)\n return min_level, max_level\n\n\n# ---------------------------------------------------------------------------- #\n# RPN with an FPN backbone\n# ---------------------------------------------------------------------------- #\n\nclass fpn_rpn_outputs(nn.Module):\n \"\"\"Add 
RPN on FPN specific outputs.\"\"\"\n def __init__(self, dim_in, spatial_scales):\n super().__init__()\n self.dim_in = dim_in\n self.spatial_scales = spatial_scales\n self.dim_out = self.dim_in\n num_anchors = len(cfg.FPN.RPN_ASPECT_RATIOS)\n\n # Create conv ops shared by all FPN levels\n self.FPN_RPN_conv = nn.Conv2d(dim_in, self.dim_out, 3, 1, 1)\n dim_score = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \\\n else num_anchors\n self.FPN_RPN_cls_score = nn.Conv2d(self.dim_out, dim_score, 1, 1, 0)\n self.FPN_RPN_bbox_pred = nn.Conv2d(self.dim_out, 4 * num_anchors, 1, 1, 0)\n\n self.GenerateProposals_modules = nn.ModuleList()\n k_max = cfg.FPN.RPN_MAX_LEVEL # coarsest level of pyramid\n k_min = cfg.FPN.RPN_MIN_LEVEL # finest level of pyramid\n for lvl in range(k_min, k_max + 1):\n sc = self.spatial_scales[k_max - lvl] # in reversed order\n lvl_anchors = generate_anchors(\n stride=2.**lvl,\n sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * 2.**(lvl - k_min), ),\n aspect_ratios=cfg.FPN.RPN_ASPECT_RATIOS\n )\n self.GenerateProposals_modules.append(GenerateProposalsOp(lvl_anchors, sc))\n\n self.CollectAndDistributeFpnRpnProposals = CollectAndDistributeFpnRpnProposalsOp()\n\n self._init_weights()\n\n def _init_weights(self):\n init.normal_(self.FPN_RPN_conv.weight, std=0.01)\n init.constant_(self.FPN_RPN_conv.bias, 0)\n init.normal_(self.FPN_RPN_cls_score.weight, std=0.01)\n init.constant_(self.FPN_RPN_cls_score.bias, 0)\n init.normal_(self.FPN_RPN_bbox_pred.weight, std=0.01)\n init.constant_(self.FPN_RPN_bbox_pred.bias, 0)\n\n def detectron_weight_mapping(self):\n k_min = cfg.FPN.RPN_MIN_LEVEL\n mapping_to_detectron = {\n 'FPN_RPN_conv.weight': 'conv_rpn_fpn%d_w' % k_min,\n 'FPN_RPN_conv.bias': 'conv_rpn_fpn%d_b' % k_min,\n 'FPN_RPN_cls_score.weight': 'rpn_cls_logits_fpn%d_w' % k_min,\n 'FPN_RPN_cls_score.bias': 'rpn_cls_logits_fpn%d_b' % k_min,\n 'FPN_RPN_bbox_pred.weight': 'rpn_bbox_pred_fpn%d_w' % k_min,\n 'FPN_RPN_bbox_pred.bias': 'rpn_bbox_pred_fpn%d_b' % k_min\n }\n return mapping_to_detectron, []\n\n def forward(self, blobs_in, im_info, roidb=None):\n k_max = cfg.FPN.RPN_MAX_LEVEL # coarsest level of pyramid\n k_min = cfg.FPN.RPN_MIN_LEVEL # finest level of pyramid\n assert len(blobs_in) == k_max - k_min + 1\n return_dict = {}\n rois_blobs = []\n score_blobs = []\n for lvl in range(k_min, k_max + 1):\n slvl = str(lvl)\n bl_in = blobs_in[k_max - lvl] # blobs_in is in reversed order\n\n fpn_rpn_conv = F.relu(self.FPN_RPN_conv(bl_in), inplace=True)\n fpn_rpn_cls_score = self.FPN_RPN_cls_score(fpn_rpn_conv)\n fpn_rpn_bbox_pred = self.FPN_RPN_bbox_pred(fpn_rpn_conv)\n return_dict['rpn_cls_logits_fpn' + slvl] = fpn_rpn_cls_score\n return_dict['rpn_bbox_pred_fpn' + slvl] = fpn_rpn_bbox_pred\n\n if not self.training or cfg.MODEL.FASTER_RCNN:\n # Proposals are needed during:\n # 1) inference (== not model.train) for RPN only and Faster R-CNN\n # OR\n # 2) training for Faster R-CNN\n # Otherwise (== training for RPN only), proposals are not needed\n if cfg.RPN.CLS_ACTIVATION == 'softmax':\n B, C, H, W = fpn_rpn_cls_score.size()\n fpn_rpn_cls_probs = F.softmax(\n fpn_rpn_cls_score.view(B, 2, C // 2, H, W), dim=1)\n fpn_rpn_cls_probs = fpn_rpn_cls_probs[:, 1].squeeze(dim=1)\n else: # sigmoid\n fpn_rpn_cls_probs = F.sigmoid(fpn_rpn_cls_score)\n\n fpn_rpn_rois, fpn_rpn_roi_probs = self.GenerateProposals_modules[lvl - k_min](\n fpn_rpn_cls_probs, fpn_rpn_bbox_pred, im_info)\n rois_blobs.append(fpn_rpn_rois)\n score_blobs.append(fpn_rpn_roi_probs)\n return_dict['rpn_rois_fpn' + slvl] = fpn_rpn_rois\n 
return_dict['rpn_rois_prob_fpn' + slvl] = fpn_rpn_roi_probs\n\n if cfg.MODEL.FASTER_RCNN:\n # CollectAndDistributeFpnRpnProposals also labels proposals when in training mode\n blobs_out = self.CollectAndDistributeFpnRpnProposals(rois_blobs + score_blobs, roidb, im_info)\n return_dict.update(blobs_out)\n\n return return_dict\n\n\ndef fpn_rpn_losses(**kwargs):\n \"\"\"Add RPN on FPN specific losses.\"\"\"\n losses_cls = []\n losses_bbox = []\n for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):\n slvl = str(lvl)\n # Spatially narrow the full-sized RPN label arrays to match the feature map shape\n b, c, h, w = kwargs['rpn_cls_logits_fpn' + slvl].shape\n rpn_labels_int32_fpn = kwargs['rpn_labels_int32_wide_fpn' + slvl][:, :, :h, :w]\n h, w = kwargs['rpn_bbox_pred_fpn' + slvl].shape[2:]\n rpn_bbox_targets_fpn = kwargs['rpn_bbox_targets_wide_fpn' + slvl][:, :, :h, :w]\n rpn_bbox_inside_weights_fpn = kwargs[\n 'rpn_bbox_inside_weights_wide_fpn' + slvl][:, :, :h, :w]\n rpn_bbox_outside_weights_fpn = kwargs[\n 'rpn_bbox_outside_weights_wide_fpn' + slvl][:, :, :h, :w]\n\n if cfg.RPN.CLS_ACTIVATION == 'softmax':\n rpn_cls_logits_fpn = kwargs['rpn_cls_logits_fpn' + slvl].view(\n b, 2, c // 2, h, w).permute(0, 2, 3, 4, 1).contiguous().view(-1, 2)\n rpn_labels_int32_fpn = rpn_labels_int32_fpn.contiguous().view(-1).long()\n # the loss is averaged over non-ignored targets\n loss_rpn_cls_fpn = F.cross_entropy(\n rpn_cls_logits_fpn, rpn_labels_int32_fpn, ignore_index=-1)\n else: # sigmoid\n weight = (rpn_labels_int32_fpn >= 0).float()\n loss_rpn_cls_fpn = F.binary_cross_entropy_with_logits(\n kwargs['rpn_cls_logits_fpn' + slvl], rpn_labels_int32_fpn.float(), weight,\n size_average=False)\n loss_rpn_cls_fpn /= cfg.TRAIN.RPN_BATCH_SIZE_PER_IM * cfg.TRAIN.IMS_PER_BATCH\n\n # Normalization by (1) RPN_BATCH_SIZE_PER_IM and (2) IMS_PER_BATCH is\n # handled by (1) setting bbox outside weights and (2) SmoothL1Loss\n # normalizes by IMS_PER_BATCH\n loss_rpn_bbox_fpn = net_utils.smooth_l1_loss(\n kwargs['rpn_bbox_pred_fpn' + slvl], rpn_bbox_targets_fpn,\n rpn_bbox_inside_weights_fpn, rpn_bbox_outside_weights_fpn,\n beta=1/9)\n\n losses_cls.append(loss_rpn_cls_fpn)\n losses_bbox.append(loss_rpn_bbox_fpn)\n\n return losses_cls, losses_bbox\n\n\n# ---------------------------------------------------------------------------- #\n# FPN level info for stages 5, 4, 3, 2 for select models (more can be added)\n# ---------------------------------------------------------------------------- #\n\nFpnLevelInfo = collections.namedtuple(\n 'FpnLevelInfo',\n ['blobs', 'dims', 'spatial_scales']\n)\n\n\ndef fpn_level_info_ResNet50_conv5():\n return FpnLevelInfo(\n blobs=('res5_2_sum', 'res4_5_sum', 'res3_3_sum', 'res2_2_sum'),\n dims=(2048, 1024, 512, 256),\n spatial_scales=(1. / 32., 1. / 16., 1. / 8., 1. / 4.)\n )\n\n\ndef fpn_level_info_ResNet101_conv5():\n return FpnLevelInfo(\n blobs=('res5_2_sum', 'res4_22_sum', 'res3_3_sum', 'res2_2_sum'),\n dims=(2048, 1024, 512, 256),\n spatial_scales=(1. / 32., 1. / 16., 1. / 8., 1. / 4.)\n )\n\n\ndef fpn_level_info_ResNet152_conv5():\n return FpnLevelInfo(\n blobs=('res5_2_sum', 'res4_35_sum', 'res3_7_sum', 'res2_2_sum'),\n dims=(2048, 1024, 512, 256),\n spatial_scales=(1. / 32., 1. / 16., 1. / 8., 1. / 4.)\n )\n" ]
[ [ "torch.nn.functional.sigmoid", "torch.nn.functional.upsample", "torch.nn.ModuleList", "torch.nn.init.constant_", "torch.nn.MaxPool2d", "torch.nn.Conv2d", "torch.nn.init.normal_", "torch.nn.functional.cross_entropy", "torch.nn.functional.relu" ] ]
bradkav/DarkAxionPortal
[ "5716e0684cf0f7e84f0a4de00a37734deff71d7b" ]
[ "code/PlotKineticMixingProjections.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nimport PlotFuncs_DarkPhoton as PF\n\nimport DarkAxionPortal\nfrom Units import *\n\nimport os\nimport argparse\n\nrootdir = os.path.dirname(os.path.abspath(__file__)) + \"/\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-f_dp\", \"--f_dp\",help = \"DM fraction in Dark Photons: f_dp = [0, 1]\", type=float, default=0.1)\nargs = parser.parse_args()\nf_dp = args.f_dp\n\nDAP = DarkAxionPortal.Model(PQ_Phi = 1.0, e_D = 0.1, D_psi = 3)\n \ndef DAP_projections(f_dp=1.0):\n A_Si=28.0855 #Atomic mass of Silicon\n N_a=6.02214076e23 #Avogadro\n density=f_dp*4.5e8 #local energy-density of dark matter= 0.45 GeV/cm^3\n \n #Cross section in Mbarns\n PECross = np.loadtxt(rootdir + '../data/Si_PhotoelectricAbsorptionCrossSection.txt')\n PECross[:,1]=PECross[:,1]*10**-15*N_a/A_Si #To put it in cm^2 kg^-1\n \n BG_LBC = 3\n BG_DAMIC_M = 0.1\n \n y2 = ax.get_ylim()[1]\n plt.fill_between(PECross[:,0],np.sqrt((BG_LBC/((density/PECross[:,0])*PECross[:,1]*2.5902e15*5))),y2=y2,edgecolor='k',linewidth=2.5, linestyle=\":\", facecolor=\"sandybrown\",zorder=0.01, alpha=0.75)\n plt.fill_between(PECross[:,0],np.sqrt((BG_DAMIC_M/((density/PECross[:,0])*PECross[:,1]*2.5902e15*5))),y2=y2,edgecolor='k',linewidth=2.5, linestyle=\"-.\", facecolor=\"saddlebrown\",zorder=0.001, alpha=0.75)\n \n plt.text(1.0,4e-16/np.sqrt(f_dp),r'{\\bf LBC}'+'\\n'+'(This Work)',fontsize=21,color=\"sandybrown\",rotation=0,rotation_mode='anchor',ha='center',va='center') \n plt.text(2.5,1e-17/np.sqrt(f_dp),r'{\\bf DAMIC-M}'+'\\n'+'(This Work)',fontsize=21,color=\"saddlebrown\",rotation=0,rotation_mode='anchor',ha='center',va='center')\n plt.plot([3e0,8],[6e-16/np.sqrt(f_dp),8e-16/np.sqrt(f_dp)],'k-',lw=2.5,color=\"sandybrown\")\n plt.plot([7e0,9],[3e-17/np.sqrt(f_dp),5e-17/np.sqrt(f_dp)],'k-',lw=2.5,color=\"saddlebrown\")\n\n \n \n#--------------------------------------------------------------------------------------\n\nfig,ax = PF.FigSetup(Shape=\"Square\", chi_max = 1e-8, m_min = 1e-2, lfs=40, tfs=35, upper_xlabel=r\"Easter Egg\")\n\n# DPDM\n#DarkMatter(ax)\n\n# Axion haloscopes\n#Haloscopes(ax)\n\n# LSW/Helioscopes\n#LSW(ax)\nPF.CAST(ax, text_on=False)\nplt.text(1e3*(1-0.01),1e-9*(1+0.08),r'{\\bf CAST}',fontsize=27,color='k',rotation=0,rotation_mode='anchor',ha='center',va='center') \nplt.text(1e3,1e-9,r'{\\bf CAST}',fontsize=27,color='w',rotation=0,rotation_mode='anchor',ha='center',va='center')\n\nPF.SHIPS(ax)\n\n# Tests of coulomb law\n#Coulomb(ax)\n\n# Reactor neutrinos\n#TEXONO(ax)\n\n\n\n# DPDM searches\nPF.Xenon(ax, f_DP=f_dp, text_on=False)\nplt.text(8e2,1e-16,r'{\\bf XENON}',fontsize=22,color='crimson',rotation=0,rotation_mode='anchor',ha='center',va='center')\n\nPF.DAMIC(ax, text_on = False, f_DP=f_dp)\nplt.text(16, 0.4e-13/np.sqrt(f_dp), \"DAMIC\", fontsize=20, rotation=-90)\nPF.SENSEI(ax, f_DP=f_dp, text_on = False)\n\nplt.text(1e0,3e-14, \"SENSEI\", fontsize=20, rotation=0, color=\"Firebrick\")\nplt.plot([5e0,8e0],[3.8e-14,5.3e-14],'k-',lw=2.5,color=\"Firebrick\")\n\n\n\nPF.FUNK(ax, text_on = False, f_DP=f_dp)\nplt.text(2, 3e-12/np.sqrt(f_dp), \"FUNK\", fontsize=20, rotation=-90)\n#Tokyo(ax)\n#SHUKET(ax)\n#DarkEfield(ax)\n#WISPDMX(ax)\n#SQuAD(ax)\n#DMPathfinder(ax)\n\n# Astrophysical bounds\nPF.StellarBounds(ax, Higgsed=True, e_D=DAP.e_D)\n#COBEFIRAS(ax)\n#Jupiter(ax)\n#Earth(ax)\n#Crab(ax)\n#IGM(ax)\n#LeoT(ax)\n\n#KineticMixing_production()\n#Decay()\nDAP_projections(f_dp)\n\nplt.gcf().text(0.89,0.13,r'$f_{\\gamma^\\prime} = '+ str(int(f_dp*100)) + r'\\%; 
\\,\\,\\rho_0 = 0.45$ GeV cm$^{-3}$',fontsize=25,ha='right',va='bottom')\n\nPF.MySaveFig(fig,'DarkPhoton_KineticMixing_Projections_fDP_'+str(int(f_dp*100)) + 'pct')" ]
[ [ "matplotlib.pyplot.text", "matplotlib.pyplot.plot", "numpy.loadtxt", "matplotlib.pyplot.gcf", "numpy.sqrt" ] ]
Mascariddu/vision
[ "d4d15445ecab3365c26498695861c9e773b86284" ]
[ "test/test_onnx.py" ]
[ "import io\nimport torch\nfrom torchvision import ops\nfrom torchvision import models\nfrom torchvision.models.detection.image_list import ImageList\nfrom torchvision.models.detection.transform import GeneralizedRCNNTransform\nfrom torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork\nfrom torchvision.models.detection.backbone_utils import resnet_fpn_backbone\nfrom torchvision.models.detection.roi_heads import RoIHeads\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNHeads, MaskRCNNPredictor\n\nfrom collections import OrderedDict\n\n# onnxruntime requires python 3.5 or above\ntry:\n import onnxruntime\nexcept ImportError:\n onnxruntime = None\n\nimport unittest\nfrom torchvision.ops._register_onnx_ops import _onnx_opset_version\n\n\[email protected](onnxruntime is None, 'ONNX Runtime unavailable')\nclass ONNXExporterTester(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n torch.manual_seed(123)\n\n def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None,\n output_names=None, input_names=None):\n model.eval()\n\n onnx_io = io.BytesIO()\n # export to onnx with the first input\n torch.onnx.export(model, inputs_list[0], onnx_io,\n do_constant_folding=do_constant_folding, opset_version=_onnx_opset_version,\n dynamic_axes=dynamic_axes, input_names=input_names, output_names=output_names)\n # validate the exported model with onnx runtime\n for test_inputs in inputs_list:\n with torch.no_grad():\n if isinstance(test_inputs, torch.Tensor) or \\\n isinstance(test_inputs, list):\n test_inputs = (test_inputs,)\n test_ouputs = model(*test_inputs)\n if isinstance(test_ouputs, torch.Tensor):\n test_ouputs = (test_ouputs,)\n self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch)\n\n def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False):\n\n inputs, _ = torch.jit._flatten(inputs)\n outputs, _ = torch.jit._flatten(outputs)\n\n def to_numpy(tensor):\n if tensor.requires_grad:\n return tensor.detach().cpu().numpy()\n else:\n return tensor.cpu().numpy()\n\n inputs = list(map(to_numpy, inputs))\n outputs = list(map(to_numpy, outputs))\n\n ort_session = onnxruntime.InferenceSession(onnx_io.getvalue())\n # compute onnxruntime output prediction\n ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs))\n ort_outs = ort_session.run(None, ort_inputs)\n for i in range(0, len(outputs)):\n try:\n torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05)\n except AssertionError as error:\n if tolerate_small_mismatch:\n self.assertIn(\"(0.00%)\", str(error), str(error))\n else:\n raise\n\n @unittest.skip(\"Disable test until Split w/ zero sizes is implemented in ORT\")\n def test_new_empty_tensor(self):\n class Module(torch.nn.Module):\n def __init__(self):\n super(Module, self).__init__()\n self.conv2 = ops.misc.ConvTranspose2d(16, 33, (3, 5))\n\n def forward(self, input2):\n return self.conv2(input2)\n\n input = torch.rand(0, 16, 10, 10)\n test_input = torch.rand(0, 16, 20, 20)\n self.run_model(Module(), [(input, ), (test_input,)], do_constant_folding=False)\n\n def test_nms(self):\n boxes = torch.rand(5, 4)\n boxes[:, 2:] += torch.rand(5, 2)\n scores = torch.randn(5)\n\n class Module(torch.nn.Module):\n def forward(self, boxes, scores):\n return ops.nms(boxes, scores, 0.5)\n\n self.run_model(Module(), [(boxes, 
scores)])\n\n def test_clip_boxes_to_image(self):\n boxes = torch.randn(5, 4) * 500\n boxes[:, 2:] += boxes[:, :2]\n size = torch.randn(200, 300)\n\n size_2 = torch.randn(300, 400)\n\n class Module(torch.nn.Module):\n def forward(self, boxes, size):\n return ops.boxes.clip_boxes_to_image(boxes, size.shape)\n\n self.run_model(Module(), [(boxes, size), (boxes, size_2)],\n input_names=[\"boxes\", \"size\"],\n dynamic_axes={\"size\": [0, 1]})\n\n def test_roi_align(self):\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)\n model = ops.RoIAlign((5, 5), 1, 2)\n self.run_model(model, [(x, single_roi)])\n\n def test_roi_pool(self):\n x = torch.rand(1, 1, 10, 10, dtype=torch.float32)\n rois = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32)\n pool_h = 5\n pool_w = 5\n model = ops.RoIPool((pool_h, pool_w), 2)\n self.run_model(model, [(x, rois)])\n\n def test_resize_images(self):\n class TransformModule(torch.nn.Module):\n def __init__(self_module):\n super(TransformModule, self_module).__init__()\n self_module.transform = self._init_test_generalized_rcnn_transform()\n\n def forward(self_module, images):\n return self_module.transform.resize(images, None)[0]\n\n input = torch.rand(3, 10, 20)\n input_test = torch.rand(3, 100, 150)\n self.run_model(TransformModule(), [(input,), (input_test,)],\n input_names=[\"input1\"], dynamic_axes={\"input1\": [0, 1, 2, 3]})\n\n def test_transform_images(self):\n\n class TransformModule(torch.nn.Module):\n def __init__(self_module):\n super(TransformModule, self_module).__init__()\n self_module.transform = self._init_test_generalized_rcnn_transform()\n\n def forward(self_module, images):\n return self_module.transform(images)[0].tensors\n\n input = torch.rand(3, 100, 200), torch.rand(3, 200, 200)\n input_test = torch.rand(3, 100, 200), torch.rand(3, 200, 200)\n self.run_model(TransformModule(), [(input,), (input_test,)])\n\n def _init_test_generalized_rcnn_transform(self):\n min_size = 100\n max_size = 200\n image_mean = [0.485, 0.456, 0.406]\n image_std = [0.229, 0.224, 0.225]\n transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)\n return transform\n\n def _init_test_rpn(self):\n anchor_sizes = ((32,), (64,), (128,), (256,), (512,))\n aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)\n rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)\n out_channels = 256\n rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])\n rpn_fg_iou_thresh = 0.7\n rpn_bg_iou_thresh = 0.3\n rpn_batch_size_per_image = 256\n rpn_positive_fraction = 0.5\n rpn_pre_nms_top_n = dict(training=2000, testing=1000)\n rpn_post_nms_top_n = dict(training=2000, testing=1000)\n rpn_nms_thresh = 0.7\n\n rpn = RegionProposalNetwork(\n rpn_anchor_generator, rpn_head,\n rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n rpn_batch_size_per_image, rpn_positive_fraction,\n rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh)\n return rpn\n\n def _init_test_roi_heads_faster_rcnn(self):\n out_channels = 256\n num_classes = 91\n\n box_fg_iou_thresh = 0.5\n box_bg_iou_thresh = 0.5\n box_batch_size_per_image = 512\n box_positive_fraction = 0.25\n bbox_reg_weights = None\n box_score_thresh = 0.05\n box_nms_thresh = 0.5\n box_detections_per_img = 100\n\n box_roi_pool = ops.MultiScaleRoIAlign(\n featmap_names=['0', '1', '2', '3'],\n output_size=7,\n sampling_ratio=2)\n\n resolution = box_roi_pool.output_size[0]\n representation_size = 1024\n box_head = TwoMLPHead(\n 
out_channels * resolution ** 2,\n representation_size)\n\n representation_size = 1024\n box_predictor = FastRCNNPredictor(\n representation_size,\n num_classes)\n\n roi_heads = RoIHeads(\n box_roi_pool, box_head, box_predictor,\n box_fg_iou_thresh, box_bg_iou_thresh,\n box_batch_size_per_image, box_positive_fraction,\n bbox_reg_weights,\n box_score_thresh, box_nms_thresh, box_detections_per_img)\n return roi_heads\n\n def get_features(self, images):\n s0, s1 = images.shape[-2:]\n features = [\n ('0', torch.rand(2, 256, s0 // 4, s1 // 4)),\n ('1', torch.rand(2, 256, s0 // 8, s1 // 8)),\n ('2', torch.rand(2, 256, s0 // 16, s1 // 16)),\n ('3', torch.rand(2, 256, s0 // 32, s1 // 32)),\n ('4', torch.rand(2, 256, s0 // 64, s1 // 64)),\n ]\n features = OrderedDict(features)\n return features\n\n def test_rpn(self):\n class RPNModule(torch.nn.Module):\n def __init__(self_module):\n super(RPNModule, self_module).__init__()\n self_module.rpn = self._init_test_rpn()\n\n def forward(self_module, images, features):\n images = ImageList(images, [i.shape[-2:] for i in images])\n return self_module.rpn(images, features)\n\n images = torch.rand(2, 3, 150, 150)\n features = self.get_features(images)\n images2 = torch.rand(2, 3, 80, 80)\n test_features = self.get_features(images2)\n\n model = RPNModule()\n model.eval()\n model(images, features)\n\n self.run_model(model, [(images, features), (images2, test_features)], tolerate_small_mismatch=True,\n input_names=[\"input1\", \"input2\", \"input3\", \"input4\", \"input5\", \"input6\"],\n dynamic_axes={\"input1\": [0, 1, 2, 3], \"input2\": [0, 1, 2, 3],\n \"input3\": [0, 1, 2, 3], \"input4\": [0, 1, 2, 3],\n \"input5\": [0, 1, 2, 3], \"input6\": [0, 1, 2, 3]})\n\n def test_multi_scale_roi_align(self):\n\n class TransformModule(torch.nn.Module):\n def __init__(self):\n super(TransformModule, self).__init__()\n self.model = ops.MultiScaleRoIAlign(['feat1', 'feat2'], 3, 2)\n self.image_sizes = [(512, 512)]\n\n def forward(self, input, boxes):\n return self.model(input, boxes, self.image_sizes)\n\n i = OrderedDict()\n i['feat1'] = torch.rand(1, 5, 64, 64)\n i['feat2'] = torch.rand(1, 5, 16, 16)\n boxes = torch.rand(6, 4) * 256\n boxes[:, 2:] += boxes[:, :2]\n\n i1 = OrderedDict()\n i1['feat1'] = torch.rand(1, 5, 64, 64)\n i1['feat2'] = torch.rand(1, 5, 16, 16)\n boxes1 = torch.rand(6, 4) * 256\n boxes1[:, 2:] += boxes1[:, :2]\n\n self.run_model(TransformModule(), [(i, [boxes],), (i1, [boxes1],)])\n\n def test_roi_heads(self):\n class RoiHeadsModule(torch.nn.Module):\n def __init__(self_module):\n super(RoiHeadsModule, self_module).__init__()\n self_module.transform = self._init_test_generalized_rcnn_transform()\n self_module.rpn = self._init_test_rpn()\n self_module.roi_heads = self._init_test_roi_heads_faster_rcnn()\n\n def forward(self_module, images, features):\n original_image_sizes = [img.shape[-2:] for img in images]\n images = ImageList(images, [i.shape[-2:] for i in images])\n proposals, _ = self_module.rpn(images, features)\n detections, _ = self_module.roi_heads(features, proposals, images.image_sizes)\n detections = self_module.transform.postprocess(detections,\n images.image_sizes,\n original_image_sizes)\n return detections\n\n images = torch.rand(2, 3, 100, 100)\n features = self.get_features(images)\n images2 = torch.rand(2, 3, 150, 150)\n test_features = self.get_features(images2)\n\n model = RoiHeadsModule()\n model.eval()\n model(images, features)\n\n self.run_model(model, [(images, features), (images2, test_features)], 
tolerate_small_mismatch=True,\n input_names=[\"input1\", \"input2\", \"input3\", \"input4\", \"input5\", \"input6\"],\n dynamic_axes={\"input1\": [0, 1, 2, 3], \"input2\": [0, 1, 2, 3],\n \"input3\": [0, 1, 2, 3], \"input4\": [0, 1, 2, 3],\n \"input5\": [0, 1, 2, 3], \"input6\": [0, 1, 2, 3]})\n\n def get_image_from_url(self, url, size=None):\n import requests\n from PIL import Image\n from io import BytesIO\n from torchvision import transforms\n\n data = requests.get(url)\n image = Image.open(BytesIO(data.content)).convert(\"RGB\")\n\n if size is None:\n size = (300, 200)\n image = image.resize(size, Image.BILINEAR)\n\n to_tensor = transforms.ToTensor()\n return to_tensor(image)\n\n def get_test_images(self):\n image_url = \"http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg\"\n image = self.get_image_from_url(url=image_url, size=(100, 320))\n\n image_url2 = \"https://pytorch.org/tutorials/_static/img/tv_tutorial/tv_image05.png\"\n image2 = self.get_image_from_url(url=image_url2, size=(250, 380))\n\n images = [image]\n test_images = [image2]\n return images, test_images\n\n def test_faster_rcnn(self):\n images, test_images = self.get_test_images()\n dummy_image = [torch.ones(3, 100, 100) * 0.3]\n model = models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=True, min_size=200, max_size=300)\n model.eval()\n model(images)\n # Test exported model on images of different size, or dummy input\n self.run_model(model, [(images,), (test_images,), (dummy_image,)], input_names=[\"images_tensors\"],\n output_names=[\"outputs\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2, 3], \"outputs\": [0, 1, 2, 3]},\n tolerate_small_mismatch=True)\n # Test exported model for an image with no detections on other images\n self.run_model(model, [(dummy_image,), (images,)], input_names=[\"images_tensors\"],\n output_names=[\"outputs\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2, 3], \"outputs\": [0, 1, 2, 3]},\n tolerate_small_mismatch=True)\n\n # Verify that paste_mask_in_image behaves the same in tracing.\n # This test also compares both paste_masks_in_image and _onnx_paste_masks_in_image\n # (since jit_trace will call _onnx_paste_masks_in_image).\n def test_paste_mask_in_image(self):\n # disable profiling\n torch._C._jit_set_profiling_executor(False)\n torch._C._jit_set_profiling_mode(False)\n\n masks = torch.rand(10, 1, 26, 26)\n boxes = torch.rand(10, 4)\n boxes[:, 2:] += torch.rand(10, 2)\n boxes *= 50\n o_im_s = (100, 100)\n from torchvision.models.detection.roi_heads import paste_masks_in_image\n out = paste_masks_in_image(masks, boxes, o_im_s)\n jit_trace = torch.jit.trace(paste_masks_in_image,\n (masks, boxes,\n [torch.tensor(o_im_s[0]),\n torch.tensor(o_im_s[1])]))\n out_trace = jit_trace(masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])])\n\n assert torch.all(out.eq(out_trace))\n\n masks2 = torch.rand(20, 1, 26, 26)\n boxes2 = torch.rand(20, 4)\n boxes2[:, 2:] += torch.rand(20, 2)\n boxes2 *= 100\n o_im_s2 = (200, 200)\n from torchvision.models.detection.roi_heads import paste_masks_in_image\n out2 = paste_masks_in_image(masks2, boxes2, o_im_s2)\n out_trace2 = jit_trace(masks2, boxes2, [torch.tensor(o_im_s2[0]), torch.tensor(o_im_s2[1])])\n\n assert torch.all(out2.eq(out_trace2))\n\n def test_mask_rcnn(self):\n images, test_images = self.get_test_images()\n dummy_image = [torch.ones(3, 100, 320) * 0.3]\n model = models.detection.mask_rcnn.maskrcnn_resnet50_fpn(pretrained=True, min_size=200, max_size=300)\n model.eval()\n model(images)\n # Test exported model on 
images of different size, or dummy input\n self.run_model(model, [(images,), (test_images,), (dummy_image,)],\n input_names=[\"images_tensors\"],\n output_names=[\"boxes\", \"labels\", \"scores\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2, 3], \"boxes\": [0, 1], \"labels\": [0],\n \"scores\": [0], \"masks\": [0, 1, 2, 3]},\n tolerate_small_mismatch=True)\n # TODO: enable this test once dynamic model export is fixed\n # Test exported model for an image with no detections on other images\n # self.run_model(model, [(images,),(test_images,)],\n # input_names=[\"images_tensors\"],\n # output_names=[\"boxes\", \"labels\", \"scores\"],\n # dynamic_axes={\"images_tensors\": [0, 1, 2, 3], \"boxes\": [0, 1], \"labels\": [0],\n # \"scores\": [0], \"masks\": [0, 1, 2, 3]},\n # tolerate_small_mismatch=True)\n\n # Verify that heatmaps_to_keypoints behaves the same in tracing.\n # This test also compares both heatmaps_to_keypoints and _onnx_heatmaps_to_keypoints\n # (since jit_trace will call _heatmaps_to_keypoints).\n # @unittest.skip(\"Disable test until Resize bug fixed in ORT\")\n def test_heatmaps_to_keypoints(self):\n # disable profiling\n torch._C._jit_set_profiling_executor(False)\n torch._C._jit_set_profiling_mode(False)\n\n maps = torch.rand(10, 1, 26, 26)\n rois = torch.rand(10, 4)\n from torchvision.models.detection.roi_heads import heatmaps_to_keypoints\n out = heatmaps_to_keypoints(maps, rois)\n jit_trace = torch.jit.trace(heatmaps_to_keypoints, (maps, rois))\n out_trace = jit_trace(maps, rois)\n\n assert torch.all(out[0].eq(out_trace[0]))\n assert torch.all(out[1].eq(out_trace[1]))\n\n maps2 = torch.rand(20, 2, 21, 21)\n rois2 = torch.rand(20, 4)\n from torchvision.models.detection.roi_heads import heatmaps_to_keypoints\n out2 = heatmaps_to_keypoints(maps2, rois2)\n out_trace2 = jit_trace(maps2, rois2)\n\n assert torch.all(out2[0].eq(out_trace2[0]))\n assert torch.all(out2[1].eq(out_trace2[1]))\n\n def test_keypoint_rcnn(self):\n class KeyPointRCNN(torch.nn.Module):\n def __init__(self):\n super(KeyPointRCNN, self).__init__()\n self.model = models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(\n pretrained=True, min_size=200, max_size=300)\n\n def forward(self, images):\n output = self.model(images)\n # TODO: The keypoints_scores require the use of Argmax that is updated in ONNX.\n # For now we are testing all the output of KeypointRCNN except keypoints_scores.\n # Enable when Argmax is updated in ONNX Runtime.\n return output[0]['boxes'], output[0]['labels'], output[0]['scores'], output[0]['keypoints']\n\n images, test_images = self.get_test_images()\n # TODO:\n # Enable test for dummy_image (no detection) once the issue with\n # _onnx_heatmaps_to_keypoints_loop for empty heatmaps is fixed\n # dummy_images = [torch.ones(3, 100, 100) * 0.3]\n model = KeyPointRCNN()\n model.eval()\n model(images)\n self.run_model(model, [(images,), (test_images,)],\n input_names=[\"images_tensors\"],\n output_names=[\"outputs1\", \"outputs2\", \"outputs3\", \"outputs4\"],\n dynamic_axes={\"images_tensors\": [0, 1, 2, 3]},\n tolerate_small_mismatch=True)\n # TODO: enable this test once dynamic model export is fixed\n # Test exported model for an image with no detections on other images\n # self.run_model(model, [(dummy_images,), (test_images,)],\n # input_names=[\"images_tensors\"],\n # output_names=[\"outputs1\", \"outputs2\", \"outputs3\", \"outputs4\"],\n # dynamic_axes={\"images_tensors\": [0, 1, 2, 3]},\n # tolerate_small_mismatch=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "torch.rand", "torch._C._jit_set_profiling_executor", "torch.no_grad", "torch.ones", "torch.manual_seed", "torch.jit._flatten", "torch.tensor", "torch.onnx.export", "torch._C._jit_set_profiling_mode", "torch.jit.trace", "torch.testing.assert_allclose", "torch.randn" ] ]
ardilacarlosh/amazon-forecast-samples
[ "3a516e5b9b2af745546784178fcea115eda3008c" ]
[ "notebooks/util/fcst_utils.py" ]
[ "import time\nimport boto3\nimport json\nimport pandas as pd\nimport logging\nimport matplotlib.pyplot as plt\n\ndef wait_till_delete(callback, check_time = 5, timeout = 180):\n elapsed_time = 0\n while elapsed_time < timeout:\n try:\n out = callback()\n except Exception as e:\n # When given the resource not found exception, deletion has occured\n if e.response['Error']['Code'] == 'ResourceNotFoundException':\n logging.info('Successful delete\\n')\n return\n # Fails with other error\n logging.info(f'Deletion failed: {e}')\n return(e)\n time.sleep(check_time) # units of seconds\n elapsed_time += check_time\n\ndef wait(callback, time_interval=30):\n last_status = callback()['Status']\n time.sleep(time_interval)\n elapsed_time = time_interval\n is_failed = True\n\n while (last_status != 'ACTIVE'):\n last_status = callback()['Status']\n time.sleep(time_interval) # units of seconds\n elapsed_time += time_interval\n print('.', end='', flush=True)\n if last_status == 'CREATE_FAILED':\n break\n if last_status == \"ACTIVE\":\n is_failed = False\n job_status = \"failed\" if is_failed else \"success\"\n print('')\n logging.info(f\"Finished in {elapsed_time} seconds with status {job_status}\")\n return not is_failed\n\ndef load_exact_sol(fname, item_id, is_schema_perm=False):\n exact = pd.read_csv(fname, header = None)\n exact.columns = ['item_id', 'timestamp', 'target']\n if is_schema_perm:\n exact.columns = ['timestamp', 'target', 'item_id']\n return exact.loc[exact['item_id'] == item_id]\n\ndef get_or_create_role_arn():\n iam = boto3.client(\"iam\")\n role_name = \"ForecastRoleDemo\"\n assume_role_policy_document = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"forecast.amazonaws.com\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n }\n role_arn = None\n try:\n create_role_response = iam.create_role(\n RoleName = role_name,\n AssumeRolePolicyDocument = json.dumps(assume_role_policy_document)\n )\n role_arn = create_role_response[\"Role\"][\"Arn\"]\n except iam.exceptions.EntityAlreadyExistsException:\n print(\"The role \" + role_name + \"exists, ignore to create it\")\n role_arn = boto3.resource('iam').Role(role_name).arn\n policy_arn = \"arn:aws:iam::aws:policy/AmazonForecastFullAccess\"\n iam.attach_role_policy(\n RoleName = role_name,\n PolicyArn = policy_arn\n )\n iam.attach_role_policy(\n PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess',\n RoleName=role_name\n )\n time.sleep(60) # wait for a minute to allow IAM role policy attachment to propagate\n print(role_arn)\n return role_arn\n\n\ndef plot_forecasts(fcsts, exact, freq = '1H', forecastHorizon=24, time_back = 80):\n p10 = pd.DataFrame(fcsts['Forecast']['Predictions']['p10'])\n p50 = pd.DataFrame(fcsts['Forecast']['Predictions']['p50'])\n p90 = pd.DataFrame(fcsts['Forecast']['Predictions']['p90'])\n pred_int = p50['Timestamp'].apply(lambda x: pd.Timestamp(x))\n fcst_start_date = pred_int[0]\n time_int = exact['timestamp'].apply(lambda x: pd.Timestamp(x))\n plt.plot(time_int[-time_back:],exact['target'].values[-time_back:], color = 'r')\n plt.plot(pred_int, p50['Value'].values, color = 'k');\n plt.fill_between(p50['Timestamp'].values, \n p10['Value'].values,\n p90['Value'].values,\n color='b', alpha=0.3);\n plt.axvline(x=pd.Timestamp(fcst_start_date), linewidth=3, color='g', ls='dashed');\n plt.axvline(x=pd.Timestamp(fcst_start_date, freq)+forecastHorizon-1, linewidth=3, color='g', ls='dashed');\n plt.xticks(rotation=30);\n plt.legend(['Target', 'Forecast'], loc = 
'lower left')\n \n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "pandas.Timestamp", "matplotlib.pyplot.fill_between", "pandas.read_csv", "matplotlib.pyplot.xticks" ] ]
goodok/sympy
[ "de84ed2139125a755ea7b6ba91d945d9fbbe5ed9" ]
[ "sympy/matrices/expressions/matexpr.py" ]
[ "from sympy import Expr, Symbol, Mul, Add, Pow, expand, sympify, Tuple, Integer\nfrom sympy.core.basic import Basic\nfrom sympy.core.singleton import S\nfrom sympy.core.decorators import _sympifyit, call_highest_priority\nfrom sympy.matrices import ShapeError, Matrix\n\nclass MatrixExpr(Expr):\n \"\"\" Matrix Expression Class\n Matrix Expressions subclass SymPy Expr's so that\n MatAdd inherits from Add\n MatMul inherits from Mul\n MatPow inherits from Pow\n\n They use _op_priority to gain control with binary operations (+, *, -, **)\n are used\n\n They implement operations specific to Matrix Algebra.\n \"\"\"\n\n _op_priority = 11.0\n\n is_Matrix = True\n is_MatrixExpr = True\n is_Identity = None\n is_Inverse = False\n is_Transpose = False\n is_ZeroMatrix = False\n is_BlockMatrix = False\n\n is_commutative = False\n\n # The following is adapted from the core Expr object\n\n def __neg__(self):\n return MatMul(S.NegativeOne, self)\n def __abs__(self):\n raise NotImplementedError\n\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__radd__')\n def __add__(self, other):\n return MatAdd(self, other)\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__add__')\n def __radd__(self, other):\n return MatAdd(other, self)\n\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__rsub__')\n def __sub__(self, other):\n return MatAdd(self, -other)\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__sub__')\n def __rsub__(self, other):\n return MatAdd(other, -self)\n\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__rmul__')\n def __mul__(self, other):\n return MatMul(self, other)\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__mul__')\n def __rmul__(self, other):\n return MatMul(other, self)\n\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__rpow__')\n def __pow__(self, other):\n if other == -S.One:\n return Inverse(self)\n return MatPow(self, other)\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__pow__')\n def __rpow__(self, other):\n raise NotImplementedError(\"Matrix Power not defined\")\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__rdiv__')\n def __div__(self, other):\n return MatMul(self, other**S.NegativeOne)\n @_sympifyit('other', NotImplemented)\n @call_highest_priority('__div__')\n def __rdiv__(self, other):\n raise NotImplementedError()\n #return MatMul(other, Pow(self, S.NegativeOne))\n\n def __getitem__(self, key):\n raise NotImplementedError()\n\n __truediv__ = __div__\n __rtruediv__ = __rdiv__\n\n @property\n def rows(self):\n return self.shape[0]\n @property\n def cols(self):\n return self.shape[1]\n\n @property\n def is_square(self):\n return self.rows == self.cols\n\n def eval_transpose(self):\n raise NotImplementedError()\n\n def eval_inverse(self):\n raise NotImplementedError()\n\n @property\n def T(self):\n return Transpose(self)\n\n @property\n def I(self):\n return Inverse(self)\n\n def _entry(self, i, j):\n raise NotImplementedError(\"Indexing not implemented\")\n\n def valid_index(self, i, j):\n def is_valid(idx):\n return isinstance(idx, (int, Integer, Symbol))\n return (is_valid(i) and is_valid(j) and\n 0 <= i < self.rows and 0 <= j < self.cols)\n\n def __getitem__(self, key):\n if isinstance(key, tuple) and len(key)==2:\n key = sympify(key)\n i,j = key\n if self.valid_index(i, j) is not False:\n return self._entry(*key)\n else:\n raise IndexError(\"Invalid indices (%s, %s)\"%(str(i), str(j)))\n raise IndexError(\"Invalid index, 
wanted %s[i,j]\"%str(self))\n\n def as_explicit(self):\n \"\"\"\n Returns a dense Matrix with elements represented explicitly\n\n Returns an object of type ImmutableMatrix.\n\n See Also\n --------\n as_mutable: returns MutableMatrix type\n >>> from sympy import Identity\n >>> I = Identity(3)\n >>> I\n I\n >>> I.as_explicit()\n [1, 0, 0]\n [0, 1, 0]\n [0, 0, 1]\n \"\"\"\n from sympy.matrices.immutable_matrix import ImmutableMatrix\n return ImmutableMatrix([[ self[i,j]\n for j in range(self.cols)]\n for i in range(self.rows)])\n\n def as_mutable(self):\n \"\"\"\n Returns a dense Matrix with elements represented explicitly\n\n Returns an object of type MutableMatrix.\n\n See Also\n --------\n as_explicit: returns ImmutableMatrix\n >>> from sympy import Identity\n >>> I = Identity(3)\n >>> I\n I\n >>> I.as_mutable()\n [1, 0, 0]\n [0, 1, 0]\n [0, 0, 1]\n \"\"\"\n return self.as_explicit().as_mutable()\n\n def __array__(self):\n from numpy import empty\n a = empty(self.shape, dtype=object)\n for i in range(self.rows):\n for j in range(self.cols):\n a[i, j] = self[i, j]\n return a\n\n def equals(self, other):\n \"\"\"\n Test elementwise equality between matrices, potentially of different\n types\n\n >>> from sympy import Identity, eye\n >>> Identity(3).equals(eye(3))\n True\n \"\"\"\n return self.as_explicit().equals(other)\n\nclass MatrixSymbol(MatrixExpr, Symbol):\n \"\"\"Symbolic representation of a Matrix object\n\n Creates a SymPy Symbol to represent a Matrix. This matrix has a shape and\n can be included in Matrix Expressions\n\n >>> from sympy import MatrixSymbol, Identity\n >>> A = MatrixSymbol('A', 3, 4) # A 3 by 4 Matrix\n >>> B = MatrixSymbol('B', 4, 3) # A 4 by 3 Matrix\n >>> A.shape\n (3, 4)\n >>> 2*A*B + Identity(3)\n I + 2*A*B\n \"\"\"\n is_commutative = False\n\n def __new__(cls, name, n, m):\n n, m = sympify(n), sympify(m)\n obj = Basic.__new__(cls, name, n, m)\n return obj\n\n def _hashable_content(self):\n return(self.name, self.shape)\n\n @property\n def shape(self):\n return self.args[1:3]\n\n @property\n def name(self):\n return self.args[0]\n\n def _eval_subs(self, old, new):\n # only do substitutions in shape\n shape = Tuple(*self.shape)._subs(old, new)\n return MatrixSymbol(self.name, *shape)\n\n def __call__(self, *args):\n raise TypeError( \"%s object is not callable\" % self.__class__ )\n\n def _entry(self, i, j):\n # MatMul _entry will pass us a Dummy and ask that we remember it\n # so that it can be summed over later. 
We'll use the function syntax\n if i.is_Dummy or j.is_Dummy:\n return Symbol(self.name)(i,j)\n # If that isn't the case we'd really rather just make a symbol\n # They are simpler and look much nicer\n else:\n return Symbol('%s_%s%s'%(self.name, str(i), str(j)))\n\nclass Identity(MatrixSymbol):\n \"\"\"The Matrix Identity I - multiplicative identity\n >>> from sympy.matrices import Identity, MatrixSymbol\n >>> A = MatrixSymbol('A', 3, 5)\n >>> I = Identity(3)\n >>> I*A\n A\n \"\"\"\n\n is_Identity = True\n def __new__(cls, n):\n return MatrixSymbol.__new__(cls, \"I\", n, n)\n\n def transpose(self):\n return self\n\n def _entry(self, i, j):\n if i==j:\n return S.One\n else:\n return S.Zero\n\nclass ZeroMatrix(MatrixSymbol):\n \"\"\"The Matrix Zero 0 - additive identity\n >>> from sympy import MatrixSymbol, ZeroMatrix\n >>> A = MatrixSymbol('A', 3, 5)\n >>> Z = ZeroMatrix(3, 5)\n >>> A+Z\n A\n >>> Z*A.T\n 0\n \"\"\"\n is_ZeroMatrix = True\n def __new__(cls, n, m):\n return MatrixSymbol.__new__(cls, \"0\", n, m)\n def transpose(self):\n return ZeroMatrix(self.cols, self.rows)\n\n def _entry(self, i, j):\n return S.Zero\n\ndef matrix_symbols(expr):\n return [sym for sym in expr.free_symbols if sym.is_Matrix]\n\ndef matrixify(expr):\n \"\"\"\n Recursively walks down an expression tree changing Expr's to MatExpr's\n i.e. Add -> MatAdd\n Mul -> MatMul\n\n Only changes those Exprs which contain MatrixSymbols\n\n This function is useful when traditional SymPy functions which use Mul and\n Add are called on MatrixExpressions. Examples flatten, expand, simplify...\n\n Calling matrixify after calling these functions will reset classes back to\n their matrix equivalents\n \"\"\"\n class_dict = {Mul:MatMul, Add:MatAdd, MatMul:MatMul, MatAdd:MatAdd,\n Pow:MatPow, MatPow:MatPow}\n\n if expr.__class__ not in class_dict:\n return expr\n\n args = map(matrixify, expr.args) # Recursively call down the tree\n\n if not any(arg.is_Matrix for arg in args):\n return expr\n else:\n return Basic.__new__(class_dict[expr.__class__], *args)\n\ndef linear_factors(expr, *syms):\n \"\"\"Reduce a Matrix Expression to a sum of linear factors\n\n Given symbols and a matrix expression linear in those symbols return a\n dict mapping symbol to the linear factor\n\n >>> from sympy import MatrixSymbol, linear_factors, symbols\n >>> n, m, l = symbols('n m l')\n >>> A = MatrixSymbol('A', n, m)\n >>> B = MatrixSymbol('B', m, l)\n >>> C = MatrixSymbol('C', n, l)\n >>> linear_factors(2*A*B + C, B, C)\n {B: 2*A, C: I}\n \"\"\"\n\n expr = matrixify(expand(expr))\n d = {}\n if expr.is_Matrix and expr.is_Symbol:\n if expr in syms:\n d[expr] = Identity(expr.rows)\n\n if expr.is_Add:\n for sym in syms:\n total_factor = 0\n for arg in expr.args:\n factor = arg.coeff(sym)\n if not factor:\n # .coeff fails when powers are in the expression\n if sym in arg.free_symbols:\n raise ValueError(\"Expression not linear in symbols\")\n else:\n factor = 0\n factor = sympify(factor)\n if not factor.is_Matrix:\n if factor.is_zero:\n factor = ZeroMatrix(expr.rows, sym.rows)\n if not sym.cols == expr.cols:\n raise ShapeError(\n \"%s not compatible as factor of %s\"%(sym, expr))\n else:\n factor = Identity(sym.rows)*factor\n total_factor += factor\n d[sym] = total_factor\n elif expr.is_Mul:\n for sym in syms:\n factor = expr.coeff(sym)\n if not factor:\n # .coeff fails when powers are in the expression\n if sym in expr.free_symbols:\n raise ValueError(\"Expression not linear in symbols\")\n else:\n factor = 0\n factor = sympify(factor)\n if not 
factor.is_Matrix:\n if factor.is_zero:\n factor = ZeroMatrix(expr.rows, sym.rows)\n if not sym.cols == expr.cols:\n raise ShapeError(\"%s not compatible as factor of %s\"%\n (sym, expr))\n else:\n factor = Identity(sym.rows)*factor\n d[sym] = factor\n\n if any(sym in matrix_symbols(Tuple(*d.values())) for sym in syms):\n raise ValueError(\"Expression not linear in symbols\")\n\n return d\n\nfrom matmul import MatMul\nfrom matadd import MatAdd\nfrom matpow import MatPow\nfrom transpose import Transpose\nfrom inverse import Inverse\n" ]
[ [ "numpy.empty" ] ]
ZucchiniTang/ssd.pytorch
[ "b2b0a6f286a8d99de84fe0001a4b4f3b7e3e4ca4" ]
[ "layers/modules/multibox_loss.py" ]
[ "# -*- coding: utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom data import coco as cfg\nfrom ..box_utils import match, log_sum_exp\nimport sys\n\nclass MultiBoxLoss(nn.Module):\n \"\"\"SSD Weighted Loss Function\n Compute Targets:\n 1) Produce Confidence Target Indices by matching ground truth boxes\n with (default) 'priorboxes' that have jaccard index > threshold parameter\n (default threshold: 0.5).\n 2) Produce localization target by 'encoding' variance into offsets of ground\n truth boxes and their matched 'priorboxes'.\n 3) Hard negative mining to filter the excessive number of negative examples\n that comes with using a large number of default bounding boxes.\n (default negative:positive ratio 3:1)\n Objective Loss:\n L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n weighted by α which is set to 1 by cross val.\n Args:\n c: class confidences,\n l: predicted boxes,\n g: ground truth boxes\n N: number of matched default boxes\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n \"\"\"\n\n def __init__(self, num_classes, overlap_thresh, prior_for_matching,\n bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,\n use_gpu=True):\n super(MultiBoxLoss, self).__init__()\n self.use_gpu = use_gpu\n self.num_classes = num_classes\n self.threshold = overlap_thresh\n self.background_label = bkg_label\n self.encode_target = encode_target\n self.use_prior_for_matching = prior_for_matching\n self.do_neg_mining = neg_mining\n self.negpos_ratio = neg_pos\n self.neg_overlap = neg_overlap\n self.variance = cfg['variance']\n\n def forward(self, predictions, targets):\n \"\"\"Multibox Loss\n Args:\n predictions (tuple): A tuple containing loc preds, conf preds,\n and prior boxes from SSD net.\n conf shape: torch.size(batch_size,num_priors,num_classes)\n loc shape: torch.size(batch_size,num_priors,4)\n priors shape: torch.size(num_priors,4)\n\n targets (tensor): Ground truth boxes and labels for a batch,\n shape: [batch_size,num_objs,5] (last idx is the label).\n \"\"\"\n # initial: batch_size = 4\n loc_data, conf_data, priors = predictions\n # len(loc_data):4 len(conf_data):4 len(priors): 8732\n num = loc_data.size(0) # num = 4\n\n priors = priors[:loc_data.size(1), :] # priors.size() = torch.Size([8732, 4])\n num_priors = (priors.size(0)) #8732\n num_classes = self.num_classes #2\n\n\n # match priors (default boxes) and ground truth boxes\n loc_t = torch.Tensor(num, num_priors, 4) # (4, 8732, 4)\n conf_t = torch.LongTensor(num, num_priors) # (4, 8732)\n for idx in range(num):\n truths = targets[idx][:, :-1].data\n labels = targets[idx][:, -1].data\n defaults = priors.data\n match(self.threshold, truths, defaults, self.variance, labels,loc_t, conf_t, idx)\n if self.use_gpu:\n loc_t = loc_t.cuda()\n conf_t = conf_t.cuda()\n # wrap targets\n loc_t = Variable(loc_t, requires_grad=False)\n conf_t = Variable(conf_t, requires_grad=False)\n\n pos = conf_t > 0 # pos.shape: torch.Size([32, 8732])\n num_pos = pos.sum(dim=1, keepdim=True) # num_pos.shape: torch.Size([4, 1])\n \n\n # Localization Loss (Smooth L1)\n # Shape: [batch,num_priors,4]\n\n # loc_data: torch.Size([32, 8732, 4])\n # pos.dim(): 2\n # pos.size(): torch.Size([32, 8732])\n\n pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n loc_p = loc_data[pos_idx].view(-1, 4)\n loc_t = loc_t[pos_idx].view(-1, 4)\n loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)\n 
#print('loc_p',loc_p)\n #print('loc_t',loc_t)\n \n\n # Compute max conf across batch for hard negative mining\n batch_conf = conf_data.view(-1, self.num_classes)\n loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))\n\n # Hard Negative Mining\n loss_c = loss_c.view(pos.size()[0], pos.size()[1]) #add line\n \n #print('loss_c.shape:',loss_c)\n #print('pos.shape:',pos)\n \n loss_c[pos] = 0 # filter out pos boxes for now\n loss_c = loss_c.view(num, -1)\n _, loss_idx = loss_c.sort(1, descending=True)\n _, idx_rank = loss_idx.sort(1)\n num_pos = pos.long().sum(1, keepdim=True)\n num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)\n neg = idx_rank < num_neg.expand_as(idx_rank)\n\n # Confidence Loss Including Positive and Negative Examples\n pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)\n targets_weighted = conf_t[(pos+neg).gt(0)]\n loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)\n\n # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n\n N = num_pos.data.sum()\n loss_l /= N\n loss_c /= N\n return loss_l, loss_c\n" ]
[ [ "torch.nn.functional.smooth_l1_loss", "torch.autograd.Variable", "torch.nn.functional.cross_entropy", "torch.LongTensor", "torch.Tensor" ] ]
UT-Covid/episimlab
[ "3ead2e8f0934094a829004d61a4314ed88e5c217" ]
[ "examples/example_sirv.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport xsimlab as xs\nimport xarray as xr\nfrom episimlab.models import ExampleSIR, EpiModel\nfrom episimlab.foi import BaseFOI\nfrom episimlab.utils import visualize_compt_graph, coerce_to_da\nimport networkx as nx\nimport pandas as pd\n\n\n# # Episimlab Tutorial\n# ----\n# \n# <!--<badge>--><a href=\"https://colab.research.google.com/github/UT-Covid/episimlab/blob/main/examples/example_sirv.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a><!--</badge>-->\n# \n# This notebook will provide a tutorial in model building using Episimlab version 2. For more details, please refer to the [Episimlab GitHub repository](https://github.com/UT-Covid/episimlab) and the [xarray-simlab documentation](https://xarray-simlab.readthedocs.io).\n\n# ## Installation\n# \n# To install Episimlab, create and run a new code cell containing:\n# ```\n# !pip install episimlab\n# ```\n# \n# ...or install the development version from the GitHub repo:\n# ```\n# !pip install git+https://github.com/UT-Covid/episimlab\n# ```\n\n# ## Run an Existing SIR Model\n# ----\n# \n# The package includes several pre-built models as examples/starting points. Here, we show how to load and run the `ExampleSIR` model:\n\n# In[ ]:\n\n\nmodel = ExampleSIR()\nmodel\n\n\n# These pre-packaged models come with a set of default parameter values. New models should include a full set of default parameters so that users can quickly and easily use the same model (say, in a publication):\n\n# In[ ]:\n\n\nmodel.RUNNER_DEFAULTS['input_vars']\n\n\n# Of course, we can overwrite the defaults, such as the `beta` value here, and run the model:\n\n# In[ ]:\n\n\ninput_vars = {\n 'beta': 0.07,\n}\nmodel.run(input_vars=input_vars)\n\n\n# The model output is an `xarray.Dataset`, which is very similar to an N-dimensional numpy array. We can also see the compartment graph generated when the model was run. The compartment graph shows allowed transitions in the model. Here, we see that S -> I (`rate_S2I`) and I -> R (`rate_I2R`) transitions are allowed.\n# \n# We can plot the state of the simulation over time:\n\n# In[ ]:\n\n\nmodel.plot()\n\n\n# ## Modify an Existing Model: Adding a Vaccination Compartment\n# ----\n# \n# We now want to add more heterogeneity into our model. We will demonstrate this by adding a Vaccinated (`V`) compartment to the model.\n# \n# Episimlab models are highly modular, so we can easily reuse our existing `ExampleSIR` model instead of writing the new model from scratch. To do this, we will write a few new processes and add them to our existing model:\n# 1. A new process `CustomSetupComptGraph` that generates a compartment graph containing a new `V` node, with new directed edges `(S, V)` and `(V, I)`\n# 2. A new process `RateS2V` that calculates an edge weight `rate_S2V`\n# 3. A new process `RateV2I` that calculates an edge weight `rate_V2I`\n\n# ### #1: Add a `V` Compartment\n# \n# To add our \"vaccinated\" compartment, we need to write a new process `CustomSetupComptGraph` as described above. A \"process\" is just a Python class with some special features:\n# - Methods named `initialize`, `run_step`, `finalize_step`, and `finalize` are run automatically at different points in the simulation.\n# - Variables defined near the beginning of the process can be imported to exported to other processes in the model. 
In this case, we're exporting the variable `compt_graph`.\n\n# In[ ]:\n\n\n@xs.process\nclass CustomSetupComptGraph:\n \"\"\"A single process in the model. Defines the directed graph `compt_graph`\n that defines the compartments and allowed transitions between them.\n \"\"\"\n # Reference a variable defined in a different process, and tell the model\n # that this process intends to output this variable.\n compt_graph = xs.global_ref('compt_graph', intent='out')\n\n def initialize(self):\n \"\"\"This method is run once at the beginning of the simulation.\"\"\"\n self.compt_graph = self.get_compt_graph()\n \n def run_step(self):\n \"\"\"This method is run once at every step of the simulation.\"\"\"\n pass\n\n def finalize(self):\n \"\"\"This method is run once at the end of the simulation.\"\"\"\n self.visualize()\n\n def get_compt_graph(self) -> nx.DiGraph:\n \"\"\"A method that returns a compartment graph as a directed\n graph. Uses the networkx package.\n \"\"\"\n g = nx.DiGraph()\n g.add_nodes_from([\n ('S', {\"color\": \"red\"}),\n ('I', {\"color\": \"blue\"}),\n ('R', {\"color\": \"green\"}),\n ('V', {\"color\": \"purple\"}), # new\n ])\n g.add_edges_from([\n ('S', 'V', {\"priority\": 0, \"color\": \"purple\"}), # new\n ('S', 'I', {\"priority\": 0, \"color\": \"red\"}),\n ('V', 'I', {\"priority\": 1, \"color\": \"pink\"}), # new\n ('I', 'R', {\"priority\": 2, \"color\": \"blue\"}),\n ])\n return g\n \n def visualize(self):\n \"\"\"Visualize the compartment graph, saving as a file at a path.\"\"\"\n return visualize_compt_graph(self.compt_graph)\n\n\n# Before adding our new process to the model, let's test it. Processes are just Python classes (they're actually fancy Python classes called [attrs classes](https://www.attrs.org/en/stable/)), so we can use them as we would any other class in Python:\n\n# In[ ]:\n\n\nprocess = CustomSetupComptGraph()\nprint(process)\n\n\n# In[ ]:\n\n\nprocess.initialize()\nprocess.visualize()\n\n\n# Now that we've tested our new process, let's add it to our model:\n\n# In[ ]:\n\n\nmodel.setup_compt_graph\n\n\n# In[ ]:\n\n\nsirv_model = model.update_processes({\n 'setup_compt_graph': CustomSetupComptGraph\n})\nsirv_model.setup_compt_graph\n\n\n# Let's try running our model with the new compartment graph:\n\n# In[ ]:\n\n\nsirv_model.run()\n\n\n# The new model runs, and we get an informative warning at every step. We have defined a new compartment `V` and defined its edges to other compartments, but the model still doesn't know what the **weights** of these edges are.\n\n# ### #2: Calculate `rate_S2V`\n# \n# To fix the warning, we need only write a new process that calculates and exports edge weights for the S -> V and V -> I transitions that we defined. Let's start by calculating the edge weight of S -> V, which by convention is named `rate_S2V`. \n\n# In[ ]:\n\n\n@xs.process\nclass RateS2V:\n \"\"\"A single process in the model. 
Calculates a vaccination rate\n `rate_S2V`.\n \"\"\"\n # Define a variable that will be imported by other processes,\n # and tell the model that this process intends to ingest the value of this variable.\n vacc_per_day = xs.variable(global_name='vacc_per_day', intent='in')\n \n # Define a variable that we want to export\n # We include the `groups=['edge_weight']` in order for the model to recognize\n # this variable as an edge weight.\n rate_S2V = xs.variable(global_name='rate_S2V', groups=['edge_weight'], intent='out')\n \n def initialize(self):\n \"\"\"If the vaccination rate were constant, we would only need to set\n it once at the beginning of the simulation. Since we want the rate\n to change at a given time, we instead define it in the `run_step`\n method below.\n \"\"\"\n pass\n \n @xs.runtime(args=['step'])\n def run_step(self, step):\n \"\"\"Calculate the `rate_S2V` at every step of the simulation.\n Set the rate to zero after the 5th step.\n \"\"\"\n if step > 5:\n self.rate_S2V = 0.\n else:\n self.rate_S2V = self.vacc_per_day\n\n\n# We can quickly test our process as a standalone Python class to make sure everything is working as expected:\n\n# In[ ]:\n\n\nprocess = RateS2V(vacc_per_day=5.0)\nprint(process.vacc_per_day)\n\n\n# In[ ]:\n\n\nprocess.run_step(step=3)\nprint(process.rate_S2V)\n\n\n# In[ ]:\n\n\nprocess.run_step(step=7)\nprint(process.rate_S2V)\n\n\n# ### #3 Calculate `rate_V2I`\n# \n# Similarly, let's write a process that defines a `rate_V2I`. This rate will be calculated similarly to force of infection (FOI), so we will **inherit** the existing process `BaseFOI` and modify it in the **child** class `RateV2I`. The `BaseFOI` process has methods like `foi` that we can reuse in the child process.\n\n# In[ ]:\n\n\n@xs.process\nclass RateV2I(BaseFOI):\n \"\"\"A single process in the model. Calculates a force of infection\n for vaccinated persons `rate_V2I`. This process inherits from the\n parent class BaseFOI.\n \"\"\"\n # Override the default behavior: calculate FOI based on the population\n # of the V compartment, instead of the S compartment\n S_COMPT_LABELS = 'V'\n \n # Like before, we define a variable that we export in this process\n rate_V2I = xs.variable(dims=('age', 'risk', 'vertex'), \n global_name='rate_V2I', groups=['edge_weight'], \n intent='out')\n \n # We also define an input variable that scales FOI\n vacc_efficacy = xs.variable(global_name='vacc_efficacy', intent='in')\n \n # Use the same values for omega and phi as the S2I transition\n # We use the same values by importing these variables using their\n # global_name\n omega = xs.global_ref('omega', intent='in')\n phi = xs.global_ref('phi', intent='in')\n \n def run_step(self):\n \"\"\"Calculate the `rate_V2I` at every step of the simulation. Here,\n we make use of the `foi` method in the parent process BaseFOI.\n \"\"\"\n self.rate_V2I = self.foi * (1 - self.vacc_efficacy)\n\n\n# Finally, add both processes to the model. 
\n\n# In[ ]:\n\n\nsirv_model = sirv_model.update_processes({\n 'setup_compt_graph': CustomSetupComptGraph,\n 'rate_S2V': RateS2V,\n 'rate_V2I': RateV2I\n})\n\n\n# We visualize the processes in the model as a graph:\n\n# In[ ]:\n\n\nsirv_model.visualize()\n\n\n# We can now run our model, inspect the compartment graph, and plot the results:\n\n# In[ ]:\n\n\nsirv_model.run(\n input_vars={\n 'vacc_efficacy': 0.9,\n 'vacc_per_day': 10,\n 'sto_toggle': 0,\n })\n\n\n# In[ ]:\n\n\nsirv_model.plot()\n\n\n# ## Vaccinate Differently by Age\n# ----\n# \n# Episimlab allows users to set arbitrary dimensions for parameters. We could add age heterogeneity for the `vacc_per_day` variable by modifying our existing processes:\n\n# In[ ]:\n\n\n@xs.process\nclass AgeScaledRateS2V:\n \"\"\"A single process in the model. Calculates a vaccination rate\n `rate_S2V`. Ingests a `vacc_per_day` with one dimension on `age`.\n \"\"\"\n vacc_per_day = xs.variable(global_name='vacc_per_day', intent='in',\n dims=('age')) # new\n rate_S2V = xs.variable(global_name='rate_S2V', groups=['edge_weight'], intent='out')\n \n @xs.runtime(args=['step'])\n def run_step(self, step):\n \"\"\"Calculate the `rate_S2V` at every step of the simulation.\n Set the rate to zero after step 5.\n \"\"\"\n if step > 5:\n self.rate_S2V = 0.\n else:\n self.rate_S2V = xr.DataArray(data=self.vacc_per_day, dims=['age']) # new\n\n\n# In[ ]:\n\n\nage_model = sirv_model.update_processes({\n 'rate_S2V': AgeScaledRateS2V,\n})\n\n\n# In[ ]:\n\n\nage_model\n\n\n# We run the model as usual. Note that we can specify a dictionary of output variables if we want additional data in the output array. In addition to the `state` variable from the `compt_model` process, we also want to retrieve the `rate_V2I` variable from the `rate_V2I` process for one of our analyses.\n\n# In[ ]:\n\n\nage_model.run(\n input_vars={\n 'vacc_efficacy': 0.9,\n # Now stratified by age group:\n # ['0-4', '5-17', '18-49', '50-64', '65+']\n 'vacc_per_day': [0, 0, 5, 10, 10] # new\n },\n output_vars={\n # `state` of the `compt_model` over time (`step`)\n 'compt_model__state': 'step',\n # `rate_V2I` over time (`step`)\n 'rate_V2I__rate_V2I': 'step' # new\n })\n\n\n# ### Plotting\n# \n# Let's look at some more advanced plotting while we're here. We want to plot:\n# - Population of `V` compartment over time\n# - Incidence of escape infections, effectively the `rate_V2I`\n\n# In[ ]:\n\n\n(age_model\n # we want to track the `state` variable from the `compt_model` process\n .out_ds['compt_model__state']\n # only looking at the V compartment\n .loc[dict(compt='V')]\n # sum over the other dimensions, leaving one `age` dimension\n .sum(['risk', 'vertex'])\n # plot over time\n .plot.line(x='step', aspect=2, size=9))\n\n\n# In[ ]:\n\n\n(age_model\n # we want to track the `rate_V2I` variable from the `rate_V2I` process\n .out_ds['rate_V2I__rate_V2I']\n # sum over the other dimensions, leaving one `age` dimension\n .sum(['risk', 'vertex'])\n # plot over time\n .plot.line(x='step', aspect=2, size=9))\n\n\n# ## Package the Final Model\n# ----\n# \n# Now that we're finished with model development, we will package it into a model like `ExampleSIR` so that others can easily use it. This will involve writing a Python class (but not a \"process\") that contains:\n# - The model with its default processes. 
Most of the processes are the same as `ExampleSIR`\n# - Default parameter values\n# - Custom methods such as plotting\n# \n# In short, we will package all of our work thus far into a standardized format that makes it easy to reproduce. We could then distribute the model by putting the model in a [dedicated Python module](https://github.com/UT-Covid/episimlab/blob/main/episimlab/models/example_sirv.py#L215) in the Episimlab repository.\n\n# In[ ]:\n\n\nfrom episimlab.models import example_sir\n\n\n# In[ ]:\n\n\nclass ExampleSIRV(EpiModel):\n \"\"\"A short description of our new model goes here.\n \"\"\"\n AUTHORS = ('Ethan Ho <[email protected]>',)\n LICENCE = 'MIT'\n DOI = 'https://doi.org/10.5281/zenodo.591296'\n VERSION = '1.0.0'\n \n # Optional: include some tags so that future users\n # could sort by model metadata\n TAGS = ('SIRV', 'compartments::4')\n \n # Define all the processes in this model\n PROCESSES = {\n # Core processes\n 'compt_model': example_sir.ComptModel,\n 'setup_sto': example_sir.SetupStochasticFromToggle,\n 'setup_seed': example_sir.SeedGenerator,\n 'setup_coords': example_sir.SetupCoords,\n 'setup_state': example_sir.SetupState,\n 'setup_phi': example_sir.SetupPhi,\n 'setup_omega': example_sir.SetupOmega,\n\n # Edge weight processes from ExampleSIR\n 'rate_S2I': example_sir.FOI,\n 'rate_I2R': example_sir.RecoveryRate,\n \n # Distinct from ExampleSIR\n 'setup_compt_graph': CustomSetupComptGraph,\n 'rate_S2V': AgeScaledRateS2V,\n 'rate_V2I': RateV2I\n }\n \n # Define defaults that can be overwritten by user\n RUNNER_DEFAULTS = {\n 'clocks': {\n 'step': pd.date_range(start='3/1/2020', end='3/15/2020', freq='24H')\n },\n 'input_vars': {\n 'sto_toggle': 0, \n 'seed_entropy': 12345,\n 'beta': 0.08,\n 'gamma': 0.5,\n 'vacc_efficacy': 0.9,\n 'vacc_per_day': [0, 0, 5, 10, 10]\n },\n 'output_vars': {\n 'compt_model__state': 'step',\n 'rate_V2I__rate_V2I': 'step'\n }\n }\n \n # Define custom plotting methods\n def plot(self):\n \"\"\"Plot compartment populations over time.\"\"\"\n return (self\n .out_ds['compt_model__state']\n .sum(['age', 'risk', 'vertex'])\n .plot.line(x='step', aspect=2, size=9))\n \n def plot_vacc(self):\n \"\"\"Plot population of the vaccinated (V) compartment over time,\n stratified by age group.\n \"\"\"\n return (self\n .out_ds['compt_model__state']\n .loc[dict(compt='V')]\n .sum(['risk', 'vertex'])\n .plot.line(x='step', aspect=2, size=9))\n \n def plot_rate_V2I(self):\n \"\"\"Plot incident escape infections (`rate_V2I` over time),\n stratified by age group.\n \"\"\"\n return (self\n .out_ds['rate_V2I__rate_V2I']\n .sum(['risk', 'vertex'])\n .plot.line(x='step', aspect=2, size=9))\n\n\n# Now, running our SIRV model is as easy as:\n\n# In[ ]:\n\n\npackaged_model = ExampleSIRV()\npackaged_model.run(input_vars={\n # Optional: overwrite defaults\n 'vacc_per_day': [0, 0, 0, 5, 20]\n})\npackaged_model.plot_rate_V2I()\n\n" ]
[ [ "pandas.date_range" ] ]
matt-peters/apex
[ "28097c9999d86bd889a1f03c963e29e3384f3996" ]
[ "apex/amp/scaler.py" ]
[ "import torch\nfrom ..multi_tensor_apply import multi_tensor_applier\nfrom ._amp_state import _amp_state, master_params, maybe_print\nfrom itertools import product\n\ndef scale_check_overflow_python(model_grad, master_grad, scale, check_overflow=False):\n # Exception handling for 18.04 compatibility\n if check_overflow:\n cpu_sum = float(model_grad.float().sum())\n if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:\n return True\n\n if master_grad is not model_grad: # copy_ probably internally short-circuits this\n master_grad.copy_(model_grad)\n if scale != 1.0:\n master_grad.mul_(scale)\n return False\n\ndef axpby_check_overflow_python(model_grad, stashed_grad, master_grad, scale, check_overflow=False):\n # Exception handling for 18.04 compatibility\n if check_overflow:\n cpu_sum = float(model_grad.float().sum())\n if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:\n return True\n\n # if master_grad is not model_grad: # copy_ probably internally short-circuits this\n # master_grad.copy_(model_grad)\n assert stashed_grad.dtype == master_grad.dtype\n converted_model_grad = model_grad.to(master_grad.dtype)\n stashed_grad.add_(scale, converted_model_grad)\n master_grad.data = stashed_grad.data\n return False\n\nclass LossScaler(object):\n warned_no_fused_kernel = False\n warned_unscaling_non_fp32_grad = False\n has_fused_kernel = False\n\n def __init__(self,\n loss_scale,\n init_scale=2.**16,\n scale_factor=2.,\n scale_window=2000):\n if loss_scale == \"dynamic\":\n self.dynamic = True\n self._loss_scale = init_scale\n else:\n self.dynamic = False\n self._loss_scale = loss_scale\n self._max_loss_scale = 2.**24\n self._scale_seq_len = scale_window\n self._unskipped = 0\n self._has_overflow = False\n self._overflow_buf = torch.cuda.IntTensor([0])\n if multi_tensor_applier.available:\n import amp_C\n LossScaler.has_fused_kernel = multi_tensor_applier.available\n LossScaler.multi_tensor_scale_cuda = amp_C.multi_tensor_scale\n LossScaler.multi_tensor_axpby_cuda = amp_C.multi_tensor_axpby\n else:\n if not LossScaler.warned_no_fused_kernel:\n maybe_print(\n \"Warning: multi_tensor_applier fused unscale kernel is unavailable, \"\n \"possibly because apex was installed without --cuda_ext --cpp_ext. \"\n \"Using Python fallback. Original ImportError was: \" +\n repr(multi_tensor_applier.import_err),\n True)\n LossScaler.has_fused_kernel = False\n LossScaler.warned_no_fused_kernel = True\n\n def loss_scale(self):\n return self._loss_scale\n\n def unscale_python(self, model_grads, master_grads, scale):\n for model, master in zip(model_grads, master_grads):\n if model is not None:\n if not LossScaler.warned_unscaling_non_fp32_grad:\n if master.dtype != torch.float32:\n maybe_print(\n \"Attempting to unscale a grad with type {} \".format(master.type()) +\n \"Unscaling non-fp32 grads may indicate an error. 
\"\n \"When using Amp, you don't need to call .half() on your model.\")\n LossScaler.warned_unscaling_non_fp32_grad = True\n self._has_overflow = scale_check_overflow_python(model,\n master,\n 1./scale,\n self.dynamic)\n if self._has_overflow and self.dynamic:\n break\n\n # unused_scale keeps some of the old API alive for hopefully a short time.\n def unscale(self, model_grads, master_grads, unused_scale, models_are_masters=False):\n if self._has_overflow:\n return\n\n scale = self._loss_scale\n\n if scale == 1.0 and models_are_masters and not self.dynamic:\n return\n\n if LossScaler.has_fused_kernel:\n # if (not LossScaler.warned_unscaling_non_fp32_grad\n # and master_grads[0].dtype == torch.float16):\n # print(\"Warning: unscaling grads that are not FP32. \"\n # \"Unscaling non-fp32 grads may indicate an error. \"\n # \"When using Amp, you don't need to call .half() on your model.\")\n # # Setting this to True unconditionally allows the possibility of an escape\n # # if never-before-seen non-fp32 grads are created in some later iteration.\n # LossScaler.warned_unscaling_non_fp32_grad = True\n multi_tensor_applier(LossScaler.multi_tensor_scale_cuda,\n self._overflow_buf,\n [model_grads, master_grads],\n 1./scale)\n else:\n self.unscale_python(model_grads, master_grads, scale)\n\n # Defer to update_scale\n # If the fused kernel is available, we only need one D2H memcopy and sync.\n # if LossScaler.has_fused_kernel and self.dynamic and not self._has_overflow:\n # self._has_overflow = self._overflow_buf.item()\n\n def unscale_with_stashed_python(self,\n model_grads,\n stashed_master_grads,\n master_grads,\n scale):\n for model, stashed, master in zip(model_grads, stashed_master_grads, master_grads):\n if model is None and stashed is None:\n continue\n else:\n if not LossScaler.warned_unscaling_non_fp32_grad:\n if master.dtype != torch.float32:\n maybe_print(\n \"Attempting to unscale a grad with type {} \".format(master.type()) +\n \"Unscaling non-fp32 grads may indicate an error. \"\n \"When using Amp, you don't need to call .half() on your model.\")\n LossScaler.warned_unscaling_non_fp32_grad = True\n self._has_overflow = axpby_check_overflow_python(model,\n stashed,\n master,\n 1./scale,\n self.dynamic)\n if self._has_overflow and self.dynamic:\n break\n\n def unscale_with_stashed(self,\n model_grads,\n stashed_master_grads,\n master_grads):\n if self._has_overflow:\n return\n\n scale = self._loss_scale\n\n if LossScaler.has_fused_kernel:\n if (not LossScaler.warned_unscaling_non_fp32_grad\n and master_grads[0].dtype == torch.float16):\n print(\"Warning: unscaling grads that are not FP32. \"\n \"Unscaling non-fp32 grads may indicate an error. 
\"\n \"When using Amp, you don't need to call .half() on your model.\")\n # Setting this to True unconditionally allows the possibility of an escape\n # if never-before-seen non-fp32 grads are created in some later iteration.\n LossScaler.warned_unscaling_non_fp32_grad = True\n multi_tensor_applier(LossScaler.multi_tensor_axpby_cuda,\n self._overflow_buf,\n [model_grads, stashed_master_grads, master_grads],\n 1./scale,\n 1.0,\n 0) # check only arg 0, aka the incoming model grads, for infs\n else:\n self.unscale_with_stashed_python(model_grads,\n stashed_master_grads,\n master_grads,\n scale)\n\n # Defer to update_scale\n # If the fused kernel is available, we only need one D2H memcopy and sync.\n # if LossScaler.has_fused_kernel and self.dynamic and not self._has_overflow:\n # self._has_overflow = self._overflow_buf.item()\n\n def clear_overflow_state(self):\n self._has_overflow = False\n if self.has_fused_kernel:\n self._overflow_buf.zero_()\n\n # Separate so unscale() can be called more that once before updating.\n def update_scale(self):\n # If the fused kernel is available, we only need one D2H memcopy and sync.\n if LossScaler.has_fused_kernel and self.dynamic and not self._has_overflow:\n self._has_overflow = self._overflow_buf.item()\n\n if self._has_overflow and self.dynamic:\n should_skip = True\n self._loss_scale /= 2.\n self._unskipped = 0\n else:\n should_skip = False\n self._unskipped += 1\n\n if self._unskipped == self._scale_seq_len and self.dynamic:\n self._loss_scale = min(self._max_loss_scale, self._loss_scale * 2.)\n self._unskipped = 0\n\n return should_skip\n" ]
[ [ "torch.cuda.IntTensor" ] ]