code
stringlengths 31
1.05M
| apis
list | extract_api
stringlengths 97
1.91M
|
---|---|---|
import cv2
import numpy as np

# Load the source image from disk (BGR, as OpenCV reads it).
src = cv2.imread("test1.jpg")

# Derive three variants: an all-zero image of the same shape/dtype,
# a deep copy, and a single-channel grayscale conversion.
blank = np.zeros(src.shape, np.uint8)
duplicate = src.copy()
grayscale = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

# Display the grayscale variant until a key is pressed,
# then close every OpenCV window.
cv2.imshow("EmptyImage3", grayscale)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.zeros",
"cv2.imread",
"cv2.imshow"
] |
[((43, 66), 'cv2.imread', 'cv2.imread', (['"""test1.jpg"""'], {}), "('test1.jpg')\n", (53, 66), False, 'import cv2\n'), ((82, 111), 'numpy.zeros', 'np.zeros', (['img.shape', 'np.uint8'], {}), '(img.shape, np.uint8)\n', (90, 111), True, 'import numpy as np\n'), ((159, 196), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (171, 196), False, 'import cv2\n'), ((201, 239), 'cv2.imshow', 'cv2.imshow', (['"""EmptyImage3"""', 'emptyImage3'], {}), "('EmptyImage3', emptyImage3)\n", (211, 239), False, 'import cv2\n'), ((242, 256), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (253, 256), False, 'import cv2\n'), ((260, 283), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (281, 283), False, 'import cv2\n')]
|
#!/usr/bin/env python
#
# atlaspanel.py - The AtlasPanel class.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`AtlasPanel`, a *FSLeyes control* panel
which allows the user to browse the FSL atlas images. See the
:mod:`~fsleyes` package documentation for more details on control panels,
and the :mod:`.atlases` module for more details on the atlases available in
FSL.
"""
import logging
import numpy as np
import wx
import fsl.data.image as fslimage
import fsl.data.atlases as atlases
import fsl.data.constants as constants
import fsl.utils.idle as idle
import fsleyes_props as props
import fsleyes_widgets.notebook as notebook
import fsleyes_widgets.utils.status as status
import fsleyes.views.canvaspanel as canvaspanel
import fsleyes.controls.controlpanel as ctrlpanel
import fsleyes.strings as strings
from . import atlasmanagementpanel
from . import atlasoverlaypanel
from . import atlasinfopanel
# Module-level logger, following the fsleyes convention.
log = logging.getLogger(__name__)
class AtlasPanel(ctrlpanel.ControlPanel):
    """An ``AtlasPanel`` is a :class:`.ControlPanel` which allows the user to
    view atlas information, and to browse through the atlases that come shipped
    with FSL. The ``AtlasPanel`` interface is provided by some sub-panels,
    which are displayed in a :class:`fsleyes_widgets.Notebook` panel. The
    ``AtlasPanel`` itself provides a number of convenience methods that are
    used by these sub-panels:
    ============================== ===========================================
    :class:`.AtlasInfoPanel`       Displays information for the current
                                   :attr:`.DisplayContext.location` from
                                   atlases selected by the user.
    :class:`.AtlasOverlayPanel`    Allows the user to search through all
                                   atlases for specific regions, and to toggle
                                   on/off overlays for those regions.
    :class:`.AtlasManagementPanel` Allows the user to add/remove atlases.
    ============================== ===========================================
    **Loading atlases**
    The :class:`AtlasPanel` class provides the :meth:`loadAtlas` method, which
    is used by sub-panels to load atlas images.
    .. _atlas-panel-atlas-overlays:
    **Toggling atlas overlays**
    Both of the sub-panels allow the user to add/remove overlays to/from the
    :class:`.OverlayList`. The following overlay types can be added:
    - A complete summary :class:`.LabelAtlas`, which is a 3D image where
      each region has a discrete integer label. These images are added with
      a :attr:`.Display.overlayType` of ``label``.
    - A mask image containing a single region, extracted from a
      :class:`.LabelAtlas`. These images are added with a
      :attr:`.Display.overlayType` of ``mask``.
    - A 3D image containing the statistic image for a single region,
      extracted from a :class:`.StatisticAtlas`. These images are added
      with a :attr:`.Display.overlayType` of ``volume``.
    The following methods allow these overlays to be toggled on/off, and to
    query their state:
    .. autosummary::
       :nosignatures:
       toggleOverlay
       getOverlayName
       getOverlayState
    .. _atlas-panel-overlay-names:
    **Atlas overlay names**
    When an atlas overlay is added, its :attr:`.Image.name` (and subsequently
    its :attr:`.Display.name`) are set to a name which has the following
    structure::
    atlasID/overlayType/regionName
    where:
    - ``atlasID`` is the atlas identifier (see the :mod:`.atlases` module).
    - ``overlayType`` is either ``label``, ``prob``, or ``stat``, depending on
      whether the overlay is a discrete label image, a probability image, or
      a statistic image.
    - ``regionName`` is the name of the region, or ``all`` if the overlay
      is a complete :class:`.LabelAtlas`.
    .. image:: images/atlaspanel_overlay_names.png
       :scale: 50%
       :align: center
    This name is used by the ``AtlasPanel`` to identify the overlay in the
    :class:`.OverlayList`.
    .. warning:: If the name of these overlays is changed, the ``AtlasPanel``
                 will not be able to find them in the :class:`.OverlayList`,
                 and the :meth:`toggleOverlay` and :meth:`getOverlayState`
                 methods will stop working properly. So don't change the
                 atlas overlay names!
    **Locating regions**
    Finally, the :meth:`locateRegion` method allows the
    :attr:`.DisplayContext.location` to be moved to the location of a specific
    region in a specific atlas.
    """
    @staticmethod
    def supportedViews():
        """The ``AtlasPanel`` is restricted for use with
        :class:`.OrthoPanel`, :class:`.LightBoxPanel` and
        :class:`.Scene3DPanel` views.
        """
        return [canvaspanel.CanvasPanel]
@staticmethod
def defaultLayout():
"""Returns a dictionary of arguments to be passed to the
:meth:`.ViewPanel.togglePanel` method.
"""
return {'location' : wx.BOTTOM}
    def __init__(self, parent, overlayList, displayCtx, viewPanel):
        """Create an ``AtlasPanel``.
        :arg parent: The :mod:`wx` parent object.
        :arg overlayList: The :class:`.OverlayList` instance.
        :arg displayCtx: The :class:`.DisplayContext` instance.
        :arg viewPanel: The :class:`.ViewPanel` instance.
        """
        ctrlpanel.ControlPanel.__init__(
            self, parent, overlayList, displayCtx, viewPanel)
        # Make sure the atlas
        # registry is up to date
        atlases.rescanAtlases()
        # See the enableAtlasPanel method
        # for info about this attribute.
        self.__atlasPanelEnableStack = 0
        # Cache of loaded atlases
        # and enabled atlas overlays.
        self.__enabledOverlays = {}
        self.__loadedAtlases = {}
        # The three sub-panels live inside a notebook widget
        self.__notebook = notebook.Notebook(self)
        self.__sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.__sizer.Add(self.__notebook, flag=wx.EXPAND, proportion=1)
        self.SetSizer(self.__sizer)
        # Info panel, showing atlas information
        # for the current display location
        self.__infoPanel = atlasinfopanel.AtlasInfoPanel(
            self.__notebook, overlayList, displayCtx, self)
        # Overlay panel, containing a list of regions,
        # allowing the user to add/remove overlays
        self.__overlayPanel = atlasoverlaypanel.AtlasOverlayPanel(
            self.__notebook, overlayList, displayCtx, self)
        self.__managePanel = atlasmanagementpanel.AtlasManagementPanel(
            self.__notebook, overlayList, displayCtx, self)
        self.__notebook.AddPage(self.__infoPanel,
                                strings.titles[self.__infoPanel])
        self.__notebook.AddPage(self.__overlayPanel,
                                strings.titles[self.__overlayPanel])
        self.__notebook.AddPage(self.__managePanel,
                                strings.titles[self.__managePanel])
        # Keep the sub-panels in sync with
        # changes to the overlay list
        self.overlayList.addListener('overlays',
                                     self.name,
                                     self.__overlayListChanged)
        self.Layout()
        self.SetMinSize(self.__sizer.GetMinSize())
    def destroy(self):
        """Must be called on when this ``AtlasPanel`` is no longer needed.
        Calls the ``destroy`` methods of the :class:`.AtlasInfoPanel` and
        :class:`.AtlasOverlayPanel`, and then calls
        :meth:`.ControlPanel.destroy`.
        """
        # Drop cached atlases and overlay records
        # so they can be garbage-collected
        self.__loadedAtlases = None
        self.__enabledOverlays = None
        self.__infoPanel .destroy()
        self.__overlayPanel .destroy()
        self.__managePanel .destroy()
        # De-register the overlay list listener
        # before tearing down the base class
        self.overlayList.removeListener('overlays', self.name)
        ctrlpanel.ControlPanel.destroy(self)
def Enable(self, enable=True):
"""Enables/disables this ``AtlasPanel``. """
self.__infoPanel .Enable(enable)
self.__overlayPanel.Enable(enable)
self.__managePanel .Enable(enable)
def Disable(self):
"""Disables this ``AtlasPanel``. """
self.Enable(False)
def enableAtlasPanel(self, enable=True):
"""Disables/enables the :class:`.AtlasPanel` which contains this
``AtlasOverlayPanel``. This method is used by
:class:`OverlayListWidget` instances.
This method keeps a count of the number of times that it has been
called - the count is increased every time a request is made
to disable the ``AtlasPanel``, and decreased on requests to
enable it. The ``AtlasPanel`` is only enabled when the count
reaches 0.
This ugly method solves an awkward problem - the ``AtlasOverlayPanel``
disables the ``AtlasPanel`` when an atlas overlay is toggled on/off
(via an ``OverlayListWidget``), and when an atlas region list is being
generated (via the :meth:`__onAtlasSelect` method). If both of these
things occur at the same time, the ``AtlasPanel`` could be prematurely
re-enabled. This method overcomes this problem.
"""
count = self.__atlasPanelEnableStack
log.debug('enableAtlasPanel({}, count={})'.format(enable, count))
if enable:
count -= 1
if count <= 0:
count = 0
self.Enable()
else:
count += 1
self.Disable()
self.__atlasPanelEnableStack = count
    def loadAtlas(self,
                  atlasID,
                  summary,
                  onLoad=None,
                  onError=None,
                  matchResolution=True):
        """Loads the atlas image with the specified ID. The atlas is loaded
        asynchronously (via the :mod:`.idle` module), as it can take some
        time. Use the `onLoad` argument if you need to do something when the
        atlas has been loaded.
        :arg onLoad: Optional. A function which is called when the
                     atlas has been loaded, and which is passed the
                     loaded :class:`.Atlas` image.
        :arg onError: Optional. A function which is called if the
                      atlas loading job raises an error. Passed the
                      ``Exception`` that was raised.
        :arg matchResolution: If ``True`` (the default), the version of the
                              atlas with the most suitable resolution, based
                              on the current contents of the
                              :class:`.OverlayList`, is loaded.
        See the :func:`.atlases.loadAtlas` function for details on the other
        arguments.
        """
        # Get the atlas description, and the
        # most suitable resolution to load.
        desc = atlases.getAtlasDescription(atlasID)
        res = self.__getSuitableResolution(desc, matchResolution)
        # Label atlases are only available in summary form
        if desc.atlasType == 'label':
            summary = True
        # Atlases are cached by (id, summary, resolution)
        atlas = self.__loadedAtlases.get((atlasID, summary, res), None)
        if atlas is None:
            log.debug('Loading atlas {}/{}'.format(
                atlasID, 'label' if summary else 'prob'))
            status.update('Loading atlas {}...'.format(atlasID), timeout=None)
            # Loading the atlas image can take a long
            # time, so it is performed on the idle loop.
            def load():
                # the panel might get destroyed
                # before this function is called
                if self.destroyed:
                    return
                atlas = atlases.loadAtlas(atlasID, summary, resolution=res)
                # The atlas panel may be destroyed
                # before the atlas is loaded.
                if not self or self.destroyed:
                    return
                self.__loadedAtlases[atlasID, summary, res] = atlas
                status.update('Atlas {} loaded.'.format(atlasID))
                if onLoad is not None:
                    idle.idle(onLoad, atlas)
            idle.run(load, onError=onError)
        # If the atlas has already been loaded,
        # pass it straight to the onload function
        elif onLoad is not None:
            onLoad(atlas)
def __getSuitableResolution(self, desc, matchResolution=True):
"""Used by the :meth:`loadAtlas` method. Determines a suitable
atlas resolution to load, based on the current contents of the
:class:`.OverlayList`.
"""
niftis = [o for o in self.overlayList
if (isinstance(o, fslimage.Nifti) and
o.getXFormCode() == constants.NIFTI_XFORM_MNI_152)]
# No overlays to match resolution against
if len(niftis) == 0:
matchResolution = False
# If we don't need to match resolution,
# return the highest available resolution
# (the lowest value).
if not matchResolution:
return np.concatenate(desc.pixdims).min()
# Find the highest resolution
# in the overlay list
pixdims = [o.pixdim[:3] for o in niftis]
res = np.concatenate(pixdims).min()
# identify the atlas with the
# nearest resolution to the
# requested resolution
reses = np.concatenate(desc.pixdims)
res = reses[np.argmin(np.abs(reses - res))]
return res
def getOverlayName(self, atlasID, labelIdx, summary):
"""Returns a name to be used for the specified atlas (see the section
on :ref:`atlas names <atlas-panel-overlay-names>`).
:arg atlasID: Atlas identifier
:arg labelIdx: Label index, or ``None`` for a complete atlas.
:arg summary: ``True`` corresponds to a label atlas, ``False`` to a
probabilistic atlas.
"""
atlasDesc = atlases.getAtlasDescription(atlasID)
if atlasDesc.atlasType == 'summary' or labelIdx is None:
summary = True
if summary: overlayType = 'label'
else: overlayType = 'prob'
if labelIdx is None:
overlayName = '{}/{}/all'.format(atlasID, overlayType)
else:
overlayName = '{}/{}/{}' .format(atlasID,
overlayType,
atlasDesc.labels[labelIdx].name)
return overlayName, summary
def getOverlayState(self, atlasID, labelIdx, summary):
"""Returns ``True`` if the specified atlas overlay is in the
:class:`.OverlayList`, ``False`` otherwise. See
:meth:`getOverlayName` for details on the arguments.
"""
name, _ = self.getOverlayName(atlasID, labelIdx, summary)
return self.overlayList.find(name) is not None
    def toggleOverlay(self,
                      atlasID,
                      labelIdx,
                      summary,
                      onLoad=None,
                      onError=None):
        """Adds or removes the specified overlay to/from the
        :class:`.OverlayList`.
        :arg onLoad: Optional function to be called when the overlay has been
                     added/removed.
        :arg onError: Optional function to be called if an error occurs while
                      loading an overlay.
        See :meth:`getOverlayName` for details on the other arguments.
        """
        atlasDesc = atlases.getAtlasDescription(atlasID)
        overlayName, summary = self.getOverlayName(atlasID, labelIdx, summary)
        overlay = self.overlayList.find(overlayName)
        # The overlay is already in the list -
        # remove it and update the panel state.
        if overlay is not None:
            # Suspend the overlay list listener while
            # removing, to avoid a redundant refresh
            self.overlayList.disableListener('overlays', self.name)
            self.overlayList.remove(overlay)
            self.overlayList.enableListener('overlays', self.name)
            self.__enabledOverlays.pop(overlayName, None)
            self.__overlayPanel.setOverlayState(
                atlasDesc, labelIdx, summary, False)
            log.debug('Removed overlay {}'.format(overlayName))
            if onLoad is not None:
                onLoad()
            return
        # Otherwise the atlas is loaded asynchronously,
        # and the overlay added when it arrives.
        def realOnLoad(atlas):
            initprops = {}
            # label image
            if labelIdx is None:
                overlay = fslimage.Image(atlas)
                initprops['overlayType'] = 'label'
            else:
                # regional label image
                if summary:
                    overlay = atlas.get(index=labelIdx, binary=False)
                    initprops['overlayType'] = 'mask'
                    initprops['colour'] = np.random.random(3)
                # regional statistic/probability image
                else:
                    overlay = atlas.get(index=labelIdx)
                    initprops['overlayType'] = 'volume'
                    initprops['cmap'] = 'hot'
                    initprops['displayRange'] = (atlasDesc.lower,
                                                 atlasDesc.upper)
                    initprops['clippingRange'] = (atlasDesc.lower,
                                                  atlasDesc.upper)
            # The overlay name is how the panel finds
            # this overlay again - see the class docs.
            overlay.name = overlayName
            with props.suppress(self.overlayList, 'overlays', self.name):
                self.overlayList.append(overlay, **initprops)
            self.__overlayPanel.setOverlayState(
                atlasDesc, labelIdx, summary, True)
            self.__enabledOverlays[overlayName] = (overlay,
                                                   atlasID,
                                                   labelIdx,
                                                   summary)
            log.debug('Added overlay {}'.format(overlayName))
            if onLoad is not None:
                onLoad()
        self.loadAtlas(atlasID, summary, onLoad=realOnLoad, onError=onError)
def locateRegion(self, atlasID, labelIdx):
"""Moves the :attr:`.DisplayContext.location` to the specified
region in the specified atlas. See the :class:`.AtlasDescription`
class for details on atlas identifiers/label indices.
:arg atlasID: Atlas identifier
:arg labelIdx: Label index
"""
atlasDesc = atlases.getAtlasDescription(atlasID)
label = atlasDesc.labels[labelIdx]
overlay = self.displayCtx.getReferenceImage(
self.displayCtx.getSelectedOverlay())
if overlay is None:
log.warn('No reference image available - cannot locate region')
opts = self.displayCtx.getOpts(overlay)
worldLoc = (label.x, label.y, label.z)
dispLoc = opts.transformCoords([worldLoc], 'world', 'display')[0]
self.displayCtx.location.xyz = dispLoc
def __overlayListChanged(self, *a):
"""Called when the :class:`.OverlayList` changes.
Makes sure that the :class:`.AtlasOverlayPanel` state is up to date -
see the :meth:`.AtlasOverlayPanel.setOverlayState` method.
"""
for overlayName in list(self.__enabledOverlays.keys()):
overlay, atlasID, labelIdx, summary = \
self.__enabledOverlays[overlayName]
if overlay not in self.overlayList:
self.__enabledOverlays.pop(overlayName)
atlasDesc = atlases.getAtlasDescription(atlasID)
self.__overlayPanel.setOverlayState(
atlasDesc, labelIdx, summary, False)
|
[
"fsleyes.controls.controlpanel.ControlPanel.destroy",
"fsleyes.controls.controlpanel.ControlPanel.__init__",
"wx.BoxSizer",
"numpy.concatenate",
"numpy.abs",
"fsl.utils.idle.idle",
"fsl.data.atlases.getAtlasDescription",
"fsleyes_props.suppress",
"fsl.data.atlases.loadAtlas",
"fsl.data.image.Image",
"fsl.data.atlases.rescanAtlases",
"numpy.random.random",
"fsl.utils.idle.run",
"fsleyes_widgets.notebook.Notebook",
"logging.getLogger"
] |
[((1125, 1152), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1142, 1152), False, 'import logging\n'), ((5725, 5810), 'fsleyes.controls.controlpanel.ControlPanel.__init__', 'ctrlpanel.ControlPanel.__init__', (['self', 'parent', 'overlayList', 'displayCtx', 'viewPanel'], {}), '(self, parent, overlayList, displayCtx,\n viewPanel)\n', (5756, 5810), True, 'import fsleyes.controls.controlpanel as ctrlpanel\n'), ((5892, 5915), 'fsl.data.atlases.rescanAtlases', 'atlases.rescanAtlases', ([], {}), '()\n', (5913, 5915), True, 'import fsl.data.atlases as atlases\n'), ((6213, 6236), 'fsleyes_widgets.notebook.Notebook', 'notebook.Notebook', (['self'], {}), '(self)\n', (6230, 6236), True, 'import fsleyes_widgets.notebook as notebook\n'), ((6261, 6287), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (6272, 6287), False, 'import wx\n'), ((8025, 8061), 'fsleyes.controls.controlpanel.ControlPanel.destroy', 'ctrlpanel.ControlPanel.destroy', (['self'], {}), '(self)\n', (8055, 8061), True, 'import fsleyes.controls.controlpanel as ctrlpanel\n'), ((11077, 11113), 'fsl.data.atlases.getAtlasDescription', 'atlases.getAtlasDescription', (['atlasID'], {}), '(atlasID)\n', (11104, 11113), True, 'import fsl.data.atlases as atlases\n'), ((13446, 13474), 'numpy.concatenate', 'np.concatenate', (['desc.pixdims'], {}), '(desc.pixdims)\n', (13460, 13474), True, 'import numpy as np\n'), ((14013, 14049), 'fsl.data.atlases.getAtlasDescription', 'atlases.getAtlasDescription', (['atlasID'], {}), '(atlasID)\n', (14040, 14049), True, 'import fsl.data.atlases as atlases\n'), ((15589, 15625), 'fsl.data.atlases.getAtlasDescription', 'atlases.getAtlasDescription', (['atlasID'], {}), '(atlasID)\n', (15616, 15625), True, 'import fsl.data.atlases as atlases\n'), ((18390, 18426), 'fsl.data.atlases.getAtlasDescription', 'atlases.getAtlasDescription', (['atlasID'], {}), '(atlasID)\n', (18417, 18426), True, 'import fsl.data.atlases as atlases\n'), ((12206, 
12237), 'fsl.utils.idle.run', 'idle.run', (['load'], {'onError': 'onError'}), '(load, onError=onError)\n', (12214, 12237), True, 'import fsl.utils.idle as idle\n'), ((11748, 11799), 'fsl.data.atlases.loadAtlas', 'atlases.loadAtlas', (['atlasID', 'summary'], {'resolution': 'res'}), '(atlasID, summary, resolution=res)\n', (11765, 11799), True, 'import fsl.data.atlases as atlases\n'), ((13294, 13317), 'numpy.concatenate', 'np.concatenate', (['pixdims'], {}), '(pixdims)\n', (13308, 13317), True, 'import numpy as np\n'), ((13507, 13526), 'numpy.abs', 'np.abs', (['(reses - res)'], {}), '(reses - res)\n', (13513, 13526), True, 'import numpy as np\n'), ((16435, 16456), 'fsl.data.image.Image', 'fslimage.Image', (['atlas'], {}), '(atlas)\n', (16449, 16456), True, 'import fsl.data.image as fslimage\n'), ((17359, 17414), 'fsleyes_props.suppress', 'props.suppress', (['self.overlayList', '"""overlays"""', 'self.name'], {}), "(self.overlayList, 'overlays', self.name)\n", (17373, 17414), True, 'import fsleyes_props as props\n'), ((19469, 19505), 'fsl.data.atlases.getAtlasDescription', 'atlases.getAtlasDescription', (['atlasID'], {}), '(atlasID)\n', (19496, 19505), True, 'import fsl.data.atlases as atlases\n'), ((12168, 12192), 'fsl.utils.idle.idle', 'idle.idle', (['onLoad', 'atlas'], {}), '(onLoad, atlas)\n', (12177, 12192), True, 'import fsl.utils.idle as idle\n'), ((13123, 13151), 'numpy.concatenate', 'np.concatenate', (['desc.pixdims'], {}), '(desc.pixdims)\n', (13137, 13151), True, 'import numpy as np\n'), ((16766, 16785), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (16782, 16785), True, 'import numpy as np\n')]
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines testing utility functions."""
import jax.test_util as jtu
import jax.numpy as jnp
import numpy as onp
from jax.config import config as jax_config
from jax_md import dataclasses
# Command-line flags; provides test_srcdir for locating test data files.
FLAGS = jax_config.FLAGS
# Shorthand used by the data-loading helpers below.
f32 = jnp.float32
def update_test_tolerance(f32_tolerance=None, f64_tolerance=None):
  """Override JAX's default test tolerances for f32/f64 comparisons.

  Tolerances left as ``None`` are not modified.
  """
  overrides = {onp.float32: f32_tolerance, onp.float64: f64_tolerance}
  for dtype, tolerance in overrides.items():
    if tolerance is not None:
      jtu._default_tolerance[onp.dtype(dtype)] = tolerance
def default_tolerance():
  """Return test tolerances, with float32 relaxed to 5e-2 on TPU."""
  if jtu.device_under_test() == 'tpu':
    relaxed = jtu._default_tolerance.copy()
    relaxed[onp.dtype(onp.float32)] = 5e-2
    return relaxed
  # Other backends use JAX's defaults unchanged (same dict object).
  return jtu._default_tolerance
# Monkey-patch so jtu-based assertions pick up the TPU-aware tolerances.
jtu.default_tolerance = default_tolerance
def _load_silica_data(filename: str) -> jnp.ndarray:
  """Read silica positions from ``filename``, relative to the test srcdir."""
  path = FLAGS.test_srcdir + filename
  with open(path, 'rb') as f:
    return jnp.array(onp.load(f))
def load_silica_data() -> jnp.ndarray:
  """Load the silica test positions.

  Checks the local test-data directory first, falling back to the
  google3 location if the file is not found there.
  """
  try:
    return _load_silica_data('tests/data/silica_positions.npy')
  except FileNotFoundError:
    pass
  return _load_silica_data(
      '/google3/third_party/py/jax_md/tests/data/silica_positions.npy')
@dataclasses.dataclass
class JammedTestState:
  """Pre-computed jammed packing used as a test fixture.

  Fields appear in the order they are serialized by ``_load_jammed_state``.
  """
  fractional_position: jnp.ndarray  # particle positions in fractional coords
  real_position: jnp.ndarray        # particle positions in real-space coords
  species: jnp.ndarray              # per-particle species index (kept as-is)
  sigma: jnp.ndarray                # per-species particle diameters
  box: jnp.ndarray                  # simulation box
  energy: jnp.ndarray               # reference energy of the packing
  pressure: jnp.ndarray             # reference pressure of the packing
def _load_jammed_state(filename: str, dtype) -> JammedTestState:
  """Deserialize a :class:`JammedTestState` from ``filename``.

  The file holds seven consecutive numpy arrays, in the field order of
  ``JammedTestState``; every field except ``species`` is cast to ``dtype``.
  """
  path = FLAGS.test_srcdir + filename
  with open(path, 'rb') as f:
    # Reads are sequential, so the order below must
    # match the order the arrays were written in.
    fractional_position = onp.load(f).astype(dtype)
    real_position = onp.load(f).astype(dtype)
    species = onp.load(f)
    sigma = onp.load(f).astype(dtype)
    box = onp.load(f).astype(dtype)
    energy = onp.load(f).astype(dtype)
    pressure = onp.load(f).astype(dtype)
  return JammedTestState(  # pytype: disable=wrong-keyword-args
      fractional_position=fractional_position,
      real_position=real_position,
      species=species,
      sigma=sigma,
      box=box,
      energy=energy,
      pressure=pressure)
def load_jammed_state(filename: str, dtype) -> JammedTestState:
  """Load the jammed test state named ``filename``.

  Checks the local test-data directory first, falling back to the google3
  location if the file is not found there.

  :arg filename: Base name of the data file (e.g. ``'jammed.npy'``);
                 previously this argument was ignored because the f-strings
                 contained a literal placeholder instead of ``{filename}``.
  :arg dtype:    dtype to cast the floating-point fields to.
  """
  try:
    full_filename = f'tests/data/{filename}'
    return _load_jammed_state(full_filename, dtype)
  except FileNotFoundError:
    full_filename = f'/google3/third_party/py/jax_md/tests/data/{filename}'
    return _load_jammed_state(full_filename, dtype)
def load_lammps_stress_data(dtype):
  """Load LAMMPS Lennard-Jones stress-test reference data.

  Returns a ``((box, R, V), (E, C))`` pair: the parsed state (box size,
  positions, velocities) and the parsed results (energy and a symmetric
  3x3 stress tensor built from the first results row).
  """
  def parse_state(filename):
    # LAMMPS dump format: line 1 = timestep, line 3 = atom count,
    # line 5 = box bounds, atom records start at line 9.
    with open(filename) as f:
      data = f.read()
    data = data.split('\n')
    t = int(data[1])
    n = int(data[3])
    box = float(data[5].split(' ')[-1])
    R = []
    V = []
    for l in data[9:-1]:
      # First three columns are position, the rest velocity.
      R += [[float(xx) for xx in l.split(' ')[:3]]]
      V += [[float(xx) for xx in l.split(' ')[3:]]]
    return f32(box), jnp.array(R, dtype), jnp.array(V, dtype)
  def parse_results(filename):
    with open(filename) as f:
      data = f.read()
    data = [[float(dd) for dd in d.split(' ') if dd != ' ' and dd != '']
            for d in data.split('\n')]
    step = jnp.array([int(d[0]) for d in data if len(d) > 0])
    Es = jnp.array([d[1] for d in data if len(d) > 0], dtype)
    C = jnp.array([d[2:] for d in data if len(d) > 0], dtype)
    # Rebuild the symmetric stress tensor from the six Voigt-style
    # components (xx, yy, zz, xy, xz, yz) of the first row.
    C = jnp.array([[C[0, 0], C[0, 3], C[0, 4]],
                   [C[0, 3], C[0, 1], C[0, 5]],
                   [C[0, 4], C[0, 5], C[0, 2]]], dtype)
    return Es[0], C
  try:
    return (parse_state('tests/data/lammps_lj_stress_test_states'),
            parse_results('tests/data/lammps_lj_stress_test'))
  except FileNotFoundError:
    # NOTE(review): the google3 fallback filenames differ from the local
    # ones ('..._stress_test' vs '..._stress_test_states', and
    # '..._stress' vs '..._stress_test') - verify these paths are correct.
    return (parse_state('/google3/third_party/py/jax_md/tests/data/'
                        'lammps_lj_stress_test'),
            parse_results('/google3/third_party/py/jax_md/tests/data/'
                          'lammps_lj_stress'))
|
[
"jax.numpy.array",
"numpy.load",
"numpy.dtype",
"jax.test_util._default_tolerance.copy",
"jax.test_util.device_under_test"
] |
[((1191, 1220), 'jax.test_util._default_tolerance.copy', 'jtu._default_tolerance.copy', ([], {}), '()\n', (1218, 1220), True, 'import jax.test_util as jtu\n'), ((3660, 3770), 'jax.numpy.array', 'jnp.array', (['[[C[0, 0], C[0, 3], C[0, 4]], [C[0, 3], C[0, 1], C[0, 5]], [C[0, 4], C[0, 5\n ], C[0, 2]]]', 'dtype'], {}), '([[C[0, 0], C[0, 3], C[0, 4]], [C[0, 3], C[0, 1], C[0, 5]], [C[0, \n 4], C[0, 5], C[0, 2]]], dtype)\n', (3669, 3770), True, 'import jax.numpy as jnp\n'), ((938, 960), 'numpy.dtype', 'onp.dtype', (['onp.float32'], {}), '(onp.float32)\n', (947, 960), True, 'import numpy as onp\n'), ((1037, 1059), 'numpy.dtype', 'onp.dtype', (['onp.float64'], {}), '(onp.float64)\n', (1046, 1059), True, 'import numpy as onp\n'), ((1111, 1134), 'jax.test_util.device_under_test', 'jtu.device_under_test', ([], {}), '()\n', (1132, 1134), True, 'import jax.test_util as jtu\n'), ((1229, 1251), 'numpy.dtype', 'onp.dtype', (['onp.float32'], {}), '(onp.float32)\n', (1238, 1251), True, 'import numpy as onp\n'), ((1471, 1482), 'numpy.load', 'onp.load', (['f'], {}), '(f)\n', (1479, 1482), True, 'import numpy as onp\n'), ((3229, 3248), 'jax.numpy.array', 'jnp.array', (['R', 'dtype'], {}), '(R, dtype)\n', (3238, 3248), True, 'import jax.numpy as jnp\n'), ((3250, 3269), 'jax.numpy.array', 'jnp.array', (['V', 'dtype'], {}), '(V, dtype)\n', (3259, 3269), True, 'import jax.numpy as jnp\n'), ((2279, 2290), 'numpy.load', 'onp.load', (['f'], {}), '(f)\n', (2287, 2290), True, 'import numpy as onp\n'), ((2187, 2198), 'numpy.load', 'onp.load', (['f'], {}), '(f)\n', (2195, 2198), True, 'import numpy as onp\n'), ((2236, 2247), 'numpy.load', 'onp.load', (['f'], {}), '(f)\n', (2244, 2247), True, 'import numpy as onp\n'), ((2306, 2317), 'numpy.load', 'onp.load', (['f'], {}), '(f)\n', (2314, 2317), True, 'import numpy as onp\n'), ((2345, 2356), 'numpy.load', 'onp.load', (['f'], {}), '(f)\n', (2353, 2356), True, 'import numpy as onp\n'), ((2387, 2398), 'numpy.load', 'onp.load', (['f'], {}), '(f)\n', 
(2395, 2398), True, 'import numpy as onp\n'), ((2431, 2442), 'numpy.load', 'onp.load', (['f'], {}), '(f)\n', (2439, 2442), True, 'import numpy as onp\n')]
|
# Version 1.0.0 Released: 14/11/21
# <NAME>
# <EMAIL>
# License Apache 2.0
# ==================================================================================================================================================================================
#LaharZ v0.3 - working
#Laharz v0.4 - temporary version - not tested
#Laharz v0.5 - based on LaharZ3 - working
#Laharz v0.6 - using points, volume and total summaries - working
#Laharz v0.7 - rework section area - working
#Laharz v0.8 - add graphic of crosssection, working but tidied up parameters
#Laharz v0.9 - add reading of tif files directly - not finished
#Laharz v0.10 - add reading of tif files directly and adding in pyproj
#Laharz v0.11 - general tidy up and restructure
#Laharz v0.12 - moving to use QGIS r.stream.extract for thalwegs and flow direction. Dropping Accumulation file. Dropping Channel Paths. Removing old routines replaced by pyproj.
#Laharz v0.13 - adding in projection of landscape and energy cone. General tidy up
#Laharz v0.14 - adding in screen for parameters. Intermediate version
#Laharz v0.15 - adding in screen for parameters
#Laharz v0.16 - minor fixes; scroll bars for window
#Laharz v1.0.0 - first public release
#==================================================================================================================================================================================
#imports
from PIL import Image, ImageDraw, ImageFont
from rasterio.rio.helpers import resolve_inout
from scipy.ndimage import binary_erosion, binary_fill_holes
import csv
import datetime
import gmsh
import numpy as np
import os
import pickle
import pyproj
import rasterio as rio
import simplekml
import sys
import sys
import tkinter as tk
from tkinter import ttk
# import tqdm
# Global ##########################################################################################################
#global dem_f, dem_crs
# Classes ##########################################################################################################
# NOTE: an X server (e.g. XMing on Windows/WSL) must be running, or the Tk GUI cannot open a display.
class Application(tk.Frame):
    """Main Application Frame"""
    # Default parameter values, shown in the entry fields on first launch.
    pwdir = 'Working Directory'
    pdem_fn = 'dem.tif'
    pthal_fn = 'stream.tif'
    pflow_fn = 'flow.tif'
    pvolume = '1e5, 1e6'
    phlratio = 0.2
    ppeak_entry = '16.05, 61.66'
    phprange = 5000
    psealevel = 0
    plahar_dir = 'lahar'
    pinitpoints_fn = 'initpoints.csv'
    plog_fn = 'log.txt'
    pecraw_fn = 'ecraw.tif'
    pecfilled_fn = 'ecfilled.tif'
    pecline_fn = 'ecline.tif'
    pplotmesh = False
    pmesh_dir = 'mesh'
    pmeshres = 200
    pmeshsize = 1.3
    pcalcinitpoints = 'calc'
    puserowcol = 'True'
    piponly = 'n'
    # "Previous" copies of each parameter, presumably used to detect
    # changes between runs - TODO confirm against the rest of the class.
    pwdir_prev = 'Guad'
    pdem_fn_prev = 'dem.tif'
    pthal_fn_prev = 'stream.tif'
    pflow_fn_prev = 'flow.tif'
    pvolume_prev = '1e5, 1e6'
    phlratio_prev = 0.2
    ppeak_entry_prev = '16.05, 61.66'
    phprange_prev = 5000
    psealevel_prev = 0
    plahar_dir_prev = 'lahar'
    pinitpoints_fn_prev = 'initpoints.csv'
    plog_fn_prev = 'log.txt'
    pecraw_fn_prev = 'ecraw.tif'
    pecfilled_fn_prev = 'ecfilled.tif'
    pecline_fn_prev = 'ecline.tif'
    pplotmesh_prev = False
    pmesh_dir_prev = 'mesh'
    pmeshres_prev = 200
    pmeshsize_prev = 1.3
    pcalcinitpoints_prev = 'calc'
    puserowcol_prev = 'True'
    piponly_prev = 'n'
    # NOTE(review): mutable class attribute - shared across all instances
    # unless rebound per-instance; verify this is intentional.
    pparameters = {}
    def __init__(self, master):
        """initialise the frame"""
        tk.Frame.__init__(self, master)
        # self.canvas = tk.Canvas(self, borderwidth=0, background="#ffffff")
        self.canvas = tk.Canvas(self, borderwidth=0)
        # self.frame = tk.Frame(self.canvas, background="#ffffff")
        self.frame = tk.Frame(self.canvas)
        # Vertical scrollbar tied to the canvas, so the whole form scrolls
        self.vsb = tk.Scrollbar(self, orient="vertical", command=self.canvas.yview)
        self.canvas.configure(yscrollcommand=self.vsb.set)
        self.vsb.pack(side="right", fill="y")
        self.canvas.pack(side="left", fill="both", expand=True)
        # Embed the widget frame inside the canvas so it can be scrolled
        self.canvas.create_window((20,20), window=self.frame, anchor="nw", tags="self.frame")
        # Keep the canvas scrollregion in sync with the frame size
        # (onFrameConfigure is defined elsewhere in this class)
        self.frame.bind("<Configure>", self.onFrameConfigure)
        self.create_widgets()
    def create_widgets(self):
        """Lay out the whole parameter form inside ``self.frame``.

        The form is a grid: columns 0-1 hold the field label, columns 3-4
        the entry widget, columns 6-7 any action button, and columns 9+
        an inline help label.  Each field widget is stored as
        ``self.tk_<param>`` with a matching ``self.tk_<param>_msg`` label
        that ``submit`` reuses to report validation errors/warnings.
        """
        # LaharZ
        tk.Label(self.frame, text='LaharZ', font=('Helvetica', 14, 'bold')).grid(row=0, column=0, columnspan=2, sticky='W')
        # Blank line
        tk.Label(self.frame, text='', font=('Helvetica', 12)).grid(row=1, column=0, columnspan=2, sticky='W')
        # Working Directory
        tk.Label(self.frame, text='Working Directory', font=('Helvetica', 12)).grid(row=2, column=0, columnspan=2, sticky='W')
        self.tk_pwdir = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pwdir.grid(row=2, column=3, columnspan=2, sticky='W')
        self.tk_pwdir.insert(0, self.pwdir)
        self.tk_pwdir_msg = tk.Label(self.frame, text='Working directory for this run. Should contain the input files. ', font=('Helvetica', 12))
        self.tk_pwdir_msg.grid(row=2, column=9, columnspan=4, sticky='W')
        # Load Parameters
        tk.Label(self.frame, text='Load Parameters', font=('Helvetica', 12)).grid(row=3, column=0, columnspan=2, sticky='W')
        self.tk_pload_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pload_fn.grid(row=3, column=3, columnspan=2, sticky='W')
        self.tk_pload_fn.insert(0, "parameters.pickle")
        self.tk_pload_fn_msg = tk.Label(self.frame, text='File name of where you load the parameters from, if you wish to ', font=('Helvetica', 12))
        self.tk_pload_fn_msg.grid(row=3, column=9, columnspan=4, sticky='W')
        # Load
        self.tk_load_params = tk.Button(self.frame, text='Load', font=('Helvetica', 12))
        self.tk_load_params.grid(row=3, column=6, columnspan=2, sticky='W')
        self.tk_load_params['command'] = self.load_params
        # Blank line
        tk.Label(self.frame, text='', font=('Helvetica', 12)).grid(row=4, column=0, columnspan=2, sticky='W')
        # Inputs
        tk.Label(self.frame, text='Inputs', font=('Helvetica', 14, 'bold')).grid(row=5, column=0, columnspan=2, sticky='W')
        # DEM File
        tk.Label(self.frame, text='DEM File', font=('Helvetica', 12)).grid(row=6, column=0, columnspan=2, sticky='W')
        self.tk_pdem_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pdem_fn.grid(row=6, column=3, columnspan=2, sticky='W')
        self.tk_pdem_fn.insert(0, self.pdem_fn)
        self.tk_pdem_fn_msg = tk.Label(self.frame, text='Name of your DEM file in your working directory ', font=('Helvetica', 12))
        self.tk_pdem_fn_msg.grid(row=6, column=9, columnspan=4, sticky='W')
        # Stream File
        tk.Label(self.frame, text='Stream File', font=('Helvetica', 12)).grid(row=7, column=0, columnspan=2, sticky='W')
        self.tk_pthal_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pthal_fn.grid(row=7, column=3, columnspan=2, sticky='W')
        self.tk_pthal_fn.insert(0, self.pthal_fn)
        self.tk_pthal_fn_msg = tk.Label(self.frame, text='Name of your Stream file in your working directory ', font=('Helvetica', 12))
        self.tk_pthal_fn_msg.grid(row=7, column=9, columnspan=4, sticky='W')
        # Flow File
        tk.Label(self.frame, text='Flow File', font=('Helvetica', 12)).grid(row=8, column=0, columnspan=2, sticky='W')
        self.tk_pflow_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pflow_fn.grid(row=8, column=3, columnspan=2, sticky='W')
        self.tk_pflow_fn.insert(0, self.pflow_fn)
        self.tk_pflow_fn_msg = tk.Label(self.frame, text='Name of your Flow file in your working directory ', font=('Helvetica', 12))
        self.tk_pflow_fn_msg.grid(row=8, column=9, columnspan=4, sticky='W')
        # Volume
        tk.Label(self.frame, text='Volume', font=('Helvetica', 12)).grid(row=9, column=0, columnspan=2, sticky='W')
        self.tk_pvolume = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pvolume.grid(row=9, column=3, columnspan=2, sticky='W')
        self.tk_pvolume.insert(0, self.pvolume)
        self.tk_pvolume_msg = tk.Label(self.frame, text='Volumes (m^3) in a list separated by commas ', font=('Helvetica', 12))
        self.tk_pvolume_msg.grid(row=9, column=9, columnspan=4, sticky='W')
        # H/L Ratio
        tk.Label(self.frame, text='H/L Ratio', font=('Helvetica', 12)).grid(row=10, column=0, columnspan=2, sticky='W')
        self.tk_phlratio = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_phlratio.grid(row=10, column=3, columnspan=2, sticky='W')
        self.tk_phlratio.insert(0, self.phlratio)
        self.tk_phlratio_msg = tk.Label(self.frame, text='H/L Ratios normally between 0.2 and 0.3 ', font=('Helvetica', 12))
        self.tk_phlratio_msg.grid(row=10, column=9, columnspan=4, sticky='W')
        # Peak
        tk.Label(self.frame, text='Peak', font=('Helvetica', 12)).grid(row=11, column=0, columnspan=2, sticky='W')
        self.tk_ppeak_entry = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_ppeak_entry.grid(row=11, column=3, columnspan=2, sticky='W')
        self.tk_ppeak_entry.insert(0, self.ppeak_entry)
        self.tk_ppeak_entry_msg = tk.Label(self.frame, text='Approximate latitude and longitude of the peak ', font=('Helvetica', 12))
        self.tk_ppeak_entry_msg.grid(row=11, column=9, columnspan=4, sticky='W')
        # Search Diagonal
        tk.Label(self.frame, text='Search Diagonal', font=('Helvetica', 12)).grid(row=12, column=0, columnspan=2, sticky='W')
        self.tk_phprange = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_phprange.grid(row=12, column=3, columnspan=2, sticky='W')
        self.tk_phprange.insert(0, self.phprange)
        self.tk_phprange_msg = tk.Label(self.frame, text='Length of search diagonal in m ', font=('Helvetica', 12))
        self.tk_phprange_msg.grid(row=12, column=9, columnspan=4, sticky='W')
        # Sea Level
        tk.Label(self.frame, text='Sea Level', font=('Helvetica', 12)).grid(row=13, column=0, columnspan=2, sticky='W')
        self.tk_psealevel = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_psealevel.grid(row=13, column=3, columnspan=2, sticky='W')
        self.tk_psealevel.insert(0, self.psealevel)
        self.tk_psealevel_msg = tk.Label(self.frame, text='Sea Level in m ', font=('Helvetica', 12))
        self.tk_psealevel_msg.grid(row=13, column=9, columnspan=4, sticky='W')
        # Blank line
        tk.Label(self.frame, text='', font=('Helvetica', 12)).grid(row=14, column=0, columnspan=2, sticky='W')
        # Outputs
        tk.Label(self.frame, text='Outputs', font=('Helvetica', 14, 'bold')).grid(row=15, column=0, columnspan=2, sticky='W')
        # Lahar Directory
        tk.Label(self.frame, text='Lahar Directory', font=('Helvetica', 12)).grid(row=16, column=0, columnspan=2, sticky='W')
        self.tk_plahar_dir = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_plahar_dir.grid(row=16, column=3, columnspan=2, sticky='W')
        self.tk_plahar_dir.insert(0, self.plahar_dir)
        self.tk_plahar_dir_msg = tk.Label(self.frame, text='Directory which contains the lahar files in your working directory ', font=('Helvetica', 12))
        self.tk_plahar_dir_msg.grid(row=16, column=9, columnspan=4, sticky='W')
        # Initiation Points
        tk.Label(self.frame, text='Initiation Points', font=('Helvetica', 12)).grid(row=17, column=0, columnspan=2, sticky='W')
        self.tk_pinitpoints_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pinitpoints_fn.grid(row=17, column=3, columnspan=2, sticky='W')
        self.tk_pinitpoints_fn.insert(0, self.pinitpoints_fn)
        self.tk_pinitpoints_fn_msg = tk.Label(self.frame, text='File name of the initiation points ', font=('Helvetica', 12))
        self.tk_pinitpoints_fn_msg.grid(row=17, column=9, columnspan=4, sticky='W')
        # Log File
        tk.Label(self.frame, text='Log File', font=('Helvetica', 12)).grid(row=18, column=0, columnspan=2, sticky='W')
        self.tk_plog_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_plog_fn.grid(row=18, column=3, columnspan=2, sticky='W')
        self.tk_plog_fn.insert(0, self.plog_fn)
        self.tk_plog_fn_msg = tk.Label(self.frame, text='File name of the log of all details ', font=('Helvetica', 12))
        self.tk_plog_fn_msg.grid(row=18, column=9, columnspan=4, sticky='W')
        # Raw Energy Cone
        tk.Label(self.frame, text='Raw Energy Cone', font=('Helvetica', 12)).grid(row=19, column=0, columnspan=2, sticky='W')
        self.tk_pecraw_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pecraw_fn.grid(row=19, column=3, columnspan=2, sticky='W')
        self.tk_pecraw_fn.insert(0, self.pecraw_fn)
        self.tk_pecraw_fn_msg = tk.Label(self.frame, text='File name of the Raw Energy Cone ', font=('Helvetica', 12))
        self.tk_pecraw_fn_msg.grid(row=19, column=9, columnspan=4, sticky='W')
        # Filled Energy Cone
        tk.Label(self.frame, text='Filled Energy Cone', font=('Helvetica', 12)).grid(row=20, column=0, columnspan=2, sticky='W')
        self.tk_pecfilled_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pecfilled_fn.grid(row=20, column=3, columnspan=2, sticky='W')
        self.tk_pecfilled_fn.insert(0, self.pecfilled_fn)
        self.tk_pecfilled_fn_msg = tk.Label(self.frame, text='File name of the Filled Energy Cone ', font=('Helvetica', 12))
        self.tk_pecfilled_fn_msg.grid(row=20, column=9, columnspan=4, sticky='W')
        # Energy Cone Line
        tk.Label(self.frame, text='Energy Cone Line', font=('Helvetica', 12)).grid(row=21, column=0, columnspan=2, sticky='W')
        self.tk_pecline_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pecline_fn.grid(row=21, column=3, columnspan=2, sticky='W')
        self.tk_pecline_fn.insert(0, self.pecline_fn)
        self.tk_pecline_fn_msg = tk.Label(self.frame, text='File name of the Energy Cone Line ', font=('Helvetica', 12))
        self.tk_pecline_fn_msg.grid(row=21, column=9, columnspan=4, sticky='W')
        # Blank line
        tk.Label(self.frame, text='', font=('Helvetica', 12)).grid(row=22, column=0, columnspan=2, sticky='W')
        # Mesh
        tk.Label(self.frame, text='Mesh', font=('Helvetica', 14, 'bold')).grid(row=23, column=0, columnspan=2, sticky='W')
        # Plot Mesh
        self.tk_pplotmesh = tk.BooleanVar(value = self.pplotmesh)
        tk.Checkbutton(self.frame, text='Plot Mesh', font=('Helvetica', 12), variable=self.tk_pplotmesh).grid(row=24, column=0, columnspan=2, sticky='W')
        self.tk_pplotmesh_msg = tk.Label(self.frame, text='Check if you wish to create 3D mesh files ', font=('Helvetica', 12))
        self.tk_pplotmesh_msg.grid(row=24, column=9, columnspan=4, sticky='W')
        # Mesh Directory
        tk.Label(self.frame, text='Mesh Directory', font=('Helvetica', 12)).grid(row=25, column=0, columnspan=2, sticky='W')
        self.tk_pmesh_dir = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pmesh_dir.grid(row=25, column=3, columnspan=2, sticky='W')
        self.tk_pmesh_dir.insert(0, self.pmesh_dir)
        self.tk_pmesh_dir_msg = tk.Label(self.frame, text='Directory which contains the mesh files in your working directory ', font=('Helvetica', 12))
        self.tk_pmesh_dir_msg.grid(row=25, column=9, columnspan=4, sticky='W')
        # Mesh Resolution
        tk.Label(self.frame, text='Mesh Resolution', font=('Helvetica', 12)).grid(row=26, column=0, columnspan=2, sticky='W')
        self.tk_pmeshres = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pmeshres.grid(row=26, column=3, columnspan=2, sticky='W')
        self.tk_pmeshres.insert(0, self.pmeshres)
        self.tk_pmeshres_msg = tk.Label(self.frame, text='Mesh resolution (number of points in x & y direction) ', font=('Helvetica', 12))
        self.tk_pmeshres_msg.grid(row=26, column=9, columnspan=4, sticky='W')
        # Mesh Extent
        tk.Label(self.frame, text='Mesh Extent', font=('Helvetica', 12)).grid(row=27, column=0, columnspan=2, sticky='W')
        self.tk_pmeshsize = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_pmeshsize.grid(row=27, column=3, columnspan=2, sticky='W')
        self.tk_pmeshsize.insert(0, self.pmeshsize)
        self.tk_pmeshsize_msg = tk.Label(self.frame, text='What extent to plot the mesh (1.3 = 130% of the area of the energy cone line) ', font=('Helvetica', 12))
        self.tk_pmeshsize_msg.grid(row=27, column=9, columnspan=4, sticky='W')
        # Blank line
        tk.Label(self.frame, text='', font=('Helvetica', 12)).grid(row=28, column=0, columnspan=2, sticky='W')
        # Controls
        tk.Label(self.frame, text='Controls', font=('Helvetica', 14, 'bold')).grid(row=29, column=0, columnspan=2, sticky='W')
        # Control Radio Buttons
        self.tk_pcalcinitpoints = tk.StringVar()
        self.tk_pcalcinitpoints.set(self.pcalcinitpoints)
        # Calculate Initiation Ponts
        tk.Radiobutton(self.frame, text='Calculate Initiation Ponts', font=('Helvetica', 12), variable=self.tk_pcalcinitpoints, value='calc').grid(row=30, column=0, columnspan=2, sticky='W')
        # Load Initiation Points
        tk.Radiobutton(self.frame, text='Load Initiation Points', font=('Helvetica', 12), variable=self.tk_pcalcinitpoints, value='load').grid(row=31, column=0, columnspan=2, sticky='W')
        # Use Row/Col
        self.tk_puserowcol = tk.BooleanVar(value=self.puserowcol)
        tk.Checkbutton(self.frame, text='Use Row/Col', font=('Helvetica', 12), variable=self.tk_puserowcol).grid(row=32, column=0, columnspan=2, sticky='W')
        self.tk_puserowcol_msg = tk.Label(self.frame, text='Check to use rows and columns if loading initiation points; otherwise uses latitude and longitude ', font=('Helvetica', 12))
        self.tk_puserowcol_msg.grid(row=32, column=9, columnspan=4, sticky='W')
        # Initiation Points Only
        self.tk_piponly = tk.BooleanVar()
        tk.Checkbutton(self.frame, text='Initiation Points Only', font=('Helvetica', 12), variable=self.tk_piponly).grid(row=33, column=0, columnspan=2, sticky='W')
        # Blank line
        tk.Label(self.frame, text='', font=('Helvetica', 12)).grid(row=34, column=0, columnspan=2, sticky='W')
        # Save Parameters
        tk.Label(self.frame, text='Save Parameters', font=('Helvetica', 12)).grid(row=35, column=0, columnspan=2, sticky='W')
        self.tk_psave_fn = tk.Entry(self.frame, font=('Helvetica', 12))
        self.tk_psave_fn.grid(row=35, column=3, columnspan=2, sticky='W')
        self.tk_psave_fn.insert(0, "parameters.pickle")
        self.tk_psave_fn_msg = tk.Label(self.frame, text='File name of where you load the parameters from, if you wish to ', font=('Helvetica', 12))
        self.tk_psave_fn_msg.grid(row=35, column=9, columnspan=4, sticky='W')
        # Save
        self.tk_save_params = tk.Button(self.frame, text='Save', font=('Helvetica', 12))
        self.tk_save_params.grid(row=35, column=6, columnspan=2, sticky='W')
        self.tk_save_params['command'] = self.save_params
        # Submit
        self.tk_submit = tk.Button(self.frame, text='Submit', font=('Helvetica', 12))
        self.tk_submit.grid(row=36, column=0, columnspan=2, sticky='W')
        self.tk_submit['command'] = self.submit
        # Status
        self.tk_statusmsg = tk.Label(self.frame, font=('Helvetica', 12))
        self.tk_statusmsg.grid(row=37, column=0, columnspan=12, sticky='W')
def onFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def submit(self):
error = False
change = False
warning = False
# Validate tk_pwdir
pwdir = self.tk_pwdir.get()
if pwdir != self.pwdir_prev:
change = True
self.pwdir_prev = pwdir
if os.path.isdir("../" + pwdir) and pwdir != "":
self.tk_pwdir_msg['text'] = ""
self.pwdir = pwdir
else:
self.tk_pwdir_msg['text'] = "Error: Directory does not exist"
self.tk_pwdir_msg['fg'] = "red"
error = True
# Validate tk_pdem_fn
pdem_fn = self.tk_pdem_fn.get()
if pdem_fn != self.pdem_fn_prev:
change = True
self.pdem_fn_prev = pdem_fn
pdem_fn = "../" + pwdir + "/" + pdem_fn
if os.path.isfile(pdem_fn):
self.tk_pdem_fn_msg['text'] = ""
self.pdem_fn = pdem_fn
else:
self.tk_pdem_fn_msg['text'] = "Error: File does not exist"
self.tk_pdem_fn_msg['fg'] = "red"
error = True
# Validate tk_pthal_fn
pthal_fn = self.tk_pthal_fn.get()
if pthal_fn != self.pthal_fn_prev:
change = True
self.pthal_fn_prev = pthal_fn
pthal_fn = "../" + pwdir + "/" + pthal_fn
if os.path.isfile(pthal_fn):
self.tk_pthal_fn_msg['text'] = ""
self.pthal_fn = pthal_fn
else:
self.tk_pthal_fn_msg['text'] = "Error: File does not exist"
self.tk_pthal_fn_msg['fg'] = "red"
error = True
# Validate tk_pflow_fn
pflow_fn = self.tk_pflow_fn.get()
if pflow_fn != self.pflow_fn_prev:
change = True
self.pflow_fn_prev = pflow_fn
pflow_fn = "../" + pwdir + "/" + pflow_fn
if os.path.isfile(pflow_fn):
self.tk_pflow_fn_msg['text'] = ""
self.pflow_fn = pflow_fn
else:
self.tk_pflow_fn_msg['text'] = "Error: File does not exist"
self.tk_pflow_fn_msg['fg'] = "red"
error = True
# Validate tk_pvolume
pvolume = self.tk_pvolume.get()
if pvolume != self.pvolume_prev:
change = True
self.pvolume_prev = pvolume
#convert to numeric
pvolume = self.tk_pvolume.get().split(",")
pvolume2 = []
pvolume_error = False
for i in pvolume:
try:
pvolume2 += [float(i),]
except:
self.tk_pvolume_msg['text'] = "Error: Values must be numeric"
self.tk_pvolume_msg['fg'] = "red"
error = True
pvolume_error = True
break
if not pvolume_error:
self.tk_pvolume_msg['text'] = ""
pvolume = pvolume2
for i in pvolume:
if i<1e4 or i>1e12:
self.tk_pvolume_msg['text'] = "Warning: Values outside of normal range of 1e4 - 1e12"
self.tk_pvolume_msg['fg'] = "#b36b00"
warning = True
break
self.pvolume = pvolume
# Validate tk_phlratio
phlratio = self.tk_phlratio.get()
if phlratio != self.phlratio_prev:
change = True
self.phlratio_prev = phlratio
phlratio_error = False
try:
phlratio = float(phlratio)
except:
self.tk_phlratio_msg['text'] = "Error: Value must be numeric"
self.tk_phlratio_msg['fg'] = "red"
error = True
phlratio_error = True
if not phlratio_error:
self.tk_phlratio_msg['text'] = ""
if phlratio < 0.2 or phlratio > 0.3:
self.tk_phlratio_msg['text'] = "Warning: Values outside of normal range of 0.2 - 0.3"
self.tk_phlratio_msg['fg'] = "#b36b00"
warning = True
self.phlratio = phlratio
# Validate tk_ppeak_entry
ppeak_entry = self.tk_ppeak_entry.get().split(",")
if ppeak_entry != self.ppeak_entry_prev:
change = True
self.ppeak_entry_prev = ppeak_entry
#convert to numeric
ppeak_entry = self.tk_ppeak_entry.get().split(",")
ppeak_entry2 = []
ppeak_entry_error = False
for i in ppeak_entry:
try:
ppeak_entry2 += [float(i),]
except:
self.tk_ppeak_entry_msg['text'] = "Error: Values must be numeric"
self.tk_ppeak_entry_msg['fg'] = "red"
error = True
ppeak_entry_error = True
break
if not ppeak_entry_error:
self.tk_ppeak_entry_msg['text'] = ""
ppeak_entry = ppeak_entry2
self.ppeak_entry = ppeak_entry
if len(ppeak_entry) != 2:
self.tk_ppeak_entry_msg['text'] = "Error: Only 2 values (Latitude and Longitude) accepted"
self.tk_ppeak_entry_msg['fg'] = "red"
error = True
else:
if ppeak_entry[0] <-90 or ppeak_entry[0]>90:
self.tk_ppeak_entry_msg['text'] = "Error: Latitude outside of normal range of -90 to 90 degrees"
self.tk_ppeak_entry_msg['fg'] = "red"
error = True
if ppeak_entry[1] <-180 or ppeak_entry[0]>180:
self.tk_ppeak_entry_msg['text'] = "Error: Longitude outside of normal range of -180 to 180 degrees"
self.tk_ppeak_entry_msg['fg'] = "red"
error = True
# Validate tk_phprange
phprange = self.tk_phprange.get()
if phprange != self.phprange_prev:
self.phprange_prev = phprange
change = True
try:
phprange = float(phprange)
self.tk_phprange_msg['text'] = ""
except:
self.tk_phprange_msg['text'] = "Error: Value must be numeric"
self.tk_phprange_msg['fg'] = "red"
error = True
self.phprange = phprange
# Validate tk_psealevel
psealevel = self.tk_psealevel.get()
if psealevel != self.psealevel_prev:
self.psealevel_prev = psealevel
change = True
try:
psealevel = float(psealevel)
self.tk_psealevel_msg['text'] = ""
except:
self.tk_psealevel_msg['text'] = "Error: Value must be numeric"
self.tk_psealevel_msg['fg'] = "red"
error = True
self.psealevel = psealevel
# Validate tk_plahar_dir
plahar_dir = self.tk_plahar_dir.get()
if plahar_dir != self.plahar_dir_prev:
self.plahar_dir_prev = plahar_dir
change = True
if not plahar_dir.isalnum():
self.tk_plahar_dir_msg['text'] = "Error: Value must be alphanumeric"
self.tk_plahar_dir_msg['fg'] = "red"
error = True
else:
self.tk_plahar_dir_msg['text'] = ""
self.plahar_dir = plahar_dir
# Validate tk_pinitpoints_fn
pinitpoints_fn = self.tk_pinitpoints_fn.get()
if pinitpoints_fn != self.pinitpoints_fn_prev:
change = True
self.pinitpoints_fn_prev = pinitpoints_fn
pinitpoints_fn = self.tk_pinitpoints_fn.get().split(".")
if len(pinitpoints_fn)!=2:
self.tk_pinitpoints_fn_msg['text'] = "Error: Invalid file format"
self.tk_pinitpoints_fn_msg['fg'] = "red"
error = True
elif not pinitpoints_fn[0].isalnum():
self.tk_pinitpoints_fn_msg['text'] = "Error: Invalid file format"
self.tk_pinitpoints_fn_msg['fg'] = "red"
error = True
elif pinitpoints_fn[1] !="csv":
self.tk_pinitpoints_fn_msg['text'] = "Error: csv extension only"
self.tk_pinitpoints_fn_msg['fg'] = "red"
error = True
else:
self.tk_pinitpoints_fn_msg['text'] = ""
self.pinitpoints_fn = self.tk_pinitpoints_fn.get()
# Validate tk_plog_fn
plog_fn = self.tk_plog_fn.get()
if plog_fn != self.plog_fn_prev:
change = True
self.plog_fn_prev = plog_fn
plog_fn = self.tk_plog_fn.get().split(".")
if len(plog_fn)!=2:
self.tk_plog_fn_msg['text'] = "Error: Invalid file format"
self.tk_plog_fn_msg['fg'] = "red"
error = True
elif not plog_fn[0].isalnum():
self.tk_plog_fn_msg['text'] = "Error: Invalid file format"
self.tk_plog_fn_msg['fg'] = "red"
error = True
elif plog_fn[1] !="txt":
self.tk_plog_fn_msg['text'] = "Error: txt extension only"
self.tk_plog_fn_msg['fg'] = "red"
error = True
else:
self.tk_plog_fn_msg['text'] = ""
self.plog_fn = self.tk_plog_fn.get()
# Validate tk_pecraw_fn
pecraw_fn = self.tk_pecraw_fn.get()
if pecraw_fn != self.pecraw_fn_prev:
change = True
self.pecraw_fn_prev = pecraw_fn
pecraw_fn = self.tk_pecraw_fn.get().split(".")
if len(pecraw_fn)!=2:
self.tk_pecraw_fn_msg['text'] = "Error: Invalid file format"
self.tk_pecraw_fn_msg['fg'] = "red"
error = True
elif not pecraw_fn[0].isalnum():
self.tk_pecraw_fn_msg['text'] = "Error: Invalid file format"
self.tk_pecraw_fn_msg['fg'] = "red"
error = True
elif pecraw_fn[1] !="tif":
self.tk_pecraw_fn_msg['text'] = "Error: tif extension only"
self.tk_pecraw_fn_msg['fg'] = "red"
error = True
else:
self.tk_pecraw_fn_msg['text'] = ""
self.pecraw_fn = self.tk_pecraw_fn.get()
# Validate tk_pecfilled_fn
pecfilled_fn = self.tk_pecfilled_fn.get()
if pecfilled_fn != self.pecfilled_fn_prev:
change = True
self.pecfilled_fn_prev = pecfilled_fn
pecfilled_fn = self.tk_pecfilled_fn.get().split(".")
if len(pecfilled_fn)!=2:
self.tk_pecfilled_fn_msg['text'] = "Error: Invalid file format"
self.tk_pecfilled_fn_msg['fg'] = "red"
error = True
elif not pecfilled_fn[0].isalnum():
self.tk_pecfilled_fn_msg['text'] = "Error: Invalid file format"
self.tk_pecfilled_fn_msg['fg'] = "red"
error = True
elif pecfilled_fn[1] !="tif":
self.tk_pecfilled_fn_msg['text'] = "Error: tif extension only"
self.tk_pecfilled_fn_msg['fg'] = "red"
error = True
else:
self.tk_pecfilled_fn_msg['text'] = ""
self.pecfilled_fn = self.tk_pecfilled_fn.get()
# Validate tk_pecline_fn
pecline_fn = self.tk_pecline_fn.get()
if pecline_fn != self.pecline_fn_prev:
change = True
self.pecline_fn_prev = pecline_fn
pecline_fn = self.tk_pecline_fn.get().split(".")
if len(pecline_fn)!=2:
self.tk_pecline_fn_msg['text'] = "Error: Invalid file format"
self.tk_pecline_fn_msg['fg'] = "red"
error = True
elif not pecline_fn[0].isalnum():
self.tk_pecline_fn_msg['text'] = "Error: Invalid file format"
self.tk_pecline_fn_msg['fg'] = "red"
error = True
elif pecline_fn[1] !="tif":
self.tk_pecline_fn_msg['text'] = "Error: tif extension only"
self.tk_pecline_fn_msg['fg'] = "red"
error = True
else:
self.tk_pecline_fn_msg['text'] = ""
self.pecline_fn = self.tk_pecline_fn.get()
# Validate tk_pplotmesh
pplotmesh = self.tk_pplotmesh.get()
if pplotmesh != self.pplotmesh_prev:
change = True
self.pplotmesh_prev = pplotmesh
self.tk_pplotmesh_msg['text'] = ""
self.pplotmesh = pplotmesh
# Validate tk_pmesh_dir
pmesh_dir = self.tk_pmesh_dir.get()
if pmesh_dir != self.pmesh_dir_prev:
change = True
self.pmesh_dir_prev = pmesh_dir
if not pmesh_dir.isalnum():
self.tk_pmesh_dir_msg['text'] = "Error: Value must be alphanumeric"
self.tk_pmesh_dir_msg['fg'] = "red"
error = True
else:
self.tk_pmesh_dir_msg['text'] = ""
self.pmesh_dir = pmesh_dir
# Validate tk_pmeshres
pmeshres = self.tk_pmeshres.get()
if pmeshres != self.pmeshres_prev:
change = True
self.pmeshres_prev = pmeshres
try:
pmeshres = int(pmeshres)
self.tk_pmeshres_msg['text'] = ""
self.pmeshres = pmeshres
except:
self.tk_pmeshres_msg['text'] = "Error: Value must an integer. Usually 50 - 400"
self.tk_pmeshres_msg['fg'] = "red"
error = True
# Validate tk_pmeshsize
pmeshsize = self.tk_pmeshsize.get()
if pmeshsize != self.pmeshsize_prev:
change = True
self.pmeshsize_prev = pmeshsize
try:
pmeshsize = float(pmeshsize)
self.tk_pmeshsize_msg['text'] = ""
self.pmeshsize = pmeshsize
except:
self.tk_pmeshsize_msg['text'] = "Error: Value must be numeric. Usually 1.3"
self.tk_pmeshsize_msg['fg'] = "red"
error = True
# Validate tk_pcalcinitpoints
pcalcinitpoints = self.tk_pcalcinitpoints.get()
if pcalcinitpoints != self.pcalcinitpoints_prev:
change = True
self.pcalcinitpoints_prev = pcalcinitpoints
self.pcalcinitpoints = pcalcinitpoints
# Validate tk_puserowcol
puserowcol = self.tk_puserowcol.get()
if puserowcol != self.puserowcol_prev:
change = True
self.puserowcol_prev = puserowcol
self.tk_puserowcol_msg['text'] = ""
self.puserowcol = puserowcol
# Validate tk_piponly
piponly = self.tk_piponly.get()
if piponly != self.piponly_prev:
change = True
self.piponly_prev = piponly
self.piponly = piponly
# Finalise
if error:
self.tk_statusmsg["text"] = "Errors exist"
self.tk_statusmsg['fg'] = "red"
elif warning and change:
self.tk_statusmsg["text"] = "Warnings exist. Press submit to continue"
self.tk_statusmsg['fg'] = "#b36b00"
else:
self.tk_statusmsg["text"] = "Lets go..."
self.tk_statusmsg['fg'] = "black"
self.update()
self.uploaddict()
laharz()
def uploaddict(self):
self.pparameters['pwdir'] = self.tk_pwdir.get()
self.pparameters['pdem_fn'] = self.tk_pdem_fn.get()
self.pparameters['pthal_fn'] = self.tk_pthal_fn.get()
self.pparameters['pflow_fn'] = self.tk_pflow_fn.get()
self.pparameters['pvolume'] = self.tk_pvolume.get()
self.pparameters['phlratio'] = self.tk_phlratio.get()
self.pparameters['ppeak_entry'] = self.tk_ppeak_entry.get()
self.pparameters['phprange'] = self.tk_phprange.get()
self.pparameters['psealevel'] = self.tk_psealevel.get()
self.pparameters['plahar_dir'] = self.tk_plahar_dir.get()
self.pparameters['pinitpoints_fn'] = self.tk_pinitpoints_fn.get()
self.pparameters['plog_fn'] = self.tk_plog_fn.get()
self.pparameters['pecraw_fn'] = self.tk_pecraw_fn.get()
self.pparameters['pecfilled_fn'] = self.tk_pecfilled_fn.get()
self.pparameters['pecline_fn'] = self.tk_pecline_fn.get()
if self.tk_pplotmesh.get(): # bit bizarre but you can't pickle tkinter data
self.pparameters['pplotmesh'] = True
else:
self.pparameters['pplotmesh'] = False
self.pparameters['pmesh_dir'] = self.tk_pmesh_dir.get()
self.pparameters['pmeshres'] = self.tk_pmeshres.get()
self.pparameters['pmeshsize'] = self.tk_pmeshsize.get()
self.pparameters['pcalcinitpoints'] = self.tk_pcalcinitpoints.get()
if self.tk_puserowcol.get(): # bit bizarre but you can't pickle tkinter data
self.pparameters['puserowcol'] = True
else:
self.pparameters['puserowcol'] = False
if self.tk_piponly.get(): # bit bizarre but you can't pickle tkinter data
self.pparameters['piponly'] = True
else:
self.pparameters['piponly'] = False
def downloaddict(self):
#update master variables
self.pwdir = self.pparameters['pwdir']
self.pdem_fn = self.pparameters['pdem_fn']
self.pthal_fn = self.pparameters['pthal_fn']
self.pflow_fn = self.pparameters['pflow_fn']
self.pvolume = self.pparameters['pvolume']
self.phlratio = self.pparameters['phlratio']
self.ppeak_entry = self.pparameters['ppeak_entry']
self.phprange = self.pparameters['phprange']
self.psealevel = self.pparameters['psealevel']
self.plahar_dir = self.pparameters['plahar_dir']
self.pinitpoints_fn = self.pparameters['pinitpoints_fn']
self.plog_fn = self.pparameters['plog_fn']
self.pecraw_fn = self.pparameters['pecraw_fn']
self.pecfilled_fn = self.pparameters['pecfilled_fn']
self.pecline_fn = self.pparameters['pecline_fn']
self.pplotmesh = self.pparameters['pplotmesh'] #boolean on boolean
self.pmesh_dir = self.pparameters['pmesh_dir']
self.pmeshres = self.pparameters['pmeshres']
self.pmeshsize = self.pparameters['pmeshsize']
self.pcalcinitpoints = self.pparameters['pcalcinitpoints']
self.puserowcol = self.pparameters['puserowcol']
self.piponly = self.pparameters['piponly']
#update text variables
self.tk_pwdir.delete(0, tk.END)
self.tk_pdem_fn.delete(0, tk.END)
self.tk_pthal_fn.delete(0, tk.END)
self.tk_pflow_fn.delete(0, tk.END)
self.tk_pvolume.delete(0, tk.END)
self.tk_phlratio.delete(0, tk.END)
self.tk_ppeak_entry.delete(0, tk.END)
self.tk_phprange.delete(0, tk.END)
self.tk_psealevel.delete(0, tk.END)
self.tk_plahar_dir.delete(0, tk.END)
self.tk_pinitpoints_fn.delete(0, tk.END)
self.tk_plog_fn.delete(0, tk.END)
self.tk_pecraw_fn.delete(0, tk.END)
self.tk_pecfilled_fn.delete(0, tk.END)
self.tk_pecline_fn.delete(0, tk.END)
self.tk_pmesh_dir.delete(0, tk.END)
self.tk_pmeshres.delete(0, tk.END)
self.tk_pmeshsize.delete(0, tk.END)
self.tk_pwdir.insert(0, self.pwdir)
self.tk_pdem_fn.insert(0, self.pdem_fn)
self.tk_pthal_fn.insert(0, self.pthal_fn)
self.tk_pflow_fn.insert(0, self.pflow_fn)
self.tk_pvolume.insert(0, self.pvolume)
self.tk_phlratio.insert(0, self.phlratio)
self.tk_ppeak_entry.insert(0, self.ppeak_entry)
self.tk_phprange.insert(0, self.phprange)
self.tk_psealevel.insert(0, self.psealevel)
self.tk_plahar_dir.insert(0, self.plahar_dir)
self.tk_pinitpoints_fn.insert(0, self.pinitpoints_fn)
self.tk_plog_fn.insert(0, self.plog_fn)
self.tk_pecraw_fn.insert(0, self.pecraw_fn)
self.tk_pecfilled_fn.insert(0, self.pecfilled_fn)
self.tk_pecline_fn.insert(0, self.pecline_fn)
self.tk_pmesh_dir.insert(0, self.pmesh_dir)
self.tk_pmeshres.insert(0, self.pmeshres)
self.tk_pmeshsize.insert(0, self.pmeshsize)
#update tk variables
self.tk_pplotmesh.set(self.pparameters['pplotmesh'])
self.tk_puserowcol.set(self.pparameters['puserowcol'])
self.tk_piponly.set(self.pparameters['piponly'])
self.tk_pcalcinitpoints.set(self.pparameters['pcalcinitpoints'])
    def load_params(self):
        """Load a previously saved parameter pickle and refresh the form.

        NOTE(review): this method contains two near-identical load
        passes.  The first validates the directory currently typed into
        the form; the second (starting at the unconditional
        ``pload_fn = self.tk_pload_fn.get()`` below) always runs, uses
        ``self.pwdir`` instead, and clears/overwrites the messages set by
        the first pass.  Looks like leftover duplication -- confirm which
        pass is intended before simplifying.

        SECURITY: ``pickle.load`` executes arbitrary code from the file;
        only load parameter files from trusted sources.
        """
        # --- first pass: use the directory typed into the form ---
        pwdir = self.tk_pwdir.get()
        if os.path.isdir("../" + pwdir) and pwdir != "":
            self.tk_pwdir_msg['text'] = ""
            pload_fn = self.tk_pload_fn.get()
            pload_fn = "../" + pwdir + "/" + pload_fn
            if os.path.isfile(pload_fn):
                self.tk_pload_fn_msg['text'] = ""
                try:
                    self.pparameters = pickle.load(open(pload_fn, "rb"))
                except:
                    self.tk_pload_fn_msg['text'] = "Error: File does not appear to be a pickle file"
                    self.tk_pload_fn_msg['fg'] = "red"
                # NOTE(review): runs even when the load above failed, so
                # the form is refreshed from the previous pparameters.
                self.downloaddict()
            else:
                self.tk_pload_fn_msg['text'] = "Error: File does not exist"
                self.tk_pload_fn_msg['fg'] = "red"
        else:
            self.tk_pload_fn_msg['text'] = "Error: Specify valid working directory"
            self.tk_pload_fn_msg['fg'] = "red"
            self.tk_pwdir_msg['text'] = "Error: Directory does not exist"
            self.tk_pwdir_msg['fg'] = "red"
        # --- second pass: use the last validated working directory ---
        pload_fn = self.tk_pload_fn.get()
        pload_fn = "../" + self.pwdir + "/" + pload_fn
        if os.path.isfile(pload_fn):
            try:
                self.pparameters = pickle.load(open(pload_fn, "rb"))
                self.downloaddict()
                # Clear every inline message so the freshly loaded form
                # starts without stale validation output.
                self.tk_pwdir_msg["text"] = ''
                self.tk_pdem_fn_msg["text"] = ''
                self.tk_pthal_fn_msg["text"] = ''
                self.tk_pflow_fn_msg["text"] = ''
                self.tk_pvolume_msg["text"] = ''
                self.tk_phlratio_msg["text"] = ''
                self.tk_ppeak_entry_msg["text"] = ''
                self.tk_phprange_msg["text"] = ''
                self.tk_psealevel_msg["text"] = ''
                self.tk_plahar_dir_msg["text"] = ''
                self.tk_pinitpoints_fn_msg["text"] = ''
                self.tk_plog_fn_msg["text"] = ''
                self.tk_pecraw_fn_msg["text"] = ''
                self.tk_pecfilled_fn_msg["text"] = ''
                self.tk_pecline_fn_msg["text"] = ''
                self.tk_pplotmesh_msg["text"] = ''
                self.tk_pmesh_dir_msg["text"] = ''
                self.tk_pmeshres_msg["text"] = ''
                self.tk_pmeshsize_msg["text"] = ''
                self.tk_puserowcol_msg["text"] = ''
                self.tk_pload_fn_msg["text"] = ''
                self.tk_psave_fn_msg["text"] = ''
                self.tk_statusmsg["text"] = "Parameters Loaded"
                self.tk_statusmsg['fg'] = "black"
            except:
                self.tk_pload_fn_msg['text'] = "Error: File does not appear to be a pickle file"
                self.tk_pload_fn_msg['fg'] = "red"
        else:
            self.tk_pload_fn_msg['text'] = "Error: File does not exist"
            self.tk_pload_fn_msg['fg'] = "red"
    def save_params(self):
        # Save the current screen parameters to a pickle file in the working
        # directory.  Validates the file name and the working directory first;
        # if the target file already exists, a confirm/cancel dialog is shown
        # before overwriting.
        def save_pickle(destroy = False):
            # Collect the widget values into self.pparameters and pickle them,
            # then clear every per-field message widget and report success.
            # destroy -- close the "file exists" confirmation window afterwards.
            self.uploaddict()
            pickle.dump(self.pparameters, open(psave_fn, "wb"))
            self.tk_pwdir_msg["text"] = ''
            self.tk_pdem_fn_msg["text"] = ''
            self.tk_pthal_fn_msg["text"] = ''
            self.tk_pflow_fn_msg["text"] = ''
            self.tk_pvolume_msg["text"] = ''
            self.tk_phlratio_msg["text"] = ''
            self.tk_ppeak_entry_msg["text"] = ''
            self.tk_phprange_msg["text"] = ''
            self.tk_psealevel_msg["text"] = ''
            self.tk_plahar_dir_msg["text"] = ''
            self.tk_pinitpoints_fn_msg["text"] = ''
            self.tk_plog_fn_msg["text"] = ''
            self.tk_pecraw_fn_msg["text"] = ''
            self.tk_pecfilled_fn_msg["text"] = ''
            self.tk_pecline_fn_msg["text"] = ''
            self.tk_pplotmesh_msg["text"] = ''
            self.tk_pmesh_dir_msg["text"] = ''
            self.tk_pmeshres_msg["text"] = ''
            self.tk_pmeshsize_msg["text"] = ''
            self.tk_puserowcol_msg["text"] = ''
            self.tk_pload_fn_msg["text"] = ''
            self.tk_psave_fn_msg["text"] = ''
            self.tk_statusmsg["text"] = "Parameters Saved"
            self.tk_statusmsg['fg'] = "black"
            if destroy:
                window.destroy()
        # Validate tk_psave_fn
        # File name must be exactly <alphanumeric stem>.pickle (underscores etc. rejected)
        psave_fn = self.tk_psave_fn.get().split(".")
        if len(psave_fn)!=2:
            self.tk_psave_fn_msg['text'] = "Error: Invalid file format"
            self.tk_psave_fn_msg['fg'] = "red"
            self.error = True
        elif not psave_fn[0].isalnum():
            self.tk_psave_fn_msg['text'] = "Error: Invalid file format"
            self.tk_psave_fn_msg['fg'] = "red"
            self.error = True
        elif psave_fn[1] !="pickle":
            self.tk_psave_fn_msg['text'] = "Error: pickle extension only"
            self.tk_psave_fn_msg['fg'] = "red"
            self.error = True
        else:
            # Working directory is resolved relative to the parent directory
            pwdir = self.tk_pwdir.get()
            if os.path.isdir("../" + pwdir) and pwdir != "":
                self.pwdir = pwdir
                psave_fn = "../" + pwdir + "/" + self.tk_psave_fn.get()
                if os.path.isfile(psave_fn):
                    # Target already exists: ask the user before overwriting
                    window = tk.Toplevel()
                    label1 = tk.Label(window, text="File already exists")
                    label1.pack(fill='x', padx=50, pady=5)
                    button_save = tk.Button(window, text="Save", command=lambda: save_pickle(destroy=True))
                    button_save.pack(fill='x')
                    button_cancel = tk.Button(window, text="Cancel", command=window.destroy)
                    button_cancel.pack(fill='x')
                else:
                    save_pickle()
            else:
                self.tk_psave_fn_msg['text'] = "Error: Specify valid working directory"
                self.tk_psave_fn_msg['fg'] = "red"
                self.tk_pwdir_msg['text'] = "Error: Directory does not exist"
                self.tk_pwdir_msg['fg'] = "red"
def laharz():
class Point(object):
# defines a 'Point' object which is any (row, column) pair on the cell matrix. The benefit of the class is to be
# able to move to the next point in the matrix without worrying about reaching the edges. Using this class you can
# add or subject from the point in any direction but if you reach the edge, the result will just be the point on
# the edge. A warning will produce an warning message if the edge is reached. If the warning flag (which defaults
# to "Y") is set to anything but "Y" o check on the boundary edge is not performed.
def __init__(self, p):
self.p = p
def __str__(self):
rep = "Point:" + str(self.p)
return rep
def plus(self, p, warn="Y"):
r1 = self.p[0]
r2 = p.p[0]
c1 = self.p[1]
c2 = p.p[1]
if warn == "Y":
r3 = min(r1 + r2, nrows - 1)
r3 = max(r3, 0) # in case of addition of negitive amount
c3 = min(c1 + c2, ncols - 1)
c3 = max(c3, 0) # in case of addition of negitive amount
if r1 + r2 > nrows - 1 or r1 + r2 < 0:
log_msg("Warning: potential overflow as {} is added to row {}".format(r2, r1))
if c1 + c2 > ncols - 1 or c1 + c2 < 0:
log_msg("Warning: potential overflow as {} is added to column {}".format(c2, c1))
else:
r3 = r1 + r2
c3 = c1 + c2
return Point([r3, c3])
def minus(self, p, warn="Y"):
r1 = self.p[0]
r2 = p.p[0]
c1 = self.p[1]
c2 = p.p[1]
if warn == "Y":
r3 = max(r1 - r2, 0)
r3 = min(r3, nrows - 1) # in case of subtraction of negitive amount
c3 = max(c1 - c2, 0)
c3 = min(c3, ncols - 1)
if r1 - r2 > nrows - 1 or r1 - r2 < 0:
log_msg("Warning: potential overflow as {} is subtracted from row {}".format(r2, r1))
if c1 - c2 > ncols - 1 or c1 - c2 < 0:
log_msg("Warning: potential overflow as {} is subtracted from column {}".format(c2, c1))
else:
r3 = r1 - r2
c3 = c1 - c2
return Point([r3, c3])
def vector(self):
"""returns a list of the components of the point to allow it to be used for indexing numpy arrays"""
return (self.p[0], self.p[1])
def __eq__(self, other):
if not isinstance(other, Point):
# don't attempt to compare against unrelated types
return NotImplemented
return self.p == other.p
# Functions
def log_msg(msg, screen_op = True, file_op = True, errmsg = False, initfile = False):
"""logs message"""
dt = datetime.datetime.now()
if screen_op:
app.tk_statusmsg["text"] = msg
if not errmsg:
app.tk_statusmsg["fg"] = "black"
else:
app.tk_statusmsg["fg"] = "red"
app.update()
msg = "{:02d}:{:02d}:{:02d}: ".format(dt.hour, dt.minute, dt.second) + msg
if file_op:
if initfile:
f_log = open(plog_fn, 'w')
else:
f_log = open(plog_fn, 'a')
f_log.write(msg +'\n')
f_log.close
#print(msg)
def LoadFile(fn):
""" Loads a .tif or text file (.asc or .txt)"""
try:
f = rio.open(fn)
except:
log_msg("File not found: " + fn)
sys.exit() #shouldnt happen
if fn[-3:] == "txt" or fn[-3] == "asc": # ascii file
fcrs = pyproj.Proj(ptextcrs)
elif fn[-3:] == "tif": # tif file
fcrs = pyproj.Proj(f.crs)
else:
log_msg("Unrecognised file type for file name:{}. Terminating".format(fn))
sys.exit() #shouldnt happen
v = f.read(1)
return f, fcrs, v
def SaveFile(fn, ref_f, v):
""" Saves a matrix as a .tif file, an Ascii file (.txt) or a .csv file"""
# Although possible to save an as Ascii file there seems no particular reason to do so. Savingg as a csv file is convenient
# for debugging. Recommend that the file is saved as a .tif file
fn = fn.split(".")[0]
# entry screen provides file names with extensions. These are ignored in program and the paramters pwritexxx are used instead to control output.
# As the entry screen does not allow the user to enter the pwritexxx parameters, the pwritexxx parameters as specified in the program are for
# tif files only. Adopting this method leaves an expert user to use the pwritexxx parameters in the program to control output and a general user
# to rely on .tif files
if pwritetif:
resolve_inout(overwrite=True)
profile = ref_f.profile
# profile.update(dtype=rio.uint8, count=1, compress='lzw', nodata = 255)
profile.update(dtype=rio.uint8, count=1, nodata=255)
with rio.Env():
with rio.open("../" + pwdir + "/" + fn + ".tif", 'w', **profile) as dst:
dst.write(v.astype(rio.uint8), 1)
if pwriteascii:
f = open("../" + pwdir + "/" + fn + ".txt", "w")
f.write("ncols " + str(v.shape[1]) + "\n")
f.write("nrows " + str(v.shape[0]) + "\n")
row = col = 0
east, north = ref_f.xy(row, col, offset='ll') # spatial --> image coordinates
lon, lat = dem_crs(east, north, inverse=True)
f.write("xllcorner " + str(lon) + "\n")
f.write("yllcorner " + str(lat) + "\n")
f.write("cellsize " + str(ref_f.transform[0]) + "\n")
f.write("NODATA_value 255")
for i in reversed(v):
f.write(" ".join(map(str, map(int, i))) + "\n")
f.close()
if pwritecsv:
np.savetxt("../" + pwdir + "/" + fn + ".csv", v, delimiter=",")
def AddPointKML(kml, ll, name):
"""Adds a point to a .kml file for display in Google Earth"""
kml.newpoint(name=name, coords=[ll])
def AddPolyKML(kml, llnw, llse, name, colour):
"""Adds a ploygon to a .kml file for display in Google Earth"""
llne = [llse[0], llnw[1]]
llsw = [llnw[0], llse[1]]
pol = kml.newpolygon(name=name, outerboundaryis=[llnw, llne, llse, llsw])
pol.style.polystyle.color = colour
def PlotFile(fn, arr):
""" saves the matrix as a .png image file. Used for debugging"""
data = np.zeros((np.shape(arr)[0], np.shape(arr)[1], 3), dtype=np.uint8) # height (num rows), width (num columns)
for i in range(arr.shape[0]): # rows
for j in range(arr.shape[1]): # cols
if arr[i, j]:
data[i, j] = [255, 0, 0]
img = Image.fromarray(data, 'RGB')
img.save(fn + '.png')
def Plot_xsecarea(ppos, pneg, pathpoint, level, direction, seq, innund):
""""Plots an image of the cross sectional area of the Lahar at a particular point in a particular direction"""
# Mostly usefule for debuggig but supports a bit more in depth analysis of a peculiar point on a lahar
global draw1
global font
xsecarea = 0
# plot from 2 cells to the 'positive' and two cells to the 'negative'
if direction == "N-S":
inc = Point([1, 0])
dist = dem_cell_size
elif direction == "W-E":
inc = Point([0, 1])
dist = dem_cell_size
elif direction == "SW-NE":
inc = Point([1, 1])
dist = dem_cell_size * 2 ** 0.5
elif direction == "NW-SE":
inc = Point([-1, 1])
dist = dem_cell_size * 2 ** 0.5
pneg = pneg.minus(inc, "N")
pneg = pneg.minus(inc, "N")
ppos = ppos.plus(inc, "N")
ppos = ppos.plus(inc, "N")
# get number of cells
rl = abs(ppos.vector()[0] - pneg.vector()[0]) + 1 # number of point along a row
cl = abs(ppos.vector()[1] - pneg.vector()[1]) + 1 # number of points along a column
ncells = max(rl, cl)
# get bottom of channel
chan_base = dem_v[pathpoint.vector()]
pgborder = 50
maxh = dem_v[pathpoint.vector()]
minh = dem_v[pathpoint.vector()]
p = pneg
for i in range(ncells):
if dem_v[p.vector()] > maxh:
maxh = dem_v[p.vector()]
if dem_v[p.vector()] < minh:
minh = dem_v[p.vector()]
p = p.plus(inc, "N")
hrange = max(maxh - minh, level - minh)
# set up image
pheight = 1080
pwidth = 1920
pborder = 50
pskyborder = 100
img = Image.new('RGB', (pwidth, pheight), color='#000000')
draw1 = ImageDraw.Draw(img, "RGBA")
font = ImageFont.truetype("arial.ttf", 16)
hh = (pheight - pborder * 2 - pgborder - pskyborder) / hrange # height scaling factor
ww = (pwidth - pborder * 2) / ncells # width scaling factor
p = pneg
for i in range(ncells):
# draw ground
xsw = int(i * ww) + pborder
xne = int((i + 1) * ww) - 1 + pborder
ysw = pheight - pborder
yne = pheight - (int((dem_v[p.vector()] - minh) * hh) + pgborder + pborder)
draw1.rectangle([(xsw, ysw), (xne, yne)], fill='#548235')
draw1.line([(xsw, ysw), (xsw, yne), (xne, yne), (xne, ysw), xsw, ysw], fill='#000000')
PlotMsg("R:{} C:{}".format(p.vector()[0], p.vector()[1]), (xsw + xne) / 2, ysw + 1, "tc", "#FFFFFF")
PlotMsg("Elev:{}".format(dem_v[p.vector()]), (xsw + xne) / 2, yne + 1, "tc", "#FFFFFF")
# draw lahar
if innund[p.vector()] and i in range(2, ncells - 2):
xsecarea += (level - dem_v[p.vector()]) * dist
xsw = int(i * ww) + pborder
xne = int((i + 1) * ww) - 1 + pborder
ysw = pheight - (int((dem_v[p.vector()] - minh) * hh) + pgborder + pborder)
yne = pheight - (int((level - minh) * hh) + pgborder + pborder)
draw1.rectangle([(xsw, ysw), (xne, yne)], fill='#8C3838')
draw1.line([(xsw, ysw), (xsw, yne), (xne, yne), (xne, ysw), (xsw, ysw)], fill='#000000')
if p == pathpoint:
PlotMsg("Level:{}".format(level), (xsw + xne) / 2, yne + 1, "tc", "#FFFFFF")
# draw sky - assumes cordinates from either ground or lahar
ysw = yne
yne = pheight - (pheight - pborder)
draw1.rectangle([(xsw, ysw), (xne, yne)], fill='#7DF5FB')
draw1.line([(xsw, ysw), (xsw, yne), (xne, yne), (xne, ysw), (xsw, ysw)], fill='#000000')
p = p.plus(inc, "N")
PlotMsg("Cross Sectional Area Calculated: {:.2f} Cross Sectional Area Limit: {:.2f} Cell width: {:.2f}".format(xsecarea, xsec_area_limit, dist), pborder, pheight - (pheight - 10), "tl",
"#FFFFFF")
fn = "Point P{} R{}-C{}-{}".format(seq, pathpoint.vector()[0], pathpoint.vector()[1], direction)
img.save(pxsecareafolder + "/" + fn + '.png')
img.close
def PlotMsg(msg, x, y, anchor="tl", fill="#000000"):
"""Plots text on the Cross Section Chart"""
# anchor
# t - top, m - middle, b - bottom
# l = left, c = centre, r = right
# assumes x,y origin of image in bottom left corner and the inversion to top left corner is handled elsewhere
wt, ht = font.getsize(msg)
if anchor[0] == 't':
ht = 0
elif anchor == "m":
ht = ht / 2
if anchor[1] == 'l':
wt = 0
elif anchor[1] == "c":
wt = wt / 2
draw1.text((x - wt, y - ht), msg, fill=fill, font=font)
def ll2rc(ll):
"""converts lon and lat to row and column"""
east, north = dem_crs(ll[0], ll[1])
row, col = dem_f.index(east, north) # spatial --> image coordinates
return (row, col)
def rc2ll(rc):
"""converts row and column to lon and lat"""
# places at centre of cell - not se corner
east, north = dem_f.xy(rc[0], rc[1]) # spatial --> image coordinates
lon, lat = dem_crs(east, north, inverse=True)
return (lon, lat)
def lltransform(ll, dist, bearing):
"""returns new lon, lat of point [dist] away from [ll] at bearing [bearing]"""
end_lon, end_lat, backaz = geod.fwd(ll[0], ll[1], bearing, dist)
return (end_lon, end_lat)
def rcdist(rc1, rc2):
"""returns the distance between two points based on row and column"""
ll1 = rc2ll(rc1)
ll2 = rc2ll(rc2)
forward_az, backward_az, distance = geod.inv(ll1[0], ll1[1], ll2[0], ll2[1])
return distance
def PolygonDEM(kml, llnw, llse):
"""draw square on kml file for DEM scope"""
AddPolyKML(kml, llnw, llse, "DEM", '4f00ff00')
AddPointKML(kml, llnw, "DEM NW")
AddPointKML(kml, llse, "DEM SE")
    def CreateSurfaceMesh():
        """Creates a mesh file of the DEM matrix surface"""
        # Builds a triangulated gmsh surface over the (windowed, down-sampled)
        # DEM and writes it to <pmesh_dir>/Surface.vtk for viewing in paraview.
        # Sampling is pmeshrescol x pmeshresrow points; mesh_lr/mesh_lc give
        # the lower row/column offset of the mesh window within the DEM.
        log_msg("Preparing surface mesh domain...")
        a = sys.argv
        gmsh.initialize(sys.argv)
        gmsh.model.add("3DDEM")
        log_msg("Adding surface points...")
        p1 = [] # list of all points
        c_step = mesh_cols / pmeshrescol
        r_step = mesh_rows / pmeshresrow
        for i in range(pmeshrescol): # cols
            for j in range(pmeshresrow): # rows
                # Technically the point in the mesh is in the bottom left corner but with the elevation of the centre of the pixel. Probably.
                # As this is intended for visualisation of the landscape and energy cone, a uniform translation of half a pixel is probably not
                # that significant. Plus presented in paraview where it is in metres not lat/long
                x = (i * c_step + mesh_lc) * dem_cell_size
                y = ((pmeshresrow - 1 - j) * r_step + mesh_lr) * dem_cell_size # reverse rows
                p1 += [gmsh.model.geo.addPoint(x, y, dem_v[int(j * r_step) + mesh_lr, int(i * c_step) + mesh_lc,])] # todo assumes square pixels
        log_msg("Connecting Surface Lines...")
        hor_line = [] # list of all horizontal lines
        ver_line = [] # list of all vertical lines
        dia_line = [] # list of all diagonals
        for i in range(pmeshrescol): # cols
            for j in range(pmeshresrow): # rows
                base_index = i * pmeshresrow + j
                hplus_index = (i + 1) * pmeshresrow + j
                vplus_index = (i) * pmeshresrow + j + 1
                dplus_index = (i + 1) * pmeshresrow + j + 1
                if i < pmeshrescol - 1:
                    hor_line += [gmsh.model.geo.addLine(p1[base_index], p1[hplus_index]), ]
                if j < pmeshresrow - 1:
                    ver_line += [gmsh.model.geo.addLine(p1[base_index], p1[vplus_index]), ]
                if i < pmeshrescol - 1 and j < pmeshresrow - 1:
                    # each grid square is split into two triangles along p_diagonal
                    if p_diagonal == "SW-NE":
                        dia_line += [gmsh.model.geo.addLine(p1[base_index], p1[dplus_index]), ]
                    else:
                        dia_line += [gmsh.model.geo.addLine(p1[vplus_index], p1[hplus_index]), ]
        log_msg("Adding surface lines to loops...")
        curve_loop = [] # list of all curve loops
        for i in range(0, pmeshrescol - 1):
            for j in range(0, pmeshresrow - 1):
                h_index = i * pmeshresrow + j
                hplus_index = h_index + 1
                v_index = i * (pmeshresrow - 1) + j
                vplus_index = (i + 1) * (pmeshresrow - 1) + j
                d_index = i * (pmeshresrow - 1) + j
                # negative line ids reverse the line direction so each triangle
                # forms a closed loop
                if p_diagonal == "SW-NE":
                    curve_loop += [gmsh.model.geo.addCurveLoop([hor_line[h_index], ver_line[vplus_index], -dia_line[d_index]]), ]
                    curve_loop += [gmsh.model.geo.addCurveLoop([ver_line[v_index], hor_line[hplus_index], -dia_line[d_index]]), ]
                else:
                    curve_loop += [gmsh.model.geo.addCurveLoop([hor_line[h_index], -dia_line[d_index], -ver_line[v_index]]), ]
                    curve_loop += [gmsh.model.geo.addCurveLoop([hor_line[hplus_index], -ver_line[vplus_index], -dia_line[d_index]]), ]
        log_msg("Creating surface plane surfaces...")
        plane_surface = [] # list of all plane surfaces
        for i, j in enumerate(curve_loop):
            plane_surface += [gmsh.model.geo.addPlaneSurface([j]), ]
        # log_msg("Completing Surface Loop...")
        # gmsh.model.geo.removeAllDuplicates()
        # surface_loop = gmsh.model.geo.addSurfaceLoop(plane_surface)
        # log_msg("Creating Volume...")
        # vol = gmsh.model.geo.addVolume([surface_loop])
        log_msg("Generating surface mesh...")
        gmsh.model.geo.synchronize()
        gmsh.model.mesh.generate(3)
        log_msg("Saving surface mesh files...")
        # vtk file for paraview
        gmsh.write("../" + pwdir + "/" + pmesh_dir + "/Surface.vtk")
        log_msg("../" + pwdir + "/" + pmesh_dir + "/Surface.vtk")
        gmsh.finalize()
    def CreateEnergyConeMesh():
        """Creates a mesh file of the energy cone"""
        # Same triangulation scheme as CreateSurfaceMesh, but the z value of
        # each sample is the energy-cone height: peak_height minus phlratio
        # times the horizontal distance from the peak (an inverted cone
        # centred on the peak).  Output: <pmesh_dir>/Cone.vtk for paraview.
        log_msg("Preparing energy cone mesh domain...")
        a = sys.argv
        gmsh.initialize(sys.argv)
        gmsh.model.add("3DCone")
        log_msg("Adding cone points...")
        p1 = [] # list of all points
        c_step = mesh_cols / pmeshrescol
        r_step = mesh_rows / pmeshresrow
        # peak position in metres (column -> x, row -> y)
        px = peakrc[1] * dem_cell_size
        py = peakrc[0] * dem_cell_size
        for i in range(pmeshrescol): # cols
            for j in range(pmeshresrow): # rows
                # Technically the point in the mesh is in the bottom left corner but with the elevation of the centre of the pixel. Probably.
                # As this is intended for visualisation of the landscape and energy cone, a uniform translation of half a pixel is probably not
                # that significant. Plus presented in paraview where it is in metres not lat/long
                x = ((i * c_step) + mesh_lc) * dem_cell_size
                y = ((j * r_step) + mesh_lr) * dem_cell_size # used for calculation
                r = ((px - x) ** 2 + (py - y) ** 2) ** 0.5
                y = (((pmeshresrow - 1 - j) * r_step) + mesh_lr) * dem_cell_size # reverse rows
                h = -r * phlratio + peak_height
                p1 += [gmsh.model.geo.addPoint(x, y, h, )] # todo assumes square pixels with dem_cell_size; mesh resolution can be different in x and y
        log_msg("Connecting cone lines...")
        hor_line = [] # list of all horizontal lines
        ver_line = [] # list of all vertical lines
        dia_line = [] # list of all diagonals
        for i in range(pmeshrescol): # cols
            for j in range(pmeshresrow): # rows
                base_index = i * pmeshresrow + j
                hplus_index = (i + 1) * pmeshresrow + j
                vplus_index = (i) * pmeshresrow + j + 1
                dplus_index = (i + 1) * pmeshresrow + j + 1
                if i < pmeshrescol - 1:
                    hor_line += [gmsh.model.geo.addLine(p1[base_index], p1[hplus_index]), ]
                if j < pmeshresrow - 1:
                    ver_line += [gmsh.model.geo.addLine(p1[base_index], p1[vplus_index]), ]
                if i < pmeshrescol - 1 and j < pmeshresrow - 1:
                    # each grid square is split into two triangles along p_diagonal
                    if p_diagonal == "SW-NE":
                        dia_line += [gmsh.model.geo.addLine(p1[base_index], p1[dplus_index]), ]
                    else:
                        dia_line += [gmsh.model.geo.addLine(p1[vplus_index], p1[hplus_index]), ]
        log_msg("Adding cone lines to loops...")
        curve_loop = [] # list of all curve loops
        for i in range(0, pmeshrescol - 1):
            for j in range(0, pmeshresrow - 1):
                h_index = i * pmeshresrow + j
                hplus_index = h_index + 1
                v_index = i * (pmeshresrow - 1) + j
                vplus_index = (i + 1) * (pmeshresrow - 1) + j
                d_index = i * (pmeshresrow - 1) + j
                # negative line ids reverse the line direction so each triangle
                # forms a closed loop
                if p_diagonal == "SW-NE":
                    curve_loop += [gmsh.model.geo.addCurveLoop([hor_line[h_index], ver_line[vplus_index], -dia_line[d_index]]), ]
                    curve_loop += [gmsh.model.geo.addCurveLoop([ver_line[v_index], hor_line[hplus_index], -dia_line[d_index]]), ]
                else:
                    curve_loop += [gmsh.model.geo.addCurveLoop([hor_line[h_index], -dia_line[d_index], -ver_line[v_index]]), ]
                    curve_loop += [gmsh.model.geo.addCurveLoop([hor_line[hplus_index], -ver_line[vplus_index], -dia_line[d_index]]), ]
        log_msg("Creating cone plane surfaces...")
        plane_surface = [] # list of all plane surfaces
        for i, j in enumerate(curve_loop):
            plane_surface += [gmsh.model.geo.addPlaneSurface([j]), ]
        # log_msg("Completing Surface Loop...")
        # gmsh.model.geo.removeAllDuplicates()
        # surface_loop = gmsh.model.geo.addSurfaceLoop(plane_surface)
        # log_msg("Creating Volume...")
        # vol = gmsh.model.geo.addVolume([surface_loop])
        log_msg("Generating cone mesh...")
        gmsh.model.geo.synchronize()
        gmsh.model.mesh.generate(3)
        log_msg("Saving cone mesh files...")
        # vtk file for paraview
        gmsh.write("../" + pwdir + "/" + pmesh_dir + "/Cone.vtk")
        gmsh.finalize()
    def GenLahar(v, ip):
        """Generates the lahar for a particular volume and initiation point"""
        # Implements the LaharZ inundation walk: follow the flow-direction grid
        # downstream from the initiation point, and at each channel point fill
        # cross sections until the volume-derived cross-section and planimetric
        # area limits are met.
        # v  -- lahar volume in m3
        # ip -- initiation point as [label, row, col]
        # Returns the 0/1 inundation raster (same shape as the DEM).
        global xsec_area_limit
        xsec_area_limit = 0.05 * v ** (2 / 3) # Cross sectional area in m2
        plan_area_limit = 200 * v ** (2 / 3) # Planimetric area in m2
        dy = 1 # Increment of elevation in m
        if pplotxsecarea and ip[0] == pplotip and v == pplotvol:
            # Debug aid: record every evaluated channel point for the one
            # chosen initiation-point / volume combination.
            # NOTE(review): this file handle is never explicitly closed — confirm
            # buffered rows are flushed before the program exits.
            xseccsv = csv.writer(open(pxsec_fn, "w"), delimiter=',', quoting=csv.QUOTE_ALL)
            xseccsv.writerow(["Point", "Latitude", "Longitude", "Row", "Col"])
        innund = np.zeros_like(dem_v) # Defines a zeros rasters where the inundation cells will be writen as 1's values.
        plan_area = 0
        # cycle down the stream until the planometric area limit is exceeded
        # Stops when it reaches the sea
        ## Channel paths terminate 1 cell from the
        ## map edge so no danger of overflow.
        current_point = Point([ip[1], ip[2]])
        point_number = 1
        flow_direction = flowdir_v[current_point.vector()] # 1 = NE, 2 = N, 3 = NW...continuing ACW until 8 = E. Some minus error values can exist on the edges
        while plan_area <= plan_area_limit and dem_v[current_point.vector()] > psealevel and flow_direction > 0:
            # The cross-section profile parallel to the flow direction carries no
            # information, so it is skipped ("ignore") in EvalPoint.
            if flow_direction % 4 == 2: # North or South ie 2 or 6
                ignore = "N-S"
            elif flow_direction % 4 == 0: # East or West ie 4 or 8
                ignore = "W-E"
            elif flow_direction % 4 == 3: # North West or South East ie 3 or 7
                ignore = "NW-SE"
            elif flow_direction % 4 == 1: # North East or South West ie 1 or 5:
                ignore = "SW-NE"
            else:
                log_msg("Error: flow direction at point {} has value {} #1. Expecting values between 1-8. "
                        "Possibly this is because the point is at the very edge of the DEM. Terminating".format(current_point.vector(), flow_direction))
                sys.exit()
            if pplotxsecarea and ip[0] == pplotip and v == pplotvol:
                ipll = rc2ll((current_point.vector()[0], current_point.vector()[1]))
                xseccsv.writerow(["P{:02d}".format(point_number), ipll[1], ipll[0], current_point.vector()[0], current_point.vector()[1]])
            seq = str(point_number)
            plan_area, innund = EvalPoint(current_point, plan_area, plan_area_limit, v, ip, xsec_area_limit, innund, ignore, seq)
            # also evaluate adjacent point if flowing in a diagonal
            # if flowing NW, evaluate 1 point left (West)
            if flow_direction == 3 and dem_v[current_point.plus(Point([0, -1])).vector()] > psealevel:
                seq += "+W"
                plan_area, innund = EvalPoint(current_point.plus(Point([0, -1])), plan_area, plan_area_limit, v, ip, xsec_area_limit, innund, ignore, seq)
                if pplotxsecarea and ip[0] == pplotip and v == pplotvol:
                    ipll = rc2ll((current_point.plus(Point([0, -1])).vector()))
                    xseccsv.writerow(["P{:02d}W".format(point_number), ipll[1], ipll[0], current_point.plus(Point([0, -1])).vector()[0], current_point.plus(Point([0, -1])).vector()[1]])
            # if flowing SW, evaluate 1 point below (South) - note that the DEM is inverted hence the vector for south is 1,0 rather than -1,0
            elif flow_direction == 5 and dem_v[current_point.plus(Point([1, 0])).vector()] > psealevel:
                seq += "+S"
                plan_area, innund = EvalPoint(current_point.plus(Point([1, 0])), plan_area, plan_area_limit, v, ip, xsec_area_limit, innund, ignore, seq)
                if pplotxsecarea and ip[0] == pplotip and v == pplotvol:
                    ipll = rc2ll((current_point.plus(Point([1, 0])).vector()))
                    xseccsv.writerow(["P{:02d}S".format(point_number), ipll[1], ipll[0], current_point.plus(Point([1, 0])).vector()[0], current_point.plus(Point([1, 0])).vector()[1]])
            # if flowing SE, evaluate 1 point left (East)
            elif flow_direction == 7 and dem_v[current_point.plus(Point([0, 1])).vector()] > psealevel:
                seq += "+E"
                plan_area, innund = EvalPoint(current_point.plus(Point([0, 1])), plan_area, plan_area_limit, v, ip, xsec_area_limit, innund, ignore, seq)
                if pplotxsecarea and ip[0] == pplotip and v == pplotvol:
                    ipll = rc2ll((current_point.plus(Point([0, 1])).vector()))
                    xseccsv.writerow(["P{:02d}E".format(point_number), ipll[1], ipll[0], current_point.plus(Point([0, 1])).vector()[0], current_point.plus(Point([0, 1])).vector()[1]])
            # if flowing NE, evaluate 1 point up (North)
            elif flow_direction == 1 and dem_v[current_point.plus(Point([-1, 0])).vector()] > psealevel:
                plan_area, innund = EvalPoint(current_point.plus(Point([-1, 0])), plan_area, plan_area_limit, v, ip, xsec_area_limit, innund, ignore, seq)
                seq += "+N"
                if pplotxsecarea and ip[0] == pplotip and v == pplotvol:
                    ipll = rc2ll((current_point.plus(Point([-1, 0])).vector()))
                    xseccsv.writerow(["P{:02d}N".format(point_number), ipll[1], ipll[0], current_point.plus(Point([-1, 0])).vector()[0], current_point.plus(Point([-1, 0])).vector()[1]])
            # next point: step one cell in the flow direction (rows grow southward)
            if flow_direction == 1:
                current_point = current_point.plus(Point([-1, 1]))
            elif flow_direction == 2:
                current_point = current_point.plus(Point([-1, 0]))
            elif flow_direction == 3:
                current_point = current_point.plus(Point([-1, -1]))
            elif flow_direction == 4:
                current_point = current_point.plus(Point([0, -1]))
            elif flow_direction == 5:
                current_point = current_point.plus(Point([1, -1]))
            elif flow_direction == 6:
                current_point = current_point.plus(Point([1, 0]))
            elif flow_direction == 7:
                current_point = current_point.plus(Point([1, 1]))
            elif flow_direction == 8:
                current_point = current_point.plus(Point([0, 1]))
            else:
                log_msg("Error: flow direction at point {} has value {} #2. Expecting values between 1-8. "
                        "Possibly this is because the point is at the very edge of the DEM. Terminating".format(current_point.vector(), flow_direction))
                sys.exit()
            flow_direction = flowdir_v[current_point.vector()] # 1 = NE, 2 = N, 3 = NW...continuing ACW until 8 = E. Some minus error values can exist on the edges
            point_number += 1
        if flow_direction <= 0:
            log_msg("Warning: flow direction at point {} has value {} #2. Expecting values between 1-8. "
                    "This is because the lahar has reached the very edge of the DEM.".format(current_point.vector(), flow_direction))
        return innund
    def EvalPoint(pathpoint, plan_area, plan_area_limit, v, ip, xsec_area_limit, innund, ignore, seq):
        """Evaluates a point on the lahar"""
        # increments the lahar level from the elevation of the channel point being considered until the cross sectional area
        # limit is met. Does this four times - ie in each of the following directions: N-S, E-W, NW-SE and SW-NE. Cross
        # sectional area is the sum of the area in all four directions.
        # Returns the updated (plan_area, innund) tuple; innund is also marked in place.
        innund[pathpoint.vector()] = 1
        directions = ["N-S", "W-E", "NW-SE", "SW-NE"]
        for i in directions:
            # cycle through each direction. Sets a vector to move in a positive direction and a negative direction away from the
            # initial point. Positive and negative are arbitrary terms to describe the opposite directions. If direction is
            # diagonal then each step has a distance of root 2.
            # Cross sectional areas where the profile is in the direction of the stream are ignored
            #
            # If the calculation of the cross sectional area over tops a local maximum, the area of the downstream section is
            # not considered. This doesn't seem to be a case considered in Schilling
            #
            # The class Point is used as the plus and minus operators prevent overflow if the cross sectional area is close to
            # the boundary. In this case the positive or negative point remains on the boundary as the level increases.
            # To be technically correct, a larger DEM should be used to allow the cross sectional area and inundation to
            # be completed. A warning is printed in the Plus or Minus operations in the class.
            if i != ignore:
                if i == "N-S":
                    pos_vect = Point([1, 0])
                    neg_vect = Point([-1, 0])
                    inc_dist = dem_cell_size
                elif i == "W-E":
                    pos_vect = Point([0, 1])
                    neg_vect = Point([0, -1])
                    inc_dist = dem_cell_size
                elif i == "NW-SE":
                    pos_vect = Point([-1, 1])
                    neg_vect = Point([1, -1])
                    inc_dist = 2 ** 0.5 * dem_cell_size
                elif i == "SW-NE":
                    pos_vect = Point([1, 1])
                    neg_vect = Point([-1, -1])
                    inc_dist = 2 ** 0.5 * dem_cell_size
                dy = 1
                dist = inc_dist
                # NOTE(review): "inc_dist + dy" looks like it may intend
                # inc_dist * dy (initial slice area = width x depth) — confirm
                # against the LaharZ/Schilling specification.
                xsec_area = inc_dist + dy
                level = dem_v[pathpoint.vector()] + dy # initial level of flow
                p_pos = pathpoint # set both points to the initial path point
                p_neg = pathpoint # set both points to the initial path point
                # Widen alternately in the positive and negative directions while
                # the water level is above the neighbouring ground; when neither
                # side can widen, raise the level by dy instead.
                while xsec_area <= xsec_area_limit and plan_area <= plan_area_limit:
                    raise_level = True
                    p_pos_new = p_pos.plus(pos_vect)
                    p_pos_new_level = dem_v[p_pos_new.vector()]
                    if level > p_pos_new_level and p_pos_new_level > psealevel:
                        p_pos = p_pos_new
                        dist += inc_dist
                        xsec_area += inc_dist * (level - dem_v[p_pos.vector()])
                        innund[p_pos.vector()] = 1
                        plan_area += dem_cell_size ** 2
                        raise_level = False
                    if xsec_area <= xsec_area_limit:
                        p_neg_new = p_neg.plus(neg_vect)
                        p_neg_new_level = dem_v[p_neg_new.vector()]
                        if level > p_neg_new_level and p_neg_new_level > psealevel:
                            p_neg = p_neg_new
                            dist += inc_dist
                            xsec_area += inc_dist * (level - dem_v[p_neg.vector()])
                            innund[p_neg.vector()] = 1
                            plan_area += dem_cell_size ** 2
                            raise_level = False
                    if raise_level and xsec_area <= xsec_area_limit:
                        level += dy
                        xsec_area += dist * dy
                if pplotxsecarea and ip[0] == pplotip and v == pplotvol:
                    Plot_xsecarea(p_pos, p_neg, pathpoint, level, i, seq, innund)
        return (plan_area, innund)
def op_ec_points(ec_points):
"""Dumps out all points on the energy cone line for debugging"""
ec_fn = "../{}/{}".format(pwdir, "ec_points.csv")
epcsv = csv.writer(open(ec_fn, "w"), delimiter=',', dialect="excel", quoting=csv.QUOTE_MINIMAL)
epcsv.writerow(["Label", "Latitude", "Longitude", "Row", "Column"])
for i, irc in enumerate(ec_points):
ll = rc2ll(irc)
epcsv.writerow(["P"+str(i), ll[1], ll[0], irc[0], irc[1]])
#Start of Lahar sub routine
# Copy all run parameters from the GUI 'app' object into local p* names,
# then append the code-only debugging parameters that have no screen entry.
pwdir = app.pwdir
pdem_fn = app.pdem_fn
pthal_fn = app.pthal_fn
pflow_fn = app.pflow_fn
pvolume = app.pvolume
phlratio = app.phlratio
# ppeak_entry = app.ppeak_entry.reverse() #revert back to long, lat order
ppeak_entry = list(reversed(app.ppeak_entry)) #revert back to long, lat order
phprange = app.phprange/2 #Program uses half diagonals
psealevel = app.psealevel
plahar_dir = app.plahar_dir
pinitpoints_fn = app.pinitpoints_fn
plog_fn = app.plog_fn
pecraw_fn = app.pecraw_fn
pecfilled_fn = app.pecfilled_fn
pecline_fn = app.pecline_fn
pplotmesh = app.pplotmesh
pmesh_dir = app.pmesh_dir
pmeshres = app.pmeshres
pmeshsize = app.pmeshsize
pcalcinitpoints = app.pcalcinitpoints
puserowcol = app.puserowcol
piponly = app.piponly
# Prefix the file names entered on screen with the working directory path.
pinitpoints_fn = "../{}/{}".format(pwdir, pinitpoints_fn)
plog_fn = "../{}/{}".format(pwdir, plog_fn)
ptextcrs = "EPSG:4326"
pwritetif = True # write output as tif files
pwriteascii = False # write output as ascii files
pwritecsv = False # write output as csv files
# Cross Sectional Area Parameters - parameters do not come from screen entry - code only - for debugging
pplotxsecarea = False # draw plots of cross sectional areas
pxsecareafolder = ("XSecArea") # folder to store plots in. If it doesn't exist create it as PIL doesn't create folder
pxsecareafolder = "../{}/{}".format(pwdir, pxsecareafolder)
pplotip = 13 # which initiation point to draw the plots for
pplotvol = 1e4 # which volume to draw the plots for
pxsec_fn = "Cross Section Points.csv"
pxsec_fn = "{}/{}".format(pxsecareafolder, pxsec_fn)
# Additional Mesh Parameters
p_diagonal = "SW-NE" # which direction the diagonal is drawn on the mesh. Any other value is SE-NW
pmeshrescol = pmeshres #uses the same resolution for both directions from the screen parameter
pmeshresrow = pmeshres
log_msg("LaharZ Starting", initfile = True)
log_msg("Parameters", screen_op = False)
# Record every run parameter in the log file (not on screen).  The messages
# are built up-front and emitted in a single loop; the text of each line is
# identical to what the original sequence of log_msg calls produced.
_param_msgs = (
    'Parameter: pwdir; Working directory; Value: ' + pwdir,
    'Parameter: pdem_fn; DEM file; Value: ' + pdem_fn,
    'Parameter: pthal_fn; Stream (or thalweg) file; Value: ' + pthal_fn,
    'Parameter: pflow_fn; Flow file; Value: ' + pflow_fn,
    'Parameter: pvolume; Volumes; Value: ' + str(pvolume),
    'Parameter: phlratio; H/H Ratio; Value: ' + str(phlratio),
    # reversed again so the log shows lat, long order to match the input
    'Parameter: ppeak_entry; Entered values for peak; Value: ' + str(list(reversed(ppeak_entry))),
    # x2 to match the input; the program itself works with half diagonals
    'Parameter: phprange; Range to search; Value: ' + str(phprange*2),
    'Parameter: psealevel; Sea level; Value: ' + str(psealevel),
    'Parameter: plahar_dir; Lahar Directory; Value: ' + plahar_dir,
    'Parameter: pinitpoints_fn; Inititation Points; Value: ' + pinitpoints_fn,
    'Parameter: plog_fn; Log file; Value: ' + plog_fn,
    'Parameter: pecraw_fn; Energy Cone (Raw) file; Value: ' + pecraw_fn,
    'Parameter: pecfilled_fn; Energy Cone (Filled) file; Value: ' + pecfilled_fn,
    'Parameter: pecline_fn; Energy Cone (Line) file; Value: ' + pecline_fn,
    'Parameter: pplotmesh; Flag to plot mesh; Value: ' + str(pplotmesh),
    'Parameter: pmesh_dir; Mesh directory; Value: ' + pmesh_dir,
    'Parameter: pmeshres; Mesh resolution; Value: ' + str(pmeshres),
    'Parameter: pmeshsize; Mesh Extent; Value: ' + str(pmeshsize),
    'Parameter: pcalcinitpoints; Calculate or Load initiation points; Value: ' + pcalcinitpoints,
    'Parameter: puserowcol; Use row and columns (otherwise Latitude and longitude) from loaded initiation points; Value: ' + str(puserowcol),
    'Parameter: piponly; Flag to calculate initiation points only; Value: ' + str(piponly),
    'Parameter: pinitpoints_fn; Initiation points file name; Value: ' + pinitpoints_fn,
    'Parameter: ptextcrs; Coordinate Reference System to use for Ascii (Text) files; Value: ' + ptextcrs,
    'Parameter: pwritetif; Flag to write outputs in tif files; Value: ' + str(pwritetif),
    'Parameter: pwriteascii; Flag to write outputs in Ascii (Text); Value: ' + str(pwriteascii),
    'Parameter: pwritecsv; Flag to write outputs in csv; Value: ' + str(pwritecsv),
    'Parameter: pplotxsecarea; Flag to plot Cross Sectional Areas; Value: ' + str(pplotxsecarea),
    'Parameter: pxsecareafolder; Folder for cross sectional areas; Value: ' + pxsecareafolder,
    'Parameter: pplotip; Initial point to plot cross sectional areas for; Value: ' + str(pplotip),
    'Parameter: pplotvol; Volume to plot cross sectional areas for; Value: ' + str(pplotvol),
    'Parameter: pxsec_fn; CSV file of all points in a lahar where the cross sectional area has been plotted; Value: ' + pxsec_fn,
    'Parameter: p_diagonal; Diagonal to use on mesh; Value: ' + p_diagonal,
    'Parameter: pmeshrescol; Mesh resolution (columns); Value: ' + str(pmeshrescol),
    'Parameter: pmeshresrow; Mesh resolution (rows); Value: ' + str(pmeshresrow),
)
for _param_msg in _param_msgs:
    log_msg(_param_msg, screen_op = False)
# Create the output folders this run will need.  os.makedirs(...,
# exist_ok=True) replaces the original isdir-then-makedirs pattern, which
# was repeated three times and racy between the check and the create.
if not piponly:
    os.makedirs("../{}/{}".format(pwdir, plahar_dir), exist_ok=True)
if pplotxsecarea:
    # NOTE(review): pxsecareafolder already carries a "../<pwdir>/" prefix
    # where it is first assigned, so this path double-prefixes; it normalizes
    # to the same directory, so the original string is kept unchanged.
    os.makedirs("../{}/{}".format(pwdir, pxsecareafolder), exist_ok=True)
if pplotmesh:
    os.makedirs("../{}/{}".format(pwdir, pmesh_dir), exist_ok=True)
# Initialisations ##########################################################################################################
log_msg("Initialisations...")
kml = simplekml.Kml()
geod = pyproj.Geod(ellps='WGS84') # used as the projection method to determine distances between two points
# Load DEM #################################################################################################################
log_msg("Loading DEM...")
# LoadFile presumably returns (raster object, CRS, value array) — the
# three-way unpack below relies on that; verify against its definition.
dem_f, dem_crs, dem_v = LoadFile(pdem_fn)
# todo 1) find a method of determining the cell size from the input file. Using the transform method is different depending on the
# projection method. It will return meters or degrees. Currently the cell distance is just calculated from the distance between
# two cells. This results in a slightly different value than using the transform method
# todo 2) currently assumes cells in the matrix are square. This may not be appropriate for some project methods/tif files
# dem_cell_size = dem_f.transform[0]
dem_cell_size = rcdist((0, 0), (0, 1)) # distance in x direction, ie one column
nrows = dem_v.shape[0]
ncols = dem_v.shape[1]
# Lat/long of the two opposite corners of the DEM.
dem_se_ll = rc2ll((nrows - 1, ncols - 1))
dem_nw_ll = rc2ll((0, 0))
PolygonDEM(kml, dem_nw_ll, dem_se_ll) # creates a polygon (rectangle) of the DEM area for reading in google maps
# Load Flow Direction file
log_msg("Loading Flow Direction file...")
flowdir_f, flowdir_crs, flowdir_v = LoadFile(pflow_fn)
# The flow-direction raster must match the DEM cell-for-cell; abort if not.
if flowdir_v.shape != dem_v.shape or dem_crs != flowdir_crs:
    log_msg("Error - mismatch in raster size and projection between DEM file ({}) and Flow Direction file ({})".format(pdem_fn, pflow_fn), errmsg = True)
    return
# 'calc' mode: derive the initiation points from the stream network and the
# energy cone; the matching else branch loads them from a file instead.
if pcalcinitpoints == 'calc':
    log_msg("Loading Stream file...")
    thal_f, thal_crs, thal_v = LoadFile(pthal_fn)
    if thal_v.shape != dem_v.shape or dem_crs != thal_crs:
        log_msg("Error - mismatch in raster size and projection between DEM file ({}) and Stream file ({})".format(pdem_fn, pthal_fn), errmsg = True)
        return
    # Reduce the stream raster to a 0/1 mask: 1 = stream cell, 0 = not.
    thal_v[thal_v == 65535] = 0 # set all 'NaN' values of #FFFF to zero #todo this is necessary for some outputs from GRASS r.fillnull
    thal_v[thal_v < 0] = 0 # set all negatives to zero
    thal_v[thal_v > 0] = 1 # set all stream numbers to 1
    # Find peak
    # Adds peak and the search box to the kml file for display in google earth
    log_msg("Determining High Point...")
    AddPointKML(kml, ppeak_entry, "Entry Point")
    search_nw_ll = lltransform(ppeak_entry, phprange, 315) # nw corner of search box
    AddPointKML(kml, search_nw_ll, "Search NW")
    search_se_ll = lltransform(ppeak_entry, phprange, 135) # se corner of search box
    AddPointKML(kml, search_se_ll, "Search SE")
    AddPolyKML(kml, search_nw_ll, search_se_ll, "Search Area", "4f0000ff")
    search_nw_rc = ll2rc(search_nw_ll)
    search_se_rc = ll2rc(search_se_ll)
    # NOTE(review): the column tests use "<= ncols" while the row tests use
    # "< nrows" — possibly an off-by-one; harmless for the slice below, which
    # clamps, but worth confirming.
    if not (0 <= search_nw_rc[0] < nrows and 0 <= search_nw_rc[1] <= ncols and 0 <= search_se_rc[0] < nrows and 0 <= search_se_rc[1] <= ncols):
        log_msg("Error: Search area for high point is not wholly within the DEM file", errmsg = True)
        return
    search_area = dem_v[search_nw_rc[0]:search_se_rc[0] + 1, search_nw_rc[1]:search_se_rc[1] + 1] # area to search for highest point
    peak_height = np.amax(search_area) # height of highest point
    sarearc = np.where(search_area == peak_height) # row and column of highest point in search box
    # Translate the in-search-box indices back to whole-DEM row/column.
    peakrc = np.zeros(2)
    peakrc[0] = sarearc[0][0] + search_nw_rc[0] # row and column in overall table
    peakrc[1] = sarearc[1][0] + search_nw_rc[1]
    peakll = rc2ll(peakrc)
    AddPointKML(kml, peakll, "High Point")
    kml.save("KMLFile.kml")
    # Determine Energy Cone
    # A cell is inside the cone when the height drop from the peak divided by
    # the distance from the peak exceeds the H/L ratio.  Progress is shown in
    # a temporary ttk progress bar on the GUI.
    log_msg("Generating Energy Cone...")
    pb = ttk.Progressbar(app.frame, orient='horizontal', mode='determinate', length=400)
    pb.grid(row=37, column=3, columnspan=2, sticky='W')
    pb["value"] = 0
    app.update()
    ecraw_v = np.zeros_like(dem_v)
    # todo might be very clever way to do this in a single numpy statement rather than cycling through array. Would be much faster
    # for i in tqdm.tqdm(range(nrows)):
    for i in range(nrows):
        # print("{}/{}".format(i, dem_v.shape[0]))
        for j in range(ncols):
            l = rcdist(peakrc, [i, j])
            h = peak_height - dem_v[i, j]
            if l != 0:
                if h / l > phlratio:
                    ecraw_v[i, j] = 1
            else:
                ecraw_v[i, j] = 1 # peak itself
        # Update the progress bar once per row and let the GUI repaint.
        pb["value"] = i/nrows*100+1
        app.update()
    pb.destroy()
    app.update()
    SaveFile(pecraw_fn, dem_f, ecraw_v)
    log_msg("Filling Energy Cone...")
    ecfilled_v = binary_fill_holes(ecraw_v)
    SaveFile(pecfilled_fn, dem_f, ecfilled_v)
    log_msg("Generating Energy Cone Line...")
    # The cone outline = cone cells minus the eroded (shrunk-by-one) cone.
    ecline_v = binary_erosion(input=ecfilled_v)
    ecline_v = np.logical_and(ecraw_v, np.logical_not(ecline_v))
    SaveFile(pecline_fn, dem_f, ecline_v)
# inititiation points
log_msg("Generating Initiation Points...")
ec_points = np.argwhere(ecline_v == 1)
op_ec_points(ec_points) #creates csv file of ec_points for debugging
initrc = []
ip_counter = 1
for ii, i in enumerate(ec_points): #to allow breakpoints to be set at specific points for debugging
# checks if the ec line and the thalweg share the same cell in the matix: if so, they cross and hence an initiation point
# but also checks if the lines cross in an x where they don;t share the same cell. This is done in each direction.
if thal_v[tuple(i)] == 1:
initrc.append([ip_counter, i[0], i[1]])
ip_counter += 1
else:
# a b c
# d e f
# g h i
# nw direction
ip_found = False
if i[0] < nrows and i[1] < ncols:
b = [i[0] + 1, i[1]]
c = [i[0] + 1, i[1] + 1]
f = [i[0], i[1] + 1]
if (ecline_v[tuple(b)] == 0 and thal_v[tuple(b)] == 1 and
ecline_v[tuple(c)] == 1 and thal_v[tuple(c)] == 0 and
ecline_v[tuple(f)] == 0 and thal_v[tuple(f)] == 1):
initrc.append([ip_counter, i[0], i[1]])
log_msg("Adding extra ip", ip_counter)
ip_counter += 1
ip_found = True
# se direction
if i[0] > 0 and i[1] < ncols and not ip_found:
f = [i[0], i[1] + 1]
h = [i[0] - 1, i[1]]
i = [i[0] - 1, i[1] + 1]
if (ecline_v[tuple(f)] == 0 and thal_v[tuple(f)] == 1 and
ecline_v[tuple(h)] == 0 and thal_v[tuple(h)] == 1 and
ecline_v[tuple(i)] == 1 and thal_v[tuple(i)] == 0):
initrc.append([ip_counter, i[0], i[1]])
log_msg("Adding extra ip", ip_counter)
ip_counter += 1
ip_found = True
# sw direction
if i[0] > 0 and i[1] > 0 and not ip_found:
d = [i[0], i[1] - 1]
g = [i[0] - 1, i[1] - 1]
h = [i[0] - 1, i[1]]
if (ecline_v[tuple(d)] == 0 and thal_v[tuple(d)] == 1 and
ecline_v[tuple(g)] == 1 and thal_v[tuple(g)] == 0 and
ecline_v[tuple(i)] == 0 and thal_v[tuple(h)] == 1):
initrc.append([ip_counter, i[0], i[1]])
log_msg("Adding extra ip", ip_counter)
ip_counter += 1
ip_found = True
# ne direction
if i[0] < nrows and i[1] > 0 and not ip_found:
a = [i[0] + 1, i[1] - 1]
b = [i[0] + 1, i[1]]
d = [i[0], i[1] - 1]
if (ecline_v[tuple(a)] == 1 and thal_v[tuple(a)] == 0 and
ecline_v[tuple(b)] == 0 and thal_v[tuple(b)] == 1 and
ecline_v[tuple(d)] == 0 and thal_v[tuple(d)] == 1):
initrc.append([ip_counter, i[0], i[1]])
log_msg("Adding extra ip", ip_counter)
ip_counter += 1
ip_found = True
initrc = np.array(initrc)
# Initiation points and peak saved in a csv file. This can be edited and reloaded
ipcsv = csv.writer(open(pinitpoints_fn, "w"), delimiter=',', dialect="excel", quoting=csv.QUOTE_MINIMAL)
ipcsv.writerow(["Label", "Latitude", "Longitude", "Number", "Row", "Column"])
ipcsv.writerow(["Peak", peakll[1], peakll[0], "", peakrc[0], peakrc[1]])
for i, irc in enumerate(initrc):
ll = rc2ll(irc[1:3]) # this programme uses long, lat to match x,y directions. Presented as Lat, Long in all output
ipcsv.writerow(["IP{:02d}".format(irc[0]), ll[1], ll[0], irc[0], irc[1], irc[2]])
    if pplotmesh:
        # Determine the parameters to use for the size of the mesh files:
        # the bounding box of all initiation points, expanded about its
        # centre by the pmeshsize factor and clamped to the DEM extent.
        maxr = np.amax(initrc[:, 1])
        minr = np.amin(initrc[:, 1])
        centr = int((maxr + minr) / 2)
        maxc = np.amax(initrc[:, 2])
        minc = np.amin(initrc[:, 2])
        centc = int((maxc + minc) / 2)
        # Upper/lower row and column limits of the mesh window.
        mesh_ur = int(min(centr + (maxr - centr) * pmeshsize, nrows - 1))
        mesh_lr = int(max(centr - (centr - minr) * pmeshsize, 0))
        mesh_uc = int(min(centc + (maxc - centc) * pmeshsize, ncols - 1))
        mesh_lc = int(max(centc - (centc - minc) * pmeshsize, 0))
        mesh_rows = mesh_ur - mesh_lr + 1
        mesh_cols = mesh_uc - mesh_lc + 1
        # The mesh builders read the mesh_* variables set above.
        CreateSurfaceMesh()
        CreateEnergyConeMesh()
else:
    # Load initiation points from file. This is intended for quicker execution when the same scenario is
    # rerun. There are no checks that the initiation points in the file were originally generated using the
    # same DEM and Flow files, nor that the values read in fall within the
    # DEM and Accumulation files - i.e. these files could be edited to reflect points outside the DEM and Accumulation
    # files; this will result in errors.
    log_msg("Loading Initiation Points...")
    # skiprows=2 skips the header row and the "Peak" row written by the
    # 'calc' branch; usecols takes Latitude..Column (columns 1-5).
    ipload = np.loadtxt(pinitpoints_fn, delimiter=',', dtype="float", skiprows=2, usecols=np.arange(1, 6))
    if ipload.ndim == 1: # a single line is enumerated by element rather than by line
        ipload = np.reshape(ipload, (1, ipload.size))
    initrc = np.empty([0, 3], dtype="int")
    for i, line in enumerate(ipload):
        if puserowcol:
            r_in, c_in = line[3:5]
        else:
            # file is sequenced latitude and longitude for presentation consistency. The programme uses
            # longitude and latitude to align to x,y axis. Hence the order read in needs to be reversed.
            r_in, c_in = ll2rc([line[1], line[0]])
        # Keep only points that fall inside the DEM grid.
        if 0 <= r_in < nrows and 0 <= c_in < ncols:
            initrc = np.append(initrc, [[line[2], r_in, c_in]], axis=0)
        else:
            log_msg("Warning: Point {} ignored as it is outside the DEM file".format(line[2]))
    initrc = initrc.astype('int') #reverting to integer as append converts to float64
if not piponly: # if not calculate initiation points only
    if len(initrc)> 0:
        # Calculate Lahars
        # For every requested volume, run GenLahar from every initiation
        # point; write one raster per (point, volume), one combined raster
        # per volume, and one overall "Total" raster.
        log_msg("Generating Lahars...")
        innund_total_v = np.zeros_like(dem_v) # array for total innundations
        for v1, v in enumerate(pvolume):
            innund_vol_v = np.zeros_like(dem_v) # array for all innundations for a particular volume
            for i, ip in enumerate(initrc):
                log_msg("Generating Lahar. Initiation Point: {} {}/{} Volume: {:.2e} {}/{} ".format(ip[0], i + 1, initrc.shape[0], v, v1 + 1, len(pvolume)))
                innund_v = GenLahar(v, ip)
                # Build a file-system-safe name: strip '+' and turn '.' into '-'.
                ofn = "{}/IP{:02d}-V{:.2e}".format(plahar_dir, ip[0], v).replace("+", "").replace(".", "-")
                ll = rc2ll(ip[1:3])
                log_msg("IP{:02d} Latitude: {} Longitude: {} Row: {} Column: {} Volume: {} Filename: {}".format(ip[0], ll[1], ll[0], ip[1], ip[2], v, ofn), screen_op=False) # todo file extension?
                log_msg("Writing : {}...".format(ofn), screen_op=False)
                SaveFile(ofn, dem_f, innund_v)
                # First-writer-wins merge: cells already claimed by an earlier
                # initiation point keep that point's number.
                innund_vol_v = np.where(innund_vol_v == 0, innund_v * ip[0], innund_vol_v) # add innundation for one initiation point and volume to the overall volume array
            # save overall volume file
            ofn = "{}/V{:.2e}".format(plahar_dir, v).replace("+", "").replace(".", "-")
            log_msg("Writing : {}...".format(ofn))
            SaveFile(ofn, dem_f, innund_vol_v)
            # Same first-writer-wins merge, keyed by volume index (1-based).
            innund_total_v = np.where(innund_total_v == 0, (innund_vol_v > 0) * (v1 + 1), innund_total_v) # add volume innundation to the total innundation
        # save total innundation array
        ofn = "{}/Total".format(plahar_dir)
        log_msg("Writing : {}...".format(ofn))
        SaveFile(ofn, dem_f, innund_total_v)
        log_msg("Finished")
    else:
        log_msg("No valid initiation points")
else:
    log_msg("Finished")
# Main
# Ensure a DISPLAY is set so tkinter can start on X setups where the
# environment variable is missing.
if os.environ.get('DISPLAY','') == '':
    #print('no display found. Using :0.0')
    # Idiomatic item assignment (the original called __setitem__ directly).
    os.environ['DISPLAY'] = ':0.0'
#create main window
param1 = tk.Tk()
param1.title("LaharZ")
param1.geometry("1920x1080")
#create a frame holding the whole LaharZ form
app = Application(param1)
app.pack(side="top", fill="both", expand=True)
# Run the tk event loop until the window is closed.
param1.mainloop()
|
[
"tkinter.StringVar",
"PIL.Image.new",
"numpy.amin",
"numpy.empty",
"scipy.ndimage.binary_fill_holes",
"tkinter.ttk.Progressbar",
"gmsh.model.add",
"numpy.shape",
"os.path.isfile",
"tkinter.BooleanVar",
"numpy.arange",
"tkinter.Frame",
"gmsh.finalize",
"tkinter.Label",
"tkinter.Checkbutton",
"simplekml.Kml",
"numpy.zeros_like",
"gmsh.model.geo.addPoint",
"tkinter.Button",
"tkinter.Entry",
"numpy.savetxt",
"numpy.logical_not",
"numpy.append",
"rasterio.rio.helpers.resolve_inout",
"tkinter.Toplevel",
"numpy.reshape",
"PIL.ImageDraw.Draw",
"datetime.datetime.now",
"tkinter.Tk",
"pyproj.Geod",
"tkinter.Frame.__init__",
"gmsh.model.mesh.generate",
"gmsh.model.geo.synchronize",
"rasterio.Env",
"gmsh.write",
"numpy.argwhere",
"sys.exit",
"os.environ.__setitem__",
"rasterio.open",
"scipy.ndimage.binary_erosion",
"tkinter.Canvas",
"os.path.isdir",
"gmsh.model.geo.addLine",
"numpy.zeros",
"tkinter.Scrollbar",
"gmsh.model.geo.addCurveLoop",
"os.environ.get",
"PIL.ImageFont.truetype",
"numpy.amax",
"numpy.where",
"numpy.array",
"pyproj.Proj",
"tkinter.Radiobutton",
"PIL.Image.fromarray",
"gmsh.initialize",
"gmsh.model.geo.addPlaneSurface"
] |
[((100048, 100055), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (100053, 100055), True, 'import tkinter as tk\n'), ((86104, 86119), 'simplekml.Kml', 'simplekml.Kml', ([], {}), '()\n', (86117, 86119), False, 'import simplekml\n'), ((86131, 86157), 'pyproj.Geod', 'pyproj.Geod', ([], {'ellps': '"""WGS84"""'}), "(ellps='WGS84')\n", (86142, 86157), False, 'import pyproj\n'), ((99893, 99922), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', '""""""'], {}), "('DISPLAY', '')\n", (99907, 99922), False, 'import os\n'), ((99976, 100017), 'os.environ.__setitem__', 'os.environ.__setitem__', (['"""DISPLAY"""', '""":0.0"""'], {}), "('DISPLAY', ':0.0')\n", (99998, 100017), False, 'import os\n'), ((3435, 3466), 'tkinter.Frame.__init__', 'tk.Frame.__init__', (['self', 'master'], {}), '(self, master)\n', (3452, 3466), True, 'import tkinter as tk\n'), ((3566, 3596), 'tkinter.Canvas', 'tk.Canvas', (['self'], {'borderwidth': '(0)'}), '(self, borderwidth=0)\n', (3575, 3596), True, 'import tkinter as tk\n'), ((3685, 3706), 'tkinter.Frame', 'tk.Frame', (['self.canvas'], {}), '(self.canvas)\n', (3693, 3706), True, 'import tkinter as tk\n'), ((3726, 3790), 'tkinter.Scrollbar', 'tk.Scrollbar', (['self'], {'orient': '"""vertical"""', 'command': 'self.canvas.yview'}), "(self, orient='vertical', command=self.canvas.yview)\n", (3738, 3790), True, 'import tkinter as tk\n'), ((4633, 4677), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (4641, 4677), True, 'import tkinter as tk\n'), ((4820, 4946), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Working directory for this run. Should contain the input files. """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'Working directory for this run. Should contain the input files. 
',\n font=('Helvetica', 12))\n", (4828, 4946), True, 'import tkinter as tk\n'), ((5191, 5235), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (5199, 5235), True, 'import tkinter as tk\n'), ((5396, 5522), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""File name of where you load the parameters from, if you wish to """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'File name of where you load the parameters from, if you wish to ',\n font=('Helvetica', 12))\n", (5404, 5522), True, 'import tkinter as tk\n'), ((5637, 5695), 'tkinter.Button', 'tk.Button', (['self.frame'], {'text': '"""Load"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Load', font=('Helvetica', 12))\n", (5646, 5695), True, 'import tkinter as tk\n'), ((6268, 6312), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (6276, 6312), True, 'import tkinter as tk\n'), ((6463, 6569), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Name of your DEM file in your working directory """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'Name of your DEM file in your working directory ', font=('Helvetica', 12))\n", (6471, 6569), True, 'import tkinter as tk\n'), ((6812, 6856), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (6820, 6856), True, 'import tkinter as tk\n'), ((7011, 7125), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Name of your Stream file in your working directory """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'Name of your Stream file in your working directory ', font=(\n 'Helvetica', 12))\n", (7019, 7125), True, 'import tkinter as tk\n'), ((7360, 7404), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (7368, 7404), True, 'import tkinter as tk\n'), 
((7559, 7671), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Name of your Flow file in your working directory """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'Name of your Flow file in your working directory ', font=('Helvetica', 12)\n )\n", (7567, 7671), True, 'import tkinter as tk\n'), ((7899, 7943), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (7907, 7943), True, 'import tkinter as tk\n'), ((8094, 8195), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Volumes (m^3) in a list separated by commas """', 'font': "('Helvetica', 12)"}), "(self.frame, text='Volumes (m^3) in a list separated by commas ',\n font=('Helvetica', 12))\n", (8102, 8195), True, 'import tkinter as tk\n'), ((8436, 8480), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (8444, 8480), True, 'import tkinter as tk\n'), ((8636, 8734), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""H/L Ratios normally between 0.2 and 0.3 """', 'font': "('Helvetica', 12)"}), "(self.frame, text='H/L Ratios normally between 0.2 and 0.3 ', font=\n ('Helvetica', 12))\n", (8644, 8734), True, 'import tkinter as tk\n'), ((8969, 9013), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (8977, 9013), True, 'import tkinter as tk\n'), ((9181, 9285), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Approximate latitude and longitude of the peak """', 'font': "('Helvetica', 12)"}), "(self.frame, text='Approximate latitude and longitude of the peak ',\n font=('Helvetica', 12))\n", (9189, 9285), True, 'import tkinter as tk\n'), ((9543, 9587), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (9551, 9587), True, 'import tkinter as tk\n'), ((9743, 9832), 'tkinter.Label', 'tk.Label', (['self.frame'], 
{'text': '"""Length of search diagonal in m """', 'font': "('Helvetica', 12)"}), "(self.frame, text='Length of search diagonal in m ', font=(\n 'Helvetica', 12))\n", (9751, 9832), True, 'import tkinter as tk\n'), ((10075, 10119), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (10083, 10119), True, 'import tkinter as tk\n'), ((10279, 10347), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Sea Level in m """', 'font': "('Helvetica', 12)"}), "(self.frame, text='Sea Level in m ', font=('Helvetica', 12))\n", (10287, 10347), True, 'import tkinter as tk\n'), ((10887, 10931), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (10895, 10931), True, 'import tkinter as tk\n'), ((11095, 11224), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Directory which contains the lahar files in your working directory """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'Directory which contains the lahar files in your working directory ',\n font=('Helvetica', 12))\n", (11103, 11224), True, 'import tkinter as tk\n'), ((11486, 11530), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (11494, 11530), True, 'import tkinter as tk\n'), ((11710, 11803), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""File name of the initiation points """', 'font': "('Helvetica', 12)"}), "(self.frame, text='File name of the initiation points ', font=(\n 'Helvetica', 12))\n", (11718, 11803), True, 'import tkinter as tk\n'), ((12048, 12092), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (12056, 12092), True, 'import tkinter as tk\n'), ((12244, 12338), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""File name of the log of all details """', 'font': "('Helvetica', 12)"}), 
"(self.frame, text='File name of the log of all details ', font=(\n 'Helvetica', 12))\n", (12252, 12338), True, 'import tkinter as tk\n'), ((12592, 12636), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (12600, 12636), True, 'import tkinter as tk\n'), ((12796, 12887), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""File name of the Raw Energy Cone """', 'font': "('Helvetica', 12)"}), "(self.frame, text='File name of the Raw Energy Cone ', font=(\n 'Helvetica', 12))\n", (12804, 12887), True, 'import tkinter as tk\n'), ((13152, 13196), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (13160, 13196), True, 'import tkinter as tk\n'), ((13368, 13462), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""File name of the Filled Energy Cone """', 'font': "('Helvetica', 12)"}), "(self.frame, text='File name of the Filled Energy Cone ', font=(\n 'Helvetica', 12))\n", (13376, 13462), True, 'import tkinter as tk\n'), ((13724, 13768), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (13732, 13768), True, 'import tkinter as tk\n'), ((13932, 14024), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""File name of the Energy Cone Line """', 'font': "('Helvetica', 12)"}), "(self.frame, text='File name of the Energy Cone Line ', font=(\n 'Helvetica', 12))\n", (13940, 14024), True, 'import tkinter as tk\n'), ((14421, 14456), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {'value': 'self.pplotmesh'}), '(value=self.pplotmesh)\n', (14434, 14456), True, 'import tkinter as tk\n'), ((14645, 14744), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Check if you wish to create 3D mesh files """', 'font': "('Helvetica', 12)"}), "(self.frame, text='Check if you wish to create 3D mesh files ',\n font=('Helvetica', 12))\n", (14653, 14744), True, 'import 
tkinter as tk\n'), ((14999, 15043), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (15007, 15043), True, 'import tkinter as tk\n'), ((15203, 15331), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Directory which contains the mesh files in your working directory """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'Directory which contains the mesh files in your working directory ',\n font=('Helvetica', 12))\n", (15211, 15331), True, 'import tkinter as tk\n'), ((15582, 15626), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (15590, 15626), True, 'import tkinter as tk\n'), ((15782, 15899), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Mesh resolution (number of points in x & y direction) """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'Mesh resolution (number of points in x & y direction) ', font=(\n 'Helvetica', 12))\n", (15790, 15899), True, 'import tkinter as tk\n'), ((16141, 16185), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (16149, 16185), True, 'import tkinter as tk\n'), ((16345, 16486), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""What extent to plot the mesh (1.3 = 130% of the area of the energy cone line) """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'What extent to plot the mesh (1.3 = 130% of the area of the energy cone line) '\n , font=('Helvetica', 12))\n", (16353, 16486), True, 'import tkinter as tk\n'), ((16903, 16917), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (16915, 16917), True, 'import tkinter as tk\n'), ((17478, 17514), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {'value': 'self.puserowcol'}), '(value=self.puserowcol)\n', (17491, 17514), True, 'import tkinter as tk\n'), ((17705, 17866), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': 
'"""Check to use rows and columns if loading initiation points; otherwise uses latitude and longitude """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'Check to use rows and columns if loading initiation points; otherwise uses latitude and longitude '\n , font=('Helvetica', 12))\n", (17713, 17866), True, 'import tkinter as tk\n'), ((17997, 18012), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {}), '()\n', (18010, 18012), True, 'import tkinter as tk\n'), ((18491, 18535), 'tkinter.Entry', 'tk.Entry', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (18499, 18535), True, 'import tkinter as tk\n'), ((18697, 18823), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""File name of where you load the parameters from, if you wish to """', 'font': "('Helvetica', 12)"}), "(self.frame, text=\n 'File name of where you load the parameters from, if you wish to ',\n font=('Helvetica', 12))\n", (18705, 18823), True, 'import tkinter as tk\n'), ((18939, 18997), 'tkinter.Button', 'tk.Button', (['self.frame'], {'text': '"""Save"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Save', font=('Helvetica', 12))\n", (18948, 18997), True, 'import tkinter as tk\n'), ((19176, 19236), 'tkinter.Button', 'tk.Button', (['self.frame'], {'text': '"""Submit"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Submit', font=('Helvetica', 12))\n", (19185, 19236), True, 'import tkinter as tk\n'), ((19403, 19447), 'tkinter.Label', 'tk.Label', (['self.frame'], {'font': "('Helvetica', 12)"}), "(self.frame, font=('Helvetica', 12))\n", (19411, 19447), True, 'import tkinter as tk\n'), ((20489, 20512), 'os.path.isfile', 'os.path.isfile', (['pdem_fn'], {}), '(pdem_fn)\n', (20503, 20512), False, 'import os\n'), ((20996, 21020), 'os.path.isfile', 'os.path.isfile', (['pthal_fn'], {}), '(pthal_fn)\n', (21010, 21020), False, 'import os\n'), ((21510, 21534), 'os.path.isfile', 'os.path.isfile', (['pflow_fn'], {}), '(pflow_fn)\n', (21524, 21534), False, 
'import os\n'), ((40825, 40849), 'os.path.isfile', 'os.path.isfile', (['pload_fn'], {}), '(pload_fn)\n', (40839, 40849), False, 'import os\n'), ((48532, 48555), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (48553, 48555), False, 'import datetime\n'), ((52634, 52662), 'PIL.Image.fromarray', 'Image.fromarray', (['data', '"""RGB"""'], {}), "(data, 'RGB')\n", (52649, 52662), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((54552, 54604), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(pwidth, pheight)'], {'color': '"""#000000"""'}), "('RGB', (pwidth, pheight), color='#000000')\n", (54561, 54604), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((54621, 54648), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img', '"""RGBA"""'], {}), "(img, 'RGBA')\n", (54635, 54648), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((54664, 54699), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', '(16)'], {}), "('arial.ttf', 16)\n", (54682, 54699), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((59049, 59074), 'gmsh.initialize', 'gmsh.initialize', (['sys.argv'], {}), '(sys.argv)\n', (59064, 59074), False, 'import gmsh\n'), ((59083, 59106), 'gmsh.model.add', 'gmsh.model.add', (['"""3DDEM"""'], {}), "('3DDEM')\n", (59097, 59106), False, 'import gmsh\n'), ((62742, 62770), 'gmsh.model.geo.synchronize', 'gmsh.model.geo.synchronize', ([], {}), '()\n', (62768, 62770), False, 'import gmsh\n'), ((62779, 62806), 'gmsh.model.mesh.generate', 'gmsh.model.mesh.generate', (['(3)'], {}), '(3)\n', (62803, 62806), False, 'import gmsh\n'), ((62894, 62954), 'gmsh.write', 'gmsh.write', (["('../' + pwdir + '/' + pmesh_dir + '/Surface.vtk')"], {}), "('../' + pwdir + '/' + pmesh_dir + '/Surface.vtk')\n", (62904, 62954), False, 'import gmsh\n'), ((63029, 63044), 'gmsh.finalize', 'gmsh.finalize', ([], {}), '()\n', (63042, 63044), False, 'import gmsh\n'), ((63216, 63241), 'gmsh.initialize', 'gmsh.initialize', (['sys.argv'], {}), 
'(sys.argv)\n', (63231, 63241), False, 'import gmsh\n'), ((63250, 63274), 'gmsh.model.add', 'gmsh.model.add', (['"""3DCone"""'], {}), "('3DCone')\n", (63264, 63274), False, 'import gmsh\n'), ((67177, 67205), 'gmsh.model.geo.synchronize', 'gmsh.model.geo.synchronize', ([], {}), '()\n', (67203, 67205), False, 'import gmsh\n'), ((67214, 67241), 'gmsh.model.mesh.generate', 'gmsh.model.mesh.generate', (['(3)'], {}), '(3)\n', (67238, 67241), False, 'import gmsh\n'), ((67326, 67383), 'gmsh.write', 'gmsh.write', (["('../' + pwdir + '/' + pmesh_dir + '/Cone.vtk')"], {}), "('../' + pwdir + '/' + pmesh_dir + '/Cone.vtk')\n", (67336, 67383), False, 'import gmsh\n'), ((67392, 67407), 'gmsh.finalize', 'gmsh.finalize', ([], {}), '()\n', (67405, 67407), False, 'import gmsh\n'), ((67992, 68012), 'numpy.zeros_like', 'np.zeros_like', (['dem_v'], {}), '(dem_v)\n', (68005, 68012), True, 'import numpy as np\n'), ((89392, 89412), 'numpy.amax', 'np.amax', (['search_area'], {}), '(search_area)\n', (89399, 89412), True, 'import numpy as np\n'), ((89458, 89494), 'numpy.where', 'np.where', (['(search_area == peak_height)'], {}), '(search_area == peak_height)\n', (89466, 89494), True, 'import numpy as np\n'), ((89561, 89572), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (89569, 89572), True, 'import numpy as np\n'), ((89915, 89994), 'tkinter.ttk.Progressbar', 'ttk.Progressbar', (['app.frame'], {'orient': '"""horizontal"""', 'mode': '"""determinate"""', 'length': '(400)'}), "(app.frame, orient='horizontal', mode='determinate', length=400)\n", (89930, 89994), False, 'from tkinter import ttk\n'), ((90119, 90139), 'numpy.zeros_like', 'np.zeros_like', (['dem_v'], {}), '(dem_v)\n', (90132, 90139), True, 'import numpy as np\n'), ((90930, 90956), 'scipy.ndimage.binary_fill_holes', 'binary_fill_holes', (['ecraw_v'], {}), '(ecraw_v)\n', (90947, 90956), False, 'from scipy.ndimage import binary_erosion, binary_fill_holes\n'), ((91077, 91109), 'scipy.ndimage.binary_erosion', 'binary_erosion', ([], 
{'input': 'ecfilled_v'}), '(input=ecfilled_v)\n', (91091, 91109), False, 'from scipy.ndimage import binary_erosion, binary_fill_holes\n'), ((91327, 91353), 'numpy.argwhere', 'np.argwhere', (['(ecline_v == 1)'], {}), '(ecline_v == 1)\n', (91338, 91353), True, 'import numpy as np\n'), ((94766, 94782), 'numpy.array', 'np.array', (['initrc'], {}), '(initrc)\n', (94774, 94782), True, 'import numpy as np\n'), ((97069, 97098), 'numpy.empty', 'np.empty', (['[0, 3]'], {'dtype': '"""int"""'}), "([0, 3], dtype='int')\n", (97077, 97098), True, 'import numpy as np\n'), ((19966, 19994), 'os.path.isdir', 'os.path.isdir', (["('../' + pwdir)"], {}), "('../' + pwdir)\n", (19979, 19994), False, 'import os\n'), ((39717, 39745), 'os.path.isdir', 'os.path.isdir', (["('../' + pwdir)"], {}), "('../' + pwdir)\n", (39730, 39745), False, 'import os\n'), ((39921, 39945), 'os.path.isfile', 'os.path.isfile', (['pload_fn'], {}), '(pload_fn)\n', (39935, 39945), False, 'import os\n'), ((49206, 49218), 'rasterio.open', 'rio.open', (['fn'], {}), '(fn)\n', (49214, 49218), True, 'import rasterio as rio\n'), ((49402, 49423), 'pyproj.Proj', 'pyproj.Proj', (['ptextcrs'], {}), '(ptextcrs)\n', (49413, 49423), False, 'import pyproj\n'), ((50570, 50599), 'rasterio.rio.helpers.resolve_inout', 'resolve_inout', ([], {'overwrite': '(True)'}), '(overwrite=True)\n', (50583, 50599), False, 'from rasterio.rio.helpers import resolve_inout\n'), ((51692, 51755), 'numpy.savetxt', 'np.savetxt', (["('../' + pwdir + '/' + fn + '.csv')", 'v'], {'delimiter': '""","""'}), "('../' + pwdir + '/' + fn + '.csv', v, delimiter=',')\n", (51702, 51755), True, 'import numpy as np\n'), ((91153, 91177), 'numpy.logical_not', 'np.logical_not', (['ecline_v'], {}), '(ecline_v)\n', (91167, 91177), True, 'import numpy as np\n'), ((95538, 95559), 'numpy.amax', 'np.amax', (['initrc[:, 1]'], {}), '(initrc[:, 1])\n', (95545, 95559), True, 'import numpy as np\n'), ((95579, 95600), 'numpy.amin', 'np.amin', (['initrc[:, 1]'], {}), '(initrc[:, 
1])\n', (95586, 95600), True, 'import numpy as np\n'), ((95663, 95684), 'numpy.amax', 'np.amax', (['initrc[:, 2]'], {}), '(initrc[:, 2])\n', (95670, 95684), True, 'import numpy as np\n'), ((95704, 95725), 'numpy.amin', 'np.amin', (['initrc[:, 2]'], {}), '(initrc[:, 2])\n', (95711, 95725), True, 'import numpy as np\n'), ((97015, 97051), 'numpy.reshape', 'np.reshape', (['ipload', '(1, ipload.size)'], {}), '(ipload, (1, ipload.size))\n', (97025, 97051), True, 'import numpy as np\n'), ((98032, 98052), 'numpy.zeros_like', 'np.zeros_like', (['dem_v'], {}), '(dem_v)\n', (98045, 98052), True, 'import numpy as np\n'), ((4205, 4272), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""LaharZ"""', 'font': "('Helvetica', 14, 'bold')"}), "(self.frame, text='LaharZ', font=('Helvetica', 14, 'bold'))\n", (4213, 4272), True, 'import tkinter as tk\n'), ((4351, 4404), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '""""""', 'font': "('Helvetica', 12)"}), "(self.frame, text='', font=('Helvetica', 12))\n", (4359, 4404), True, 'import tkinter as tk\n'), ((4490, 4560), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Working Directory"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Working Directory', font=('Helvetica', 12))\n", (4498, 4560), True, 'import tkinter as tk\n'), ((5047, 5115), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Load Parameters"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Load Parameters', font=('Helvetica', 12))\n", (5055, 5115), True, 'import tkinter as tk\n'), ((5860, 5913), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '""""""', 'font': "('Helvetica', 12)"}), "(self.frame, text='', font=('Helvetica', 12))\n", (5868, 5913), True, 'import tkinter as tk\n'), ((5988, 6055), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Inputs"""', 'font': "('Helvetica', 14, 'bold')"}), "(self.frame, text='Inputs', font=('Helvetica', 14, 'bold'))\n", (5996, 6055), True, 'import tkinter as tk\n'), ((6132, 
6193), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""DEM File"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='DEM File', font=('Helvetica', 12))\n", (6140, 6193), True, 'import tkinter as tk\n'), ((6672, 6736), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Stream File"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Stream File', font=('Helvetica', 12))\n", (6680, 6736), True, 'import tkinter as tk\n'), ((7222, 7284), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Flow File"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Flow File', font=('Helvetica', 12))\n", (7230, 7284), True, 'import tkinter as tk\n'), ((7765, 7824), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Volume"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Volume', font=('Helvetica', 12))\n", (7773, 7824), True, 'import tkinter as tk\n'), ((8297, 8359), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""H/L Ratio"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='H/L Ratio', font=('Helvetica', 12))\n", (8305, 8359), True, 'import tkinter as tk\n'), ((8832, 8889), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Peak"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Peak', font=('Helvetica', 12))\n", (8840, 8889), True, 'import tkinter as tk\n'), ((9398, 9466), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Search Diagonal"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Search Diagonal', font=('Helvetica', 12))\n", (9406, 9466), True, 'import tkinter as tk\n'), ((9935, 9997), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Sea Level"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Sea Level', font=('Helvetica', 12))\n", (9943, 9997), True, 'import tkinter as tk\n'), ((10457, 10510), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '""""""', 'font': "('Helvetica', 12)"}), "(self.frame, text='', font=('Helvetica', 12))\n", (10465, 10510), True, 'import 
tkinter as tk\n'), ((10587, 10655), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Outputs"""', 'font': "('Helvetica', 14, 'bold')"}), "(self.frame, text='Outputs', font=('Helvetica', 14, 'bold'))\n", (10595, 10655), True, 'import tkinter as tk\n'), ((10740, 10808), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Lahar Directory"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Lahar Directory', font=('Helvetica', 12))\n", (10748, 10808), True, 'import tkinter as tk\n'), ((11333, 11403), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Initiation Points"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Initiation Points', font=('Helvetica', 12))\n", (11341, 11403), True, 'import tkinter as tk\n'), ((11911, 11972), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Log File"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Log File', font=('Helvetica', 12))\n", (11919, 11972), True, 'import tkinter as tk\n'), ((12446, 12514), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Raw Energy Cone"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Raw Energy Cone', font=('Helvetica', 12))\n", (12454, 12514), True, 'import tkinter as tk\n'), ((13000, 13071), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Filled Energy Cone"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Filled Energy Cone', font=('Helvetica', 12))\n", (13008, 13071), True, 'import tkinter as tk\n'), ((13576, 13645), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Energy Cone Line"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Energy Cone Line', font=('Helvetica', 12))\n", (13584, 13645), True, 'import tkinter as tk\n'), ((14130, 14183), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '""""""', 'font': "('Helvetica', 12)"}), "(self.frame, text='', font=('Helvetica', 12))\n", (14138, 14183), True, 'import tkinter as tk\n'), ((14257, 14322), 'tkinter.Label', 'tk.Label', (['self.frame'], 
{'text': '"""Mesh"""', 'font': "('Helvetica', 14, 'bold')"}), "(self.frame, text='Mesh', font=('Helvetica', 14, 'bold'))\n", (14265, 14322), True, 'import tkinter as tk\n'), ((14467, 14567), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.frame'], {'text': '"""Plot Mesh"""', 'font': "('Helvetica', 12)", 'variable': 'self.tk_pplotmesh'}), "(self.frame, text='Plot Mesh', font=('Helvetica', 12),\n variable=self.tk_pplotmesh)\n", (14481, 14567), True, 'import tkinter as tk\n'), ((14854, 14921), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Mesh Directory"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Mesh Directory', font=('Helvetica', 12))\n", (14862, 14921), True, 'import tkinter as tk\n'), ((15437, 15505), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Mesh Resolution"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Mesh Resolution', font=('Helvetica', 12))\n", (15445, 15505), True, 'import tkinter as tk\n'), ((15999, 16063), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Mesh Extent"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Mesh Extent', font=('Helvetica', 12))\n", (16007, 16063), True, 'import tkinter as tk\n'), ((16586, 16639), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '""""""', 'font': "('Helvetica', 12)"}), "(self.frame, text='', font=('Helvetica', 12))\n", (16594, 16639), True, 'import tkinter as tk\n'), ((16717, 16786), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Controls"""', 'font': "('Helvetica', 14, 'bold')"}), "(self.frame, text='Controls', font=('Helvetica', 14, 'bold'))\n", (16725, 16786), True, 'import tkinter as tk\n'), ((17022, 17160), 'tkinter.Radiobutton', 'tk.Radiobutton', (['self.frame'], {'text': '"""Calculate Initiation Ponts"""', 'font': "('Helvetica', 12)", 'variable': 'self.tk_pcalcinitpoints', 'value': '"""calc"""'}), "(self.frame, text='Calculate Initiation Ponts', font=(\n 'Helvetica', 12), variable=self.tk_pcalcinitpoints, value='calc')\n", 
(17036, 17160), True, 'import tkinter as tk\n'), ((17247, 17380), 'tkinter.Radiobutton', 'tk.Radiobutton', (['self.frame'], {'text': '"""Load Initiation Points"""', 'font': "('Helvetica', 12)", 'variable': 'self.tk_pcalcinitpoints', 'value': '"""load"""'}), "(self.frame, text='Load Initiation Points', font=('Helvetica',\n 12), variable=self.tk_pcalcinitpoints, value='load')\n", (17261, 17380), True, 'import tkinter as tk\n'), ((17523, 17626), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.frame'], {'text': '"""Use Row/Col"""', 'font': "('Helvetica', 12)", 'variable': 'self.tk_puserowcol'}), "(self.frame, text='Use Row/Col', font=('Helvetica', 12),\n variable=self.tk_puserowcol)\n", (17537, 17626), True, 'import tkinter as tk\n'), ((18021, 18132), 'tkinter.Checkbutton', 'tk.Checkbutton', (['self.frame'], {'text': '"""Initiation Points Only"""', 'font': "('Helvetica', 12)", 'variable': 'self.tk_piponly'}), "(self.frame, text='Initiation Points Only', font=('Helvetica',\n 12), variable=self.tk_piponly)\n", (18035, 18132), True, 'import tkinter as tk\n'), ((18208, 18261), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '""""""', 'font': "('Helvetica', 12)"}), "(self.frame, text='', font=('Helvetica', 12))\n", (18216, 18261), True, 'import tkinter as tk\n'), ((18346, 18414), 'tkinter.Label', 'tk.Label', (['self.frame'], {'text': '"""Save Parameters"""', 'font': "('Helvetica', 12)"}), "(self.frame, text='Save Parameters', font=('Helvetica', 12))\n", (18354, 18414), True, 'import tkinter as tk\n'), ((49292, 49302), 'sys.exit', 'sys.exit', ([], {}), '()\n', (49300, 49302), False, 'import sys\n'), ((49486, 49504), 'pyproj.Proj', 'pyproj.Proj', (['f.crs'], {}), '(f.crs)\n', (49497, 49504), False, 'import pyproj\n'), ((49618, 49628), 'sys.exit', 'sys.exit', ([], {}), '()\n', (49626, 49628), False, 'import sys\n'), ((50803, 50812), 'rasterio.Env', 'rio.Env', ([], {}), '()\n', (50810, 50812), True, 'import rasterio as rio\n'), ((62384, 62419), 
'gmsh.model.geo.addPlaneSurface', 'gmsh.model.geo.addPlaneSurface', (['[j]'], {}), '([j])\n', (62414, 62419), False, 'import gmsh\n'), ((66822, 66857), 'gmsh.model.geo.addPlaneSurface', 'gmsh.model.geo.addPlaneSurface', (['[j]'], {}), '([j])\n', (66852, 66857), False, 'import gmsh\n'), ((96887, 96902), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (96896, 96902), True, 'import numpy as np\n'), ((97578, 97628), 'numpy.append', 'np.append', (['initrc', '[[line[2], r_in, c_in]]'], {'axis': '(0)'}), '(initrc, [[line[2], r_in, c_in]], axis=0)\n', (97587, 97628), True, 'import numpy as np\n'), ((98161, 98181), 'numpy.zeros_like', 'np.zeros_like', (['dem_v'], {}), '(dem_v)\n', (98174, 98181), True, 'import numpy as np\n'), ((99428, 99504), 'numpy.where', 'np.where', (['(innund_total_v == 0)', '((innund_vol_v > 0) * (v1 + 1))', 'innund_total_v'], {}), '(innund_total_v == 0, (innund_vol_v > 0) * (v1 + 1), innund_total_v)\n', (99436, 99504), True, 'import numpy as np\n'), ((50835, 50894), 'rasterio.open', 'rio.open', (["('../' + pwdir + '/' + fn + '.tif')", '"""w"""'], {}), "('../' + pwdir + '/' + fn + '.tif', 'w', **profile)\n", (50843, 50894), True, 'import rasterio as rio\n'), ((52351, 52364), 'numpy.shape', 'np.shape', (['arr'], {}), '(arr)\n', (52359, 52364), True, 'import numpy as np\n'), ((52369, 52382), 'numpy.shape', 'np.shape', (['arr'], {}), '(arr)\n', (52377, 52382), True, 'import numpy as np\n'), ((64369, 64401), 'gmsh.model.geo.addPoint', 'gmsh.model.geo.addPoint', (['x', 'y', 'h'], {}), '(x, y, h)\n', (64392, 64401), False, 'import gmsh\n'), ((99010, 99069), 'numpy.where', 'np.where', (['(innund_vol_v == 0)', '(innund_v * ip[0])', 'innund_vol_v'], {}), '(innund_vol_v == 0, innund_v * ip[0], innund_vol_v)\n', (99018, 99069), True, 'import numpy as np\n'), ((44572, 44600), 'os.path.isdir', 'os.path.isdir', (["('../' + pwdir)"], {}), "('../' + pwdir)\n", (44585, 44600), False, 'import os\n'), ((44744, 44768), 'os.path.isfile', 'os.path.isfile', 
(['psave_fn'], {}), '(psave_fn)\n', (44758, 44768), False, 'import os\n'), ((60643, 60698), 'gmsh.model.geo.addLine', 'gmsh.model.geo.addLine', (['p1[base_index]', 'p1[hplus_index]'], {}), '(p1[base_index], p1[hplus_index])\n', (60665, 60698), False, 'import gmsh\n'), ((60775, 60830), 'gmsh.model.geo.addLine', 'gmsh.model.geo.addLine', (['p1[base_index]', 'p1[vplus_index]'], {}), '(p1[base_index], p1[vplus_index])\n', (60797, 60830), False, 'import gmsh\n'), ((61690, 61786), 'gmsh.model.geo.addCurveLoop', 'gmsh.model.geo.addCurveLoop', (['[hor_line[h_index], ver_line[vplus_index], -dia_line[d_index]]'], {}), '([hor_line[h_index], ver_line[vplus_index], -\n dia_line[d_index]])\n', (61717, 61786), False, 'import gmsh\n'), ((61820, 61916), 'gmsh.model.geo.addCurveLoop', 'gmsh.model.geo.addCurveLoop', (['[ver_line[v_index], hor_line[hplus_index], -dia_line[d_index]]'], {}), '([ver_line[v_index], hor_line[hplus_index], -\n dia_line[d_index]])\n', (61847, 61916), False, 'import gmsh\n'), ((61972, 62065), 'gmsh.model.geo.addCurveLoop', 'gmsh.model.geo.addCurveLoop', (['[hor_line[h_index], -dia_line[d_index], -ver_line[v_index]]'], {}), '([hor_line[h_index], -dia_line[d_index], -\n ver_line[v_index]])\n', (61999, 62065), False, 'import gmsh\n'), ((62099, 62199), 'gmsh.model.geo.addCurveLoop', 'gmsh.model.geo.addCurveLoop', (['[hor_line[hplus_index], -ver_line[vplus_index], -dia_line[d_index]]'], {}), '([hor_line[hplus_index], -ver_line[vplus_index],\n -dia_line[d_index]])\n', (62126, 62199), False, 'import gmsh\n'), ((65087, 65142), 'gmsh.model.geo.addLine', 'gmsh.model.geo.addLine', (['p1[base_index]', 'p1[hplus_index]'], {}), '(p1[base_index], p1[hplus_index])\n', (65109, 65142), False, 'import gmsh\n'), ((65219, 65274), 'gmsh.model.geo.addLine', 'gmsh.model.geo.addLine', (['p1[base_index]', 'p1[vplus_index]'], {}), '(p1[base_index], p1[vplus_index])\n', (65241, 65274), False, 'import gmsh\n'), ((66131, 66227), 'gmsh.model.geo.addCurveLoop', 
'gmsh.model.geo.addCurveLoop', (['[hor_line[h_index], ver_line[vplus_index], -dia_line[d_index]]'], {}), '([hor_line[h_index], ver_line[vplus_index], -\n dia_line[d_index]])\n', (66158, 66227), False, 'import gmsh\n'), ((66261, 66357), 'gmsh.model.geo.addCurveLoop', 'gmsh.model.geo.addCurveLoop', (['[ver_line[v_index], hor_line[hplus_index], -dia_line[d_index]]'], {}), '([ver_line[v_index], hor_line[hplus_index], -\n dia_line[d_index]])\n', (66288, 66357), False, 'import gmsh\n'), ((66413, 66506), 'gmsh.model.geo.addCurveLoop', 'gmsh.model.geo.addCurveLoop', (['[hor_line[h_index], -dia_line[d_index], -ver_line[v_index]]'], {}), '([hor_line[h_index], -dia_line[d_index], -\n ver_line[v_index]])\n', (66440, 66506), False, 'import gmsh\n'), ((66540, 66640), 'gmsh.model.geo.addCurveLoop', 'gmsh.model.geo.addCurveLoop', (['[hor_line[hplus_index], -ver_line[vplus_index], -dia_line[d_index]]'], {}), '([hor_line[hplus_index], -ver_line[vplus_index],\n -dia_line[d_index]])\n', (66567, 66640), False, 'import gmsh\n'), ((44799, 44812), 'tkinter.Toplevel', 'tk.Toplevel', ([], {}), '()\n', (44810, 44812), True, 'import tkinter as tk\n'), ((44842, 44886), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""File already exists"""'}), "(window, text='File already exists')\n", (44850, 44886), True, 'import tkinter as tk\n'), ((45137, 45193), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Cancel"""', 'command': 'window.destroy'}), "(window, text='Cancel', command=window.destroy)\n", (45146, 45193), True, 'import tkinter as tk\n'), ((60981, 61036), 'gmsh.model.geo.addLine', 'gmsh.model.geo.addLine', (['p1[base_index]', 'p1[dplus_index]'], {}), '(p1[base_index], p1[dplus_index])\n', (61003, 61036), False, 'import gmsh\n'), ((61103, 61159), 'gmsh.model.geo.addLine', 'gmsh.model.geo.addLine', (['p1[vplus_index]', 'p1[hplus_index]'], {}), '(p1[vplus_index], p1[hplus_index])\n', (61125, 61159), False, 'import gmsh\n'), ((65425, 65480), 'gmsh.model.geo.addLine', 
'gmsh.model.geo.addLine', (['p1[base_index]', 'p1[dplus_index]'], {}), '(p1[base_index], p1[dplus_index])\n', (65447, 65480), False, 'import gmsh\n'), ((65547, 65603), 'gmsh.model.geo.addLine', 'gmsh.model.geo.addLine', (['p1[vplus_index]', 'p1[hplus_index]'], {}), '(p1[vplus_index], p1[hplus_index])\n', (65569, 65603), False, 'import gmsh\n'), ((69399, 69409), 'sys.exit', 'sys.exit', ([], {}), '()\n', (69407, 69409), False, 'import sys\n'), ((73909, 73919), 'sys.exit', 'sys.exit', ([], {}), '()\n', (73917, 73919), False, 'import sys\n')]
|
import numpy as np
from scipy import constants
from .conversion import vol_uc2mol
def zharkov_panh(v, temp, v0, a0, m, n, z, t_ref=300.,
                 three_r=3. * constants.R):
    """
    Calculate the anharmonic contribution to pressure for the Zharkov
    equation of state (formulation from Dorogokupets 2015).

    :param v: unit-cell volume in A^3
    :param temp: temperature in K
    :param v0: unit-cell volume in A^3 at 1 bar
    :param a0: parameter in K-1 for the Zharkov equation
    :param m: parameter for the Zharkov equation
    :param n: number of elements in a chemical formula
    :param z: number of formula units in a unit cell
    :param t_ref: reference temperature in K
    :param three_r: 3 times the gas constant
    :return: anharmonic contribution to pressure in GPa
    """
    # Volume-scaled anharmonicity parameter: a(V) = a0 * (V / V0)^m.
    compression = v / v0
    a_of_v = a0 * np.power(compression, m)
    molar_volume = vol_uc2mol(v, z)

    def pressure_term(t):
        # Anharmonic pressure term at a single temperature;
        # the 1.e-9 factor converts Pa to GPa.
        return three_r * n / 2. * a_of_v * m / molar_volume * \
            np.power(t, 2.) * 1.e-9

    # Report the contribution relative to the reference temperature.
    return pressure_term(temp) - pressure_term(t_ref)
|
[
"numpy.power"
] |
[((794, 808), 'numpy.power', 'np.power', (['x', 'm'], {}), '(x, m)\n', (802, 808), True, 'import numpy as np\n'), ((874, 890), 'numpy.power', 'np.power', (['t', '(2.0)'], {}), '(t, 2.0)\n', (882, 890), True, 'import numpy as np\n')]
|
# python color_tracking.py --video balls.mp4
# python color_tracking.py
#
# Track multiple colored objects (red/green/blue/yellow/orange) in a
# video file or webcam stream, drawing a labelled enclosing circle
# around the largest blob of each color in every frame.

# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
import urllib  # for reading image from URL

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
                help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the colors in the HSV color space
lower = {'red': (166, 84, 141), 'green': (66, 122, 129), 'blue': (97, 100, 117), 'yellow': (23, 59, 119),
         'orange': (0, 50, 80)}  # assign new item lower['blue'] = (93, 10, 0)
upper = {'red': (186, 255, 255), 'green': (86, 255, 255), 'blue': (117, 255, 255), 'yellow': (54, 255, 255),
         'orange': (20, 255, 255)}

# define standard colors (BGR) for the circle drawn around each object
colors = {'red': (0, 0, 255), 'green': (0, 255, 0), 'blue': (255, 0, 0), 'yellow': (0, 255, 217),
          'orange': (0, 140, 255)}

# pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(1)

# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # IP webcam image stream
    # URL = 'http://10.254.254.102:8080/shot.jpg'
    # urllib.urlretrieve(URL, 'shot1.jpg')
    # frame = cv2.imread('shot1.jpg')

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=1080)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # for each color in dictionary check object in frame
    for key, value in upper.items():
        # construct a mask for the color from dictionary`1, then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        kernel = np.ones((9, 9), np.uint8)
        mask = cv2.inRange(hsv, lower[key], upper[key])
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        # find contours in the mask and initialize the current
        # (x, y) center of the ball
        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing circle and
            # centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            # BUGFIX: a degenerate contour can have zero area
            # (M["m00"] == 0), which previously raised
            # ZeroDivisionError when computing the centroid;
            # skip such contours instead.
            if M["m00"] == 0:
                continue
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            # only proceed if the radius meets a minimum size. Correct this value for your obect's size
            if radius > 0.5:
                # draw the circle and centroid on the frame,
                # then update the list of tracked points
                cv2.circle(frame, (int(x), int(y)), int(radius), colors[key], 2)
                cv2.putText(frame, key + " color", (int(x - radius), int(y - radius)), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                            colors[key], 2)

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
|
[
"cv2.GaussianBlur",
"cv2.minEnclosingCircle",
"argparse.ArgumentParser",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.waitKey",
"cv2.moments",
"cv2.imshow",
"numpy.ones",
"cv2.VideoCapture",
"imutils.resize",
"cv2.destroyAllWindows",
"cv2.inRange"
] |
[((313, 338), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (336, 338), False, 'import argparse\n'), ((4089, 4112), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4110, 4112), False, 'import cv2\n'), ((1328, 1347), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (1344, 1347), False, 'import cv2\n'), ((1422, 1453), 'cv2.VideoCapture', 'cv2.VideoCapture', (["args['video']"], {}), "(args['video'])\n", (1438, 1453), False, 'import cv2\n'), ((1982, 2015), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(1080)'}), '(frame, width=1080)\n', (1996, 2015), False, 'import imutils\n'), ((2033, 2069), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['frame', '(11, 11)', '(0)'], {}), '(frame, (11, 11), 0)\n', (2049, 2069), False, 'import cv2\n'), ((2081, 2121), 'cv2.cvtColor', 'cv2.cvtColor', (['blurred', 'cv2.COLOR_BGR2HSV'], {}), '(blurred, cv2.COLOR_BGR2HSV)\n', (2093, 2121), False, 'import cv2\n'), ((3869, 3895), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (3879, 3895), False, 'import cv2\n'), ((2410, 2435), 'numpy.ones', 'np.ones', (['(9, 9)', 'np.uint8'], {}), '((9, 9), np.uint8)\n', (2417, 2435), True, 'import numpy as np\n'), ((2454, 2494), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower[key]', 'upper[key]'], {}), '(hsv, lower[key], upper[key])\n', (2465, 2494), False, 'import cv2\n'), ((2511, 2557), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(mask, cv2.MORPH_OPEN, kernel)\n', (2527, 2557), False, 'import cv2\n'), ((2574, 2621), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(mask, cv2.MORPH_CLOSE, kernel)\n', (2590, 2621), False, 'import cv2\n'), ((3909, 3923), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3920, 3923), False, 'import cv2\n'), ((3190, 3215), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (3212, 3215), False, 'import cv2\n'), ((3233, 
3247), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (3244, 3247), False, 'import cv2\n')]
|
import argparse
import os
import string
import sys
import time
import cv2
import numpy as np
import torch
from torch.autograd import Variable
from torchvision import transforms
import utils
import crnn_captcha
# Command-line interface: where to find the trained model weights and
# the directory of captcha images to recognize.
parser = argparse.ArgumentParser()
# BUGFIX: the --model_path help text was copy-pasted from --imgs_dir
# ("the path to your images"); describe the model path correctly.
parser.add_argument('--model_path', type=str, default='./crnn_capcha.pth', help='the path to the trained CRNN model weights')
parser.add_argument('--imgs_dir', type=str, default='./imgs', help='the path to your images')
opt = parser.parse_args()

crnn_model_path = opt.model_path

# Recognizable character set: digits plus ASCII letters.  One extra
# class is reserved for the CTC "blank" symbol, hence the +1.
alphabet = string.digits + string.ascii_letters
nclass = len(alphabet)+1

# Per-channel mean/std normalization applied to every input tensor.
transformer = transforms.Normalize([0.906, 0.910, 0.907], [0.147, 0.130, 0.142])
# Network input height in pixels; images are rescaled to this height.
imgH = 64
def crnn_recognition(image, model):
    """Run the CRNN model on a single image and decode the text.

    :param image: H x W x C image array (e.g. as returned by cv2.imread)
    :param model: a trained CRNN network
    :return: the decoded label string
    """
    converter = utils.strLabelConverter(alphabet)

    # Rescale so the image height equals the network input height,
    # preserving the aspect ratio.
    height, width, channels = image.shape
    scale = imgH * 1.0 / height
    resized = cv2.resize(image, (0, 0), fx=scale, fy=scale,
                         interpolation=cv2.INTER_CUBIC)

    # HWC uint8 -> CHW float32 in [0, 1], then mean/std normalization.
    tensor = np.reshape(resized, (imgH, -1, channels)).transpose(2, 0, 1)
    tensor = tensor.astype(np.float32) / 255.
    tensor = torch.from_numpy(tensor).type(torch.FloatTensor)
    tensor = transformer(tensor)
    if torch.cuda.is_available():
        tensor = tensor.cuda()

    # Add the batch dimension and run the network.
    batch = Variable(tensor.view(1, *tensor.size()))
    preds = model(batch)

    # Greedy decoding: take the best class at each timestep and flatten
    # to a single label sequence for the CTC decoder.
    _, preds = preds.max(2)
    preds = preds.transpose(1, 0).contiguous().view(-1)
    preds_size = Variable(torch.IntTensor([preds.size(0)]))
    return converter.decode(preds.data, preds_size.data, raw=False)
if __name__ == '__main__':
    # Build the CRNN (input height 64, 3 channels, nclass outputs,
    # 256 hidden units) and move it to the GPU when one is available.
    model = crnn_captcha.CRNN(64, 3, nclass, 256)
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        model = model.cuda()

    print('loading pretrained model from {0}'.format(crnn_model_path))
    # BUGFIX: map_location keeps GPU-saved checkpoints loadable on
    # CPU-only hosts; previously torch.load would fail there.
    state_dict = torch.load(crnn_model_path,
                            map_location=None if use_cuda else 'cpu')
    model.load_state_dict(state_dict)
    model.eval()

    # Recognize every image in the input directory and time the run.
    # (The unused counter `n` and the unused enumerate index were removed.)
    started = time.time()
    for name in os.listdir(opt.imgs_dir):
        img_path = os.path.join(opt.imgs_dir, name)
        image = cv2.imread(img_path)
        pred = crnn_recognition(image, model)
        print('path: {0}, pred: {1}'.format(name, pred))
    finished = time.time()
    print('elapsed time: {0}'.format(finished - started))
|
[
"crnn_captcha.CRNN",
"argparse.ArgumentParser",
"torch.autograd.Variable",
"torch.load",
"utils.strLabelConverter",
"time.time",
"cv2.imread",
"torch.cuda.is_available",
"numpy.reshape",
"torchvision.transforms.Normalize",
"os.path.join",
"os.listdir",
"cv2.resize",
"torch.from_numpy"
] |
[((221, 246), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (244, 246), False, 'import argparse\n'), ((599, 663), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.906, 0.91, 0.907]', '[0.147, 0.13, 0.142]'], {}), '([0.906, 0.91, 0.907], [0.147, 0.13, 0.142])\n', (619, 663), False, 'from torchvision import transforms\n'), ((730, 763), 'utils.strLabelConverter', 'utils.strLabelConverter', (['alphabet'], {}), '(alphabet)\n', (753, 763), False, 'import utils\n'), ((827, 903), 'cv2.resize', 'cv2.resize', (['image', '(0, 0)'], {'fx': 'ratio', 'fy': 'ratio', 'interpolation': 'cv2.INTER_CUBIC'}), '(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)\n', (837, 903), False, 'import cv2\n'), ((1111, 1136), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1134, 1136), False, 'import torch\n'), ((1220, 1235), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (1228, 1235), False, 'from torch.autograd import Variable\n'), ((1545, 1582), 'crnn_captcha.CRNN', 'crnn_captcha.CRNN', (['(64)', '(3)', 'nclass', '(256)'], {}), '(64, 3, nclass, 256)\n', (1562, 1582), False, 'import crnn_captcha\n'), ((1590, 1615), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1613, 1615), False, 'import torch\n'), ((1803, 1814), 'time.time', 'time.time', ([], {}), '()\n', (1812, 1814), False, 'import time\n'), ((1826, 1850), 'os.listdir', 'os.listdir', (['opt.imgs_dir'], {}), '(opt.imgs_dir)\n', (1836, 1850), False, 'import os\n'), ((2104, 2115), 'time.time', 'time.time', ([], {}), '()\n', (2113, 2115), False, 'import time\n'), ((1743, 1770), 'torch.load', 'torch.load', (['crnn_model_path'], {}), '(crnn_model_path)\n', (1753, 1770), False, 'import torch\n'), ((1916, 1948), 'os.path.join', 'os.path.join', (['opt.imgs_dir', 'name'], {}), '(opt.imgs_dir, name)\n', (1928, 1948), False, 'import os\n'), ((1965, 1985), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', 
(1975, 1985), False, 'import cv2\n'), ((916, 948), 'numpy.reshape', 'np.reshape', (['image', '(imgH, -1, c)'], {}), '(image, (imgH, -1, c))\n', (926, 948), True, 'import numpy as np\n'), ((1025, 1048), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1041, 1048), False, 'import torch\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for attention layers."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from etcmodel import layers as etc_layers
# `attention` module used for testing `_expand_local_ids_to_blocks` helper.
from etcmodel.layers import attention
class LayersTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the ETC attention layers in `etcmodel.layers`."""
  def assert_all_identical(self, *elements):
    """Asserts that all given elements are the same object (identity)."""
    if not elements:
      return
    first_element = elements[0]
    for element in elements[1:]:
      self.assertIs(element, first_element)
  @parameterized.named_parameters(
      ('using_gather', False),
      ('using_one_hot', True),
  )
  def test_relative_attention(self, use_one_hot_lookup):
    """Checks RelativeAttention output shape for cross-attention inputs."""
    tf.compat.v1.random.set_random_seed(1234)
    np.random.seed(1234)
    batch_size = 3
    from_seq_len = 16
    to_seq_len = 17
    num_heads = 5
    from_hidden_size = 11
    to_hidden_size = 12
    output_hidden_size = 13
    total_key_size = 10
    total_value_size = 15
    relative_vocab_size = 21
    from_seq = tf.random.normal([batch_size, from_seq_len, from_hidden_size])
    to_seq = tf.random.normal([batch_size, to_seq_len, to_hidden_size])
    # Random 0/1 mask; p=0.9 keeps most positions attendable.
    att_mask = tf.constant(
        np.random.binomial(
            n=1, p=0.9, size=[batch_size, from_seq_len, to_seq_len]))
    relative_att_ids = tf.random.uniform([batch_size, from_seq_len, to_seq_len],
                                         maxval=relative_vocab_size,
                                         dtype=tf.int32)
    layer = etc_layers.RelativeAttention(
        hidden_size=output_hidden_size,
        num_heads=num_heads,
        total_key_size=total_key_size,
        total_value_size=total_value_size,
        relative_vocab_size=relative_vocab_size,
        use_one_hot_lookup=use_one_hot_lookup)
    result = layer(
        from_seq=from_seq,
        to_seq=to_seq,
        att_mask=att_mask,
        relative_att_ids=relative_att_ids)
    self.assertAllEqual([batch_size, from_seq_len, output_hidden_size],
                        result.shape)
  @parameterized.named_parameters(
      ('using_gather', False),
      ('using_one_hot', True),
  )
  def test_relative_attention_self_attention(self, use_one_hot_lookup):
    """Checks the single-input call matches an explicit from_seq/to_seq call."""
    tf.compat.v1.random.set_random_seed(1234)
    np.random.seed(1234)
    batch_size = 3
    seq_len = 16
    num_heads = 5
    input_hidden_size = 11
    output_hidden_size = 12
    total_key_size = 10
    total_value_size = 15
    relative_vocab_size = 21
    inputs = tf.constant(
        np.random.normal(size=[batch_size, seq_len, input_hidden_size]),
        tf.float32)
    att_mask = tf.constant(
        np.random.binomial(n=1, p=0.9, size=[batch_size, seq_len, seq_len]))
    relative_att_ids = tf.constant(
        np.random.randint(
            relative_vocab_size, size=[batch_size, seq_len, seq_len]))
    layer = etc_layers.RelativeAttention(
        hidden_size=output_hidden_size,
        num_heads=num_heads,
        total_key_size=total_key_size,
        total_value_size=total_value_size,
        relative_vocab_size=relative_vocab_size,
        use_one_hot_lookup=use_one_hot_lookup)
    # Self-attention shorthand: one positional argument.
    result1 = layer(
        inputs, att_mask=att_mask, relative_att_ids=relative_att_ids)
    self.assertAllEqual([batch_size, seq_len, output_hidden_size],
                        result1.shape)
    # Same call spelled out with identical from/to sequences.
    result2 = layer(
        from_seq=inputs,
        to_seq=inputs,
        att_mask=att_mask,
        relative_att_ids=relative_att_ids)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllEqual(result1, result2)
  @parameterized.named_parameters(
      ('using_gather', False),
      ('using_one_hot', True),
  )
  def test_relative_attention_shared_sublayers(self, use_one_hot_lookup):
    """Checks a layer sharing all sublayers matches; fresh weights differ."""
    tf.compat.v1.random.set_random_seed(1234)
    np.random.seed(1234)
    batch_size = 3
    from_seq_len = 16
    to_seq_len = 17
    num_heads = 5
    from_hidden_size = 11
    to_hidden_size = 12
    output_hidden_size = 13
    total_key_size = 10
    total_value_size = 15
    relative_vocab_size = 9
    from_seq = tf.constant(
        np.random.random(size=[batch_size, from_seq_len, from_hidden_size]))
    to_seq = tf.constant(
        np.random.random(size=[batch_size, to_seq_len, to_hidden_size]))
    att_mask = tf.constant(
        np.random.binomial(
            n=1, p=0.9, size=[batch_size, from_seq_len, to_seq_len]))
    layer = etc_layers.RelativeAttention(
        hidden_size=output_hidden_size,
        num_heads=num_heads,
        total_key_size=total_key_size,
        total_value_size=total_value_size,
        relative_vocab_size=relative_vocab_size,
        use_one_hot_lookup=use_one_hot_lookup)
    # Reuses every sublayer from `layer`, so it must compute the same function.
    sharing_layer = etc_layers.RelativeAttention(
        hidden_size=output_hidden_size,
        num_heads=num_heads,
        total_key_size=total_key_size,
        total_value_size=total_value_size,
        query_projection=layer.query_projection,
        key_projection=layer.key_projection,
        value_projection=layer.value_projection,
        qkv_relative_attention=layer.qkv_relative_attention,
        output_projection=layer.output_projection)
    # Independently initialized layer; expected to produce different outputs.
    different_layer = etc_layers.RelativeAttention(
        hidden_size=output_hidden_size,
        num_heads=num_heads,
        total_key_size=total_key_size,
        total_value_size=total_value_size,
        relative_vocab_size=relative_vocab_size,
        use_one_hot_lookup=use_one_hot_lookup)
    result1 = layer(
        from_seq=from_seq,
        to_seq=to_seq,
        att_mask=att_mask,
        relative_att_ids=None)
    result2 = sharing_layer(
        from_seq=from_seq,
        to_seq=to_seq,
        att_mask=att_mask,
        relative_att_ids=None)
    result3 = different_layer(
        from_seq=from_seq,
        to_seq=to_seq,
        att_mask=att_mask,
        relative_att_ids=None)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllEqual(result1, result2)
    self.assertNotAllClose(result1, result3)
  def test_fused_global_local_attention_special_case_equivalence(self):
    """Compares fused attention to standard attention when sparsity is removed."""
    # To test for correctness, we make sure the output is equivalent to
    # standard attention in the special case where `local_radius` covers the
    # entire long sequence length and projection weights are shared.
    # For simplicity, we don't use attention masks or relative attention ids
    # in this test.
    tf.compat.v1.random.set_random_seed(1234)
    np.random.seed(1234)
    batch_size = 3
    long_seq_len = 12
    global_seq_len = 6
    hidden_size = 10
    num_heads = 5
    local_radius = 15  # Must be >= `long_seq_len - 1` to remove sparsity.
    # relative_vocab_size = 9
    long_input = tf.constant(
        np.random.normal(size=[batch_size, long_seq_len, hidden_size]))
    global_input = tf.constant(
        np.random.normal(size=[batch_size, global_seq_len, hidden_size]))
    fused_att_layer = etc_layers.FusedGlobalLocalAttention(
        long_hidden_size=hidden_size,
        global_hidden_size=hidden_size,
        num_heads=num_heads,
        local_radius=local_radius,
        share_qkv_projections=True,
        share_att_output_projection=True)
    long_output, global_output = fused_att_layer(
        long_input,
        global_input,
        att_implementation='sparse')
    # [batch_size, long_seq_len + global_seq_len, hidden_size]
    fused_output = tf.concat([long_output, global_output], axis=1)
    # Create concatenated input for standard attention.
    # [batch_size, long_seq_len + global_seq_len, hidden_size]
    concat_input = tf.concat([long_input, global_input], axis=1)
    # Standard attention reusing the fused layer's (shared) projections.
    standard_att_layer = etc_layers.RelativeAttention(
        hidden_size=hidden_size,
        num_heads=num_heads,
        query_projection=fused_att_layer.long_query_projection,
        key_projection=fused_att_layer.l2l_key_projection,
        value_projection=fused_att_layer.l2l_value_projection,
        output_projection=fused_att_layer.long_output_projection)
    expected_output = standard_att_layer(concat_input)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose(expected_output, fused_output)
    # Make sure 'full' att_implementation gives the same output.
    long_output_full_att, global_output_full_att = fused_att_layer(
        long_input,
        global_input,
        att_implementation='full')
    self.assertAllClose(long_output, long_output_full_att)
    self.assertAllClose(global_output, global_output_full_att)
  @parameterized.named_parameters(
      dict(testcase_name='share_nothing'),
      dict(testcase_name='share_kv_projections', share_kv_projections=True),
      dict(testcase_name='share_qkv_projections', share_qkv_projections=True),
      dict(
          testcase_name='share_qkv_projections_supersedes_kv',
          share_kv_projections=True,
          share_qkv_projections=True),
      dict(
          testcase_name='share_att_output_projection',
          share_att_output_projection=True),
      dict(
          testcase_name='share_everything',
          share_qkv_projections=True,
          share_att_output_projection=True),
  )
  def test_fused_global_local_attention_shared_sublayers(
      self,
      share_kv_projections=False,
      share_qkv_projections=False,
      share_att_output_projection=False):
    """Checks which projection sublayers are shared under each sharing flag."""
    hidden_size = 10
    layer = etc_layers.FusedGlobalLocalAttention(
        long_hidden_size=hidden_size,
        global_hidden_size=hidden_size,
        num_heads=5,
        local_radius=7,
        relative_vocab_size=9,
        share_kv_projections=share_kv_projections,
        share_qkv_projections=share_qkv_projections,
        share_att_output_projection=share_att_output_projection)
    # Run layer to make sure all variables are built.
    layer(
        long_input=tf.ones([1, 1, hidden_size]),
        global_input=tf.ones([1, 1, hidden_size]))
    if share_qkv_projections:
      # q, k, and v projections are each shared across long/global paths.
      self.assertIs(layer.long_query_projection, layer.global_query_projection)
      self.assert_all_identical(layer.l2l_key_projection,
                                layer.l2g_key_projection,
                                layer.g2g_key_projection,
                                layer.g2l_key_projection)
      self.assert_all_identical(layer.l2l_value_projection,
                                layer.l2g_value_projection,
                                layer.g2g_value_projection,
                                layer.g2l_value_projection)
    elif share_kv_projections:
      # Only k/v are shared within each side (long vs. global), not across.
      self.assertIsNot(layer.long_query_projection,
                       layer.global_query_projection)
      self.assertIs(layer.l2l_key_projection, layer.l2g_key_projection)
      self.assertIs(layer.g2g_key_projection, layer.g2l_key_projection)
      self.assertIsNot(layer.l2l_key_projection, layer.g2g_key_projection)
      self.assertIs(layer.l2l_value_projection, layer.l2g_value_projection)
      self.assertIs(layer.g2g_value_projection, layer.g2l_value_projection)
      self.assertIsNot(layer.l2l_value_projection, layer.g2g_value_projection)
    else:
      # No sharing: every projection is a distinct sublayer.
      self.assertIsNot(layer.long_query_projection,
                       layer.global_query_projection)
      self.assertIsNot(layer.l2l_key_projection, layer.l2g_key_projection)
      self.assertIsNot(layer.l2l_key_projection, layer.g2g_key_projection)
      self.assertIsNot(layer.l2l_value_projection, layer.l2g_value_projection)
      self.assertIsNot(layer.l2l_value_projection, layer.g2g_value_projection)
    # q, k, and v projections are never shared with each other.
    self.assertIsNot(layer.long_query_projection, layer.l2l_key_projection)
    self.assertIsNot(layer.long_query_projection, layer.l2l_value_projection)
    self.assertIsNot(layer.l2l_key_projection, layer.l2l_value_projection)
    if share_att_output_projection:
      self.assertIs(layer.long_output_projection,
                    layer.global_output_projection)
    else:
      self.assertIsNot(layer.long_output_projection,
                       layer.global_output_projection)
  def test_fused_global_local_attention_custom_total_att_size(self):
    """Checks output shapes when total attention size differs from hidden size."""
    tf.compat.v1.random.set_random_seed(1234)
    np.random.seed(1234)
    batch_size = 3
    long_seq_len = 12
    global_seq_len = 6
    hidden_size = 11
    num_heads = 5
    local_radius = 2
    total_att_size = 10
    relative_vocab_size = 9
    long_input = tf.constant(
        np.random.normal(size=[batch_size, long_seq_len, hidden_size]))
    global_input = tf.constant(
        np.random.normal(size=[batch_size, global_seq_len, hidden_size]))
    # Random 0/1 masks and relative-attention ids for all four directions
    # (long-to-long, global-to-global, long-to-global, global-to-long).
    l2l_att_mask = tf.constant(
        np.random.binomial(
            n=1, p=0.9, size=[batch_size, long_seq_len, 2 * local_radius + 1]))
    g2g_att_mask = tf.constant(
        np.random.binomial(
            n=1, p=0.9, size=[batch_size, global_seq_len, global_seq_len]))
    l2g_att_mask = tf.constant(
        np.random.binomial(
            n=1, p=0.9, size=[batch_size, long_seq_len, global_seq_len]))
    g2l_att_mask = tf.constant(
        np.random.binomial(
            n=1, p=0.9, size=[batch_size, global_seq_len, long_seq_len]))
    l2l_relative_att_ids = tf.constant(
        np.random.randint(
            relative_vocab_size,
            size=[batch_size, long_seq_len, 2 * local_radius + 1]))
    g2g_relative_att_ids = tf.constant(
        np.random.randint(
            relative_vocab_size,
            size=[batch_size, global_seq_len, global_seq_len]))
    l2g_relative_att_ids = tf.constant(
        np.random.randint(
            relative_vocab_size,
            size=[batch_size, long_seq_len, global_seq_len]))
    g2l_relative_att_ids = tf.constant(
        np.random.randint(
            relative_vocab_size,
            size=[batch_size, global_seq_len, long_seq_len]))
    fused_att_layer = etc_layers.FusedGlobalLocalAttention(
        long_hidden_size=hidden_size,
        global_hidden_size=hidden_size,
        num_heads=num_heads,
        local_radius=local_radius,
        long_total_att_size=total_att_size,
        global_total_att_size=total_att_size,
        relative_vocab_size=relative_vocab_size,
        share_qkv_projections=True,
        share_att_output_projection=True)
    long_output, global_output = fused_att_layer(
        long_input,
        global_input,
        l2l_att_mask=l2l_att_mask,
        g2g_att_mask=g2g_att_mask,
        l2g_att_mask=l2g_att_mask,
        g2l_att_mask=g2l_att_mask,
        l2l_relative_att_ids=l2l_relative_att_ids,
        g2g_relative_att_ids=g2g_relative_att_ids,
        l2g_relative_att_ids=l2g_relative_att_ids,
        g2l_relative_att_ids=g2l_relative_att_ids)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllEqual([batch_size, long_seq_len, hidden_size],
                        long_output.shape)
    self.assertAllEqual([batch_size, global_seq_len, hidden_size],
                        global_output.shape)
  def test_attention_head_projection(self):
    """Checks ProjectAttentionHeads output shapes for 3-D and 4-D inputs."""
    inputs = tf.ones([2, 3, 10])
    layer = etc_layers.ProjectAttentionHeads(num_heads=4, size_per_head=5)
    result = layer(inputs)
    self.assertAllEqual([2, 3, 4, 5], result.shape)
    inputs = tf.ones([2, 3, 4, 10])
    layer = etc_layers.ProjectAttentionHeads(num_heads=5, size_per_head=6)
    result = layer(inputs)
    self.assertAllEqual([2, 3, 4, 5, 6], result.shape)
  @parameterized.named_parameters(
      ('using_gather', False),
      ('using_one_hot', True),
  )
  def test_qkv_relative_attention(self, use_one_hot_lookup):
    """Checks QkvRelativeAttention against hand-computed expected values."""
    # batch_size: 2
    # query_len: 3
    # key_len: 4
    # num_heads: 2
    # key_size_per_head: 3
    # value_size_per_head: 5
    # relative_vocab_size: 6
    # Queries are (near) one-hot so each attends to the key matching its
    # direction; +/-99.0 key entries saturate the softmax one way or the other.
    # [batch_size, query_len, num_heads, key_size_per_head]
    queries = tf.constant([
        [
            [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
            [[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]],
            [[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]],
        ],  #
        [
            [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
            [[1.0, 1.0, 0.0], [1.0, 1.0, 0.0]],
            [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]],
        ]
    ])
    # [batch_size, key_len, num_heads, key_size_per_head]
    keys = tf.constant([
        [
            [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
            [[99.0, 0.0, 0.0], [-99.0, 0.0, 0.0]],
            [[0.0, 0.0, 99.0], [0.0, 0.0, -99.0]],
            [[0.0, 99.0, 0.0], [0.0, -99.0, 0.0]],
        ],  #
        [
            [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
            [[99.0, 0.0, 0.0], [-99.0, 0.0, 0.0]],
            [[0.0, 99.0, 0.0], [0.0, -99.0, 0.0]],
            [[0.0, 0.0, 99.0], [0.0, 0.0, -99.0]],
        ]
    ])
    # [batch_size, key_len, num_heads, value_size_per_head]
    values = tf.constant([
        [
            [[0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1, -0.1]],
            [[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, -0.2]],
            [[0.3, 0.3, 0.3, 0.3, 0.3], [0.3, 0.3, 0.3, 0.3, -0.3]],
            [[0.4, 0.4, 0.4, 0.4, 0.4], [0.4, 0.4, 0.4, 0.4, -0.4]],
        ],  #
        [
            [[-0.1, 0.1, 0.1, 0.1, 0.1], [-0.1, 0.1, 0.1, 0.1, -0.1]],
            [[-0.2, 0.2, 0.2, 0.2, 0.2], [-0.2, 0.2, 0.2, 0.2, -0.2]],
            [[-0.3, 0.3, 0.3, 0.3, 0.3], [-0.3, 0.3, 0.3, 0.3, -0.3]],
            [[-0.4, 0.4, 0.4, 0.4, 0.4], [-0.4, 0.4, 0.4, 0.4, -0.4]],
        ]
    ])
    # [batch_size, query_len, key_len]
    att_mask = tf.constant([
        [
            [1, 1, 1, 1],
            [1, 1, 1, 1],
            [1, 1, 1, 1],
        ],
        [
            [1, 0, 1, 1],
            [1, 0, 1, 1],
            [1, 0, 1, 1],
        ],
    ])
    # [batch_size, query_len, key_len]
    relative_att_ids = tf.constant([
        [
            [1, 0, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 0, 5],
        ],
        [
            [0, 0, 0, 0],
            [0, 0, 0, 0],
            [0, 0, 2, 2],
        ],
    ])
    # [relative_vocab_size, num_heads, key_size_per_head]
    relative_emb_table = [
        [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
        [[0.0, 0.0, 0.0], [-99.0, 0.0, 0.0]],
        [[-99.0, 0.0, 0.0], [99.0, 0.0, 0.0]],
        [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
        [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
        [[0.0, 0.0, 0.0], [0.0, 0.0, -99.0]],
    ]
    # Constant initializer makes the relative embeddings deterministic.
    layer = etc_layers.QkvRelativeAttention(
        relative_vocab_size=6,
        use_one_hot_lookup=use_one_hot_lookup,
        initializer=tf.initializers.constant(relative_emb_table))
    result = layer(
        queries=queries,
        keys=keys,
        values=values,
        att_mask=att_mask,
        relative_att_ids=relative_att_ids)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    expected = [
        [
            [[0.2, 0.2, 0.2, 0.2, 0.2], [0.35, 0.35, 0.35, 0.35, -0.35]],
            [[0.4, 0.4, 0.4, 0.4, 0.4], [0.2, 0.2, 0.2, 0.2, -0.2]],
            [[0.3, 0.3, 0.3, 0.3, 0.3], [0.15, 0.15, 0.15, 0.15, -0.15]],
        ],  #
        [
            [[-0.35, 0.35, 0.35, 0.35, 0.35], [-0.1, 0.1, 0.1, 0.1, -0.1]],
            [[-0.3, 0.3, 0.3, 0.3, 0.3], [-0.25, 0.25, 0.25, 0.25, -0.25]],
            [[-0.1, 0.1, 0.1, 0.1, 0.1], [-0.35, 0.35, 0.35, 0.35, -0.35]],
        ]
    ]
    self.assertAllEqual([2, 3, 2, 5], result.shape)
    self.assertAllClose(expected, result)
  @parameterized.named_parameters(
      dict(testcase_name='even_blocking_with_gather', local_radius=15),
      dict(testcase_name='uneven_blocking_with_gather', local_radius=16),
      dict(testcase_name='degenerate_blocking_with_gather', local_radius=35),
      dict(
          testcase_name='even_blocking_with_one_hot',
          local_radius=15,
          use_one_hot_lookup=True),
      dict(
          testcase_name='uneven_blocking_with_one_hot',
          local_radius=16,
          use_one_hot_lookup=True),
      dict(
          testcase_name='degenerate_blocking_with_one_hot',
          local_radius=35,
          use_one_hot_lookup=True),
      dict(
          testcase_name='even_blocking_with_gather_full_att',
          local_radius=15,
          att_implementation='full'),
      dict(
          testcase_name='uneven_blocking_with_gather_full_att',
          local_radius=16,
          att_implementation='full'),
      dict(
          testcase_name='degenerate_blocking_with_gather_full_att',
          local_radius=35,
          att_implementation='full'),
      dict(
          testcase_name='even_blocking_with_one_hot_full_att',
          local_radius=15,
          use_one_hot_lookup=True,
          att_implementation='full'),
      dict(
          testcase_name='uneven_blocking_with_one_hot_full_att',
          local_radius=16,
          use_one_hot_lookup=True,
          att_implementation='full'),
      dict(
          testcase_name='degenerate_blocking_with_one_hot_full_att',
          local_radius=35,
          use_one_hot_lookup=True,
          att_implementation='full'),
  )
  def test_qkv_relative_local_attention(self,
                                        local_radius,
                                        use_one_hot_lookup=False,
                                        att_implementation='sparse'):
    """Checks output shapes and that masks/side inputs change the result."""
    tf.compat.v1.random.set_random_seed(1234)
    np.random.seed(1234)
    batch_size = 2
    long_len = 64
    side_len = 6
    num_heads = 5
    key_size_per_head = 2
    value_size_per_head = 3
    relative_vocab_size = 7
    # Note: block_len = local_radius + 1
    queries = tf.constant(
        np.random.normal(
            size=[batch_size, long_len, num_heads, key_size_per_head]),
        tf.float32)
    keys = tf.constant(
        np.random.normal(
            size=[batch_size, long_len, num_heads, key_size_per_head]),
        tf.float32)
    values = tf.constant(
        np.random.normal(
            size=[batch_size, long_len, num_heads, value_size_per_head]),
        tf.float32)
    att_mask = tf.constant(
        np.random.binomial(
            n=1, p=0.9, size=[batch_size, long_len, 2 * local_radius + 1]))
    relative_att_ids = tf.constant(
        np.random.randint(
            relative_vocab_size,
            size=[batch_size, long_len, 2 * local_radius + 1]))
    side_keys = tf.constant(
        np.random.normal(
            size=[batch_size, side_len, num_heads, key_size_per_head]),
        tf.float32)
    side_values = tf.constant(
        np.random.normal(
            size=[batch_size, side_len, num_heads, value_size_per_head]),
        tf.float32)
    side_att_mask = tf.constant(
        np.random.binomial(n=1, p=0.9, size=[batch_size, long_len, side_len]))
    side_relative_att_ids = tf.constant(
        np.random.randint(
            relative_vocab_size, size=[batch_size, long_len, side_len]))
    layer = etc_layers.QkvRelativeLocalAttention(
        local_radius=local_radius,
        relative_vocab_size=relative_vocab_size,
        use_one_hot_lookup=use_one_hot_lookup)
    # Full call: masks, relative ids, and side inputs all provided.
    result1 = layer(
        queries,
        keys,
        values,
        att_mask=att_mask,
        relative_att_ids=relative_att_ids,
        side_keys=side_keys,
        side_values=side_values,
        side_att_mask=side_att_mask,
        side_relative_att_ids=side_relative_att_ids,
        att_implementation=att_implementation)
    self.assertAllEqual([batch_size, long_len, num_heads, value_size_per_head],
                        result1.shape)
    # Side inputs only; no masks or relative ids.
    result2 = layer(
        queries,
        keys,
        values,
        att_mask=None,
        relative_att_ids=None,
        side_keys=side_keys,
        side_values=side_values,
        side_att_mask=None,
        side_relative_att_ids=None,
        att_implementation=att_implementation)
    self.assertAllEqual([batch_size, long_len, num_heads, value_size_per_head],
                        result2.shape)
    # Masks and relative ids only; no side inputs.
    result3 = layer(
        queries,
        keys,
        values,
        att_mask=att_mask,
        relative_att_ids=relative_att_ids,
        side_keys=None,
        side_values=None,
        side_att_mask=None,
        side_relative_att_ids=None,
        att_implementation=att_implementation)
    self.assertAllEqual([batch_size, long_len, num_heads, value_size_per_head],
                        result3.shape)
    self.evaluate(tf.compat.v1.global_variables_initializer())
    # Each optional input must actually influence the output.
    self.assertNotAllClose(result1, result2)
    self.assertNotAllClose(result2, result3)
    self.assertNotAllClose(result1, result3)
  @parameterized.named_parameters(
      ('even_blocking_with_gather', 15, False),
      ('uneven_blocking_with_gather', 16, False),
      ('degenerate_blocking_with_gather', 35, False),
      ('even_blocking_with_one_hot', 15, True),
      ('uneven_blocking_with_one_hot', 16, True),
      ('degenerate_blocking_with_one_hot', 35, True),
  )
  def test_qkv_relative_local_attention_full_att_implementation(
      self, local_radius, use_one_hot_lookup):
    """Checks 'sparse' and 'full' implementations produce the same output."""
    # We check the validity of the `att_implementation` option
    # by confirming both internal implementations return the same output.
    tf.compat.v1.random.set_random_seed(1234)
    np.random.seed(1234)
    batch_size = 3
    long_len = 64
    side_len = 6
    num_heads = 5
    key_size_per_head = 2
    value_size_per_head = 3
    relative_vocab_size = 7
    # Note: block_len = local_radius + 1
    queries = tf.constant(
        np.random.normal(
            size=[batch_size, long_len, num_heads, key_size_per_head]),
        tf.float32)
    keys = tf.constant(
        np.random.normal(
            size=[batch_size, long_len, num_heads, key_size_per_head]),
        tf.float32)
    values = tf.constant(
        np.random.normal(
            size=[batch_size, long_len, num_heads, value_size_per_head]),
        tf.float32)
    att_mask = tf.constant(
        np.random.binomial(
            n=1, p=0.8, size=[batch_size, long_len, 2 * local_radius + 1]),
        dtype=tf.int32)
    relative_att_ids = tf.constant(
        np.random.randint(
            relative_vocab_size,
            size=[batch_size, long_len, 2 * local_radius + 1]),
        dtype=tf.int32)
    side_keys = tf.constant(
        np.random.normal(
            size=[batch_size, side_len, num_heads, key_size_per_head]),
        tf.float32)
    side_values = tf.constant(
        np.random.normal(
            size=[batch_size, side_len, num_heads, value_size_per_head]),
        tf.float32)
    side_att_mask = tf.constant(
        np.random.binomial(n=1, p=0.8, size=[batch_size, long_len, side_len]),
        dtype=tf.int32)
    side_relative_att_ids = tf.constant(
        np.random.randint(
            relative_vocab_size, size=[batch_size, long_len, side_len]),
        dtype=tf.int32)
    layer = etc_layers.QkvRelativeLocalAttention(
        local_radius=local_radius,
        relative_vocab_size=relative_vocab_size,
        use_one_hot_lookup=use_one_hot_lookup)
    sparse_implementation_result = layer(
        queries,
        keys,
        values,
        att_mask=att_mask,
        relative_att_ids=relative_att_ids,
        side_keys=side_keys,
        side_values=side_values,
        side_att_mask=side_att_mask,
        side_relative_att_ids=side_relative_att_ids,
        att_implementation='sparse')
    full_implementation_result = layer(
        queries,
        keys,
        values,
        att_mask=att_mask,
        relative_att_ids=relative_att_ids,
        side_keys=side_keys,
        side_values=side_values,
        side_att_mask=side_att_mask,
        side_relative_att_ids=side_relative_att_ids,
        att_implementation='full')
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose(sparse_implementation_result,
                        full_implementation_result)
class HelpersTest(tf.test.TestCase):
  """Tests for the `_expand_local_ids_to_blocks` helper in `attention`."""
  def test_expand_local_ids_to_blocks_with_even_blocking(self):
    """Checks expansion when seq_len divides evenly into blocks."""
    # batch_size = 2
    # seq_len = 6
    # local_radius = 1
    # block_len = 2
    # [batch_size, seq_len, 2*local_radius + 1]
    local_ids = tf.constant([
        [
            [1, 2, 3],  #
            [4, 5, 6],  #
            [7, 8, 9],  #
            [10, 11, 12],  #
            [13, 14, 15],  #
            [16, 17, 18],  #
        ],  #
        [
            [-1, -2, -3],  #
            [-4, -5, -6],  #
            [-7, -8, -9],  #
            [-10, -11, -12],  #
            [-13, -14, -15],  #
            [-16, -17, -18],  #
        ],  #
    ])
    # Without padding masking, ids are placed diagonally into each 3-block
    # window unchanged.
    self.assertAllEqual(
        [
            [
                [
                    [0, 1, 2, 3, 0, 0],  #
                    [0, 0, 4, 5, 6, 0],  #
                ],  #
                [
                    [0, 7, 8, 9, 0, 0],  #
                    [0, 0, 10, 11, 12, 0],  #
                ],  #
                [
                    [0, 13, 14, 15, 0, 0],  #
                    [0, 0, 16, 17, 18, 0],  #
                ]
            ],  #
            [
                [
                    [0, -1, -2, -3, 0, 0],  #
                    [0, 0, -4, -5, -6, 0],  #
                ],  #
                [
                    [0, -7, -8, -9, 0, 0],  #
                    [0, 0, -10, -11, -12, 0],  #
                ],  #
                [
                    [0, -13, -14, -15, 0, 0],  #
                    [0, 0, -16, -17, -18, 0],  #
                ]
            ],  #
        ],
        attention._expand_local_ids_to_blocks(
            local_ids, mask_padding_ids=False))
    # With the default padding masking, positions that fall outside the
    # sequence are zeroed out.
    self.assertAllEqual(
        [
            [
                [
                    [0, 0, 2, 3, 0, 0],  #
                    [0, 0, 4, 5, 6, 0],  #
                ],  #
                [
                    [0, 7, 8, 9, 0, 0],  #
                    [0, 0, 10, 11, 12, 0],  #
                ],  #
                [
                    [0, 13, 14, 15, 0, 0],  #
                    [0, 0, 16, 17, 0, 0],  #
                ]
            ],  #
            [
                [
                    [0, 0, -2, -3, 0, 0],  #
                    [0, 0, -4, -5, -6, 0],  #
                ],  #
                [
                    [0, -7, -8, -9, 0, 0],  #
                    [0, 0, -10, -11, -12, 0],  #
                ],  #
                [
                    [0, -13, -14, -15, 0, 0],  #
                    [0, 0, -16, -17, 0, 0],  #
                ]
            ],  #
        ],
        attention._expand_local_ids_to_blocks(local_ids))
  def test_expand_local_ids_to_blocks_with_uneven_blocking(self):
    """Checks expansion when seq_len is not a multiple of block_len."""
    # batch_size = 2
    # seq_len = 5
    # local_radius = 2
    # block_len = 3
    # [batch_size, seq_len, 2*local_radius + 1]
    local_ids = tf.constant([
        [
            [1, 2, 3, 4, 5],  #
            [6, 7, 8, 9, 10],  #
            [11, 12, 13, 14, 15],  #
            [16, 17, 18, 19, 20],  #
            [21, 22, 23, 24, 25],  #
        ],  #
        [
            [-1, -2, -3, -4, -5],  #
            [-6, -7, -8, -9, -10],  #
            [-11, -12, -13, -14, -15],  #
            [-16, -17, -18, -19, -20],  #
            [-21, -22, -23, -24, -25],  #
        ],  #
    ])
    # Without padding masking: the final partial block is zero-padded.
    self.assertAllEqual(
        [
            [
                [
                    [0, 1, 2, 3, 4, 5, 0, 0, 0],  #
                    [0, 0, 6, 7, 8, 9, 10, 0, 0],  #
                    [0, 0, 0, 11, 12, 13, 14, 15, 0],  #
                ],  #
                [
                    [0, 16, 17, 18, 19, 20, 0, 0, 0],  #
                    [0, 0, 21, 22, 23, 24, 25, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ]
            ],  #
            [
                [
                    [0, -1, -2, -3, -4, -5, 0, 0, 0],  #
                    [0, 0, -6, -7, -8, -9, -10, 0, 0],  #
                    [0, 0, 0, -11, -12, -13, -14, -15, 0],  #
                ],  #
                [
                    [0, -16, -17, -18, -19, -20, 0, 0, 0],  #
                    [0, 0, -21, -22, -23, -24, -25, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ]
            ],  #
        ],
        attention._expand_local_ids_to_blocks(
            local_ids, mask_padding_ids=False))
    # With padding masking: out-of-sequence positions are additionally zeroed.
    self.assertAllEqual(
        [
            [
                [
                    [0, 0, 0, 3, 4, 5, 0, 0, 0],  #
                    [0, 0, 0, 7, 8, 9, 10, 0, 0],  #
                    [0, 0, 0, 11, 12, 13, 14, 15, 0],  #
                ],  #
                [
                    [0, 16, 17, 18, 19, 0, 0, 0, 0],  #
                    [0, 0, 21, 22, 23, 0, 0, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ]
            ],  #
            [
                [
                    [0, 0, 0, -3, -4, -5, 0, 0, 0],  #
                    [0, 0, 0, -7, -8, -9, -10, 0, 0],  #
                    [0, 0, 0, -11, -12, -13, -14, -15, 0],  #
                ],  #
                [
                    [0, -16, -17, -18, -19, 0, 0, 0, 0],  #
                    [0, 0, -21, -22, -23, 0, 0, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ]
            ],  #
        ],
        attention._expand_local_ids_to_blocks(local_ids))
  def test_expand_local_ids_to_blocks_with_uneven_blocking_ones_mask(self):
    """Checks expansion of an all-ones mask with uneven blocking."""
    # batch_size = 1
    # seq_len = 7
    # local_radius = 2
    # block_len = 3
    # [batch_size, seq_len, 2*local_radius + 1]
    local_ids = tf.constant([
        [
            [1, 1, 1, 1, 1],  #
            [1, 1, 1, 1, 1],  #
            [1, 1, 1, 1, 1],  #
            [1, 1, 1, 1, 1],  #
            [1, 1, 1, 1, 1],  #
            [1, 1, 1, 1, 1],  #
            [1, 1, 1, 1, 1],  #
        ],  #
    ])
    # Without padding masking.
    self.assertAllEqual(
        [
            [
                [
                    [0, 1, 1, 1, 1, 1, 0, 0, 0],  #
                    [0, 0, 1, 1, 1, 1, 1, 0, 0],  #
                    [0, 0, 0, 1, 1, 1, 1, 1, 0],  #
                ],  #
                [
                    [0, 1, 1, 1, 1, 1, 0, 0, 0],  #
                    [0, 0, 1, 1, 1, 1, 1, 0, 0],  #
                    [0, 0, 0, 1, 1, 1, 1, 1, 0],  #
                ],  #
                [
                    [0, 1, 1, 1, 1, 1, 0, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ],  #
            ],  #
        ],
        attention._expand_local_ids_to_blocks(
            local_ids, mask_padding_ids=False))
    # With padding masking: entries pointing before position 0 or past the
    # end of the sequence are zeroed.
    self.assertAllEqual(
        [
            [
                [
                    [0, 0, 0, 1, 1, 1, 0, 0, 0],  #
                    [0, 0, 0, 1, 1, 1, 1, 0, 0],  #
                    [0, 0, 0, 1, 1, 1, 1, 1, 0],  #
                ],  #
                [
                    [0, 1, 1, 1, 1, 1, 0, 0, 0],  #
                    [0, 0, 1, 1, 1, 1, 1, 0, 0],  #
                    [0, 0, 0, 1, 1, 1, 1, 0, 0],  #
                ],  #
                [
                    [0, 1, 1, 1, 0, 0, 0, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ],  #
            ],  #
        ],
        attention._expand_local_ids_to_blocks(local_ids))
  def test_expand_local_ids_to_blocks_with_degenerate_blocking(self):
    """Checks expansion when seq_len is smaller than block_len."""
    # batch_size = 2
    # seq_len = 2
    # local_radius = 2
    # block_len = 3
    # [batch_size, seq_len, 2*local_radius + 1]
    local_ids = tf.constant([
        [
            [1, 2, 3, 4, 5],  #
            [6, 7, 8, 9, 10],  #
        ],  #
        [
            [-1, -2, -3, -4, -5],  #
            [-6, -7, -8, -9, -10],  #
        ],  #
    ])
    # Without padding masking: a single (partial) block per batch entry.
    self.assertAllEqual(
        [
            [  #
                [
                    [0, 1, 2, 3, 4, 5, 0, 0, 0],  #
                    [0, 0, 6, 7, 8, 9, 10, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ]  #
            ],  #
            [  #
                [
                    [0, -1, -2, -3, -4, -5, 0, 0, 0],  #
                    [0, 0, -6, -7, -8, -9, -10, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ]  #
            ],  #
        ],
        attention._expand_local_ids_to_blocks(
            local_ids, mask_padding_ids=False))
    # With padding masking: only the two in-sequence positions survive.
    self.assertAllEqual(
        [
            [  #
                [
                    [0, 0, 0, 3, 4, 0, 0, 0, 0],  #
                    [0, 0, 0, 7, 8, 0, 0, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ]  #
            ],  #
            [  #
                [
                    [0, 0, 0, -3, -4, 0, 0, 0, 0],  #
                    [0, 0, 0, -7, -8, 0, 0, 0, 0],  #
                    [0, 0, 0, 0, 0, 0, 0, 0, 0],  #
                ]  #
            ],  #
        ],
        attention._expand_local_ids_to_blocks(local_ids))
# Run all TensorFlow test cases in this module when executed as a script.
if __name__ == '__main__':
  tf.test.main()
|
[
"numpy.random.seed",
"etcmodel.layers.RelativeAttention",
"tensorflow.initializers.constant",
"numpy.random.randint",
"numpy.random.normal",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.test.main",
"tensorflow.random.uniform",
"tensorflow.concat",
"etcmodel.layers.FusedGlobalLocalAttention",
"etcmodel.layers.ProjectAttentionHeads",
"tensorflow.ones",
"numpy.random.binomial",
"tensorflow.random.normal",
"tensorflow.constant",
"etcmodel.layers.QkvRelativeLocalAttention",
"etcmodel.layers.attention._expand_local_ids_to_blocks",
"absl.testing.parameterized.named_parameters",
"tensorflow.compat.v1.random.set_random_seed",
"numpy.random.random"
] |
[((1137, 1222), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('using_gather', False)", "('using_one_hot', True)"], {}), "(('using_gather', False), ('using_one_hot', True)\n )\n", (1167, 1222), False, 'from absl.testing import parameterized\n'), ((2629, 2714), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('using_gather', False)", "('using_one_hot', True)"], {}), "(('using_gather', False), ('using_one_hot', True)\n )\n", (2659, 2714), False, 'from absl.testing import parameterized\n'), ((4155, 4240), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('using_gather', False)", "('using_one_hot', True)"], {}), "(('using_gather', False), ('using_one_hot', True)\n )\n", (4185, 4240), False, 'from absl.testing import parameterized\n'), ((15819, 15904), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('using_gather', False)", "('using_one_hot', True)"], {}), "(('using_gather', False), ('using_one_hot', True)\n )\n", (15849, 15904), False, 'from absl.testing import parameterized\n'), ((24814, 25137), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('even_blocking_with_gather', 15, False)", "('uneven_blocking_with_gather', 16, False)", "('degenerate_blocking_with_gather', 35, False)", "('even_blocking_with_one_hot', 15, True)", "('uneven_blocking_with_one_hot', 16, True)", "('degenerate_blocking_with_one_hot', 35, True)"], {}), "(('even_blocking_with_gather', 15, False), (\n 'uneven_blocking_with_gather', 16, False), (\n 'degenerate_blocking_with_gather', 35, False), (\n 'even_blocking_with_one_hot', 15, True), (\n 'uneven_blocking_with_one_hot', 16, True), (\n 'degenerate_blocking_with_one_hot', 35, True))\n", (24844, 25137), False, 'from absl.testing import parameterized\n'), ((37007, 37021), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (37019, 37021), True, 'import tensorflow as 
tf\n'), ((1296, 1337), 'tensorflow.compat.v1.random.set_random_seed', 'tf.compat.v1.random.set_random_seed', (['(1234)'], {}), '(1234)\n', (1331, 1337), True, 'import tensorflow as tf\n'), ((1342, 1362), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (1356, 1362), True, 'import numpy as np\n'), ((1616, 1678), 'tensorflow.random.normal', 'tf.random.normal', (['[batch_size, from_seq_len, from_hidden_size]'], {}), '([batch_size, from_seq_len, from_hidden_size])\n', (1632, 1678), True, 'import tensorflow as tf\n'), ((1692, 1750), 'tensorflow.random.normal', 'tf.random.normal', (['[batch_size, to_seq_len, to_hidden_size]'], {}), '([batch_size, to_seq_len, to_hidden_size])\n', (1708, 1750), True, 'import tensorflow as tf\n'), ((1900, 2006), 'tensorflow.random.uniform', 'tf.random.uniform', (['[batch_size, from_seq_len, to_seq_len]'], {'maxval': 'relative_vocab_size', 'dtype': 'tf.int32'}), '([batch_size, from_seq_len, to_seq_len], maxval=\n relative_vocab_size, dtype=tf.int32)\n', (1917, 2006), True, 'import tensorflow as tf\n'), ((2097, 2338), 'etcmodel.layers.RelativeAttention', 'etc_layers.RelativeAttention', ([], {'hidden_size': 'output_hidden_size', 'num_heads': 'num_heads', 'total_key_size': 'total_key_size', 'total_value_size': 'total_value_size', 'relative_vocab_size': 'relative_vocab_size', 'use_one_hot_lookup': 'use_one_hot_lookup'}), '(hidden_size=output_hidden_size, num_heads=\n num_heads, total_key_size=total_key_size, total_value_size=\n total_value_size, relative_vocab_size=relative_vocab_size,\n use_one_hot_lookup=use_one_hot_lookup)\n', (2125, 2338), True, 'from etcmodel import layers as etc_layers\n'), ((2803, 2844), 'tensorflow.compat.v1.random.set_random_seed', 'tf.compat.v1.random.set_random_seed', (['(1234)'], {}), '(1234)\n', (2838, 2844), True, 'import tensorflow as tf\n'), ((2849, 2869), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (2863, 2869), True, 'import numpy as np\n'), ((3431, 3672), 
'etcmodel.layers.RelativeAttention', 'etc_layers.RelativeAttention', ([], {'hidden_size': 'output_hidden_size', 'num_heads': 'num_heads', 'total_key_size': 'total_key_size', 'total_value_size': 'total_value_size', 'relative_vocab_size': 'relative_vocab_size', 'use_one_hot_lookup': 'use_one_hot_lookup'}), '(hidden_size=output_hidden_size, num_heads=\n num_heads, total_key_size=total_key_size, total_value_size=\n total_value_size, relative_vocab_size=relative_vocab_size,\n use_one_hot_lookup=use_one_hot_lookup)\n', (3459, 3672), True, 'from etcmodel import layers as etc_layers\n'), ((4331, 4372), 'tensorflow.compat.v1.random.set_random_seed', 'tf.compat.v1.random.set_random_seed', (['(1234)'], {}), '(1234)\n', (4366, 4372), True, 'import tensorflow as tf\n'), ((4377, 4397), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (4391, 4397), True, 'import numpy as np\n'), ((4978, 5219), 'etcmodel.layers.RelativeAttention', 'etc_layers.RelativeAttention', ([], {'hidden_size': 'output_hidden_size', 'num_heads': 'num_heads', 'total_key_size': 'total_key_size', 'total_value_size': 'total_value_size', 'relative_vocab_size': 'relative_vocab_size', 'use_one_hot_lookup': 'use_one_hot_lookup'}), '(hidden_size=output_hidden_size, num_heads=\n num_heads, total_key_size=total_key_size, total_value_size=\n total_value_size, relative_vocab_size=relative_vocab_size,\n use_one_hot_lookup=use_one_hot_lookup)\n', (5006, 5219), True, 'from etcmodel import layers as etc_layers\n'), ((5276, 5661), 'etcmodel.layers.RelativeAttention', 'etc_layers.RelativeAttention', ([], {'hidden_size': 'output_hidden_size', 'num_heads': 'num_heads', 'total_key_size': 'total_key_size', 'total_value_size': 'total_value_size', 'query_projection': 'layer.query_projection', 'key_projection': 'layer.key_projection', 'value_projection': 'layer.value_projection', 'qkv_relative_attention': 'layer.qkv_relative_attention', 'output_projection': 'layer.output_projection'}), 
'(hidden_size=output_hidden_size, num_heads=\n num_heads, total_key_size=total_key_size, total_value_size=\n total_value_size, query_projection=layer.query_projection,\n key_projection=layer.key_projection, value_projection=layer.\n value_projection, qkv_relative_attention=layer.qkv_relative_attention,\n output_projection=layer.output_projection)\n', (5304, 5661), True, 'from etcmodel import layers as etc_layers\n'), ((5735, 5976), 'etcmodel.layers.RelativeAttention', 'etc_layers.RelativeAttention', ([], {'hidden_size': 'output_hidden_size', 'num_heads': 'num_heads', 'total_key_size': 'total_key_size', 'total_value_size': 'total_value_size', 'relative_vocab_size': 'relative_vocab_size', 'use_one_hot_lookup': 'use_one_hot_lookup'}), '(hidden_size=output_hidden_size, num_heads=\n num_heads, total_key_size=total_key_size, total_value_size=\n total_value_size, relative_vocab_size=relative_vocab_size,\n use_one_hot_lookup=use_one_hot_lookup)\n', (5763, 5976), True, 'from etcmodel import layers as etc_layers\n'), ((6964, 7005), 'tensorflow.compat.v1.random.set_random_seed', 'tf.compat.v1.random.set_random_seed', (['(1234)'], {}), '(1234)\n', (6999, 7005), True, 'import tensorflow as tf\n'), ((7010, 7030), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (7024, 7030), True, 'import numpy as np\n'), ((7472, 7689), 'etcmodel.layers.FusedGlobalLocalAttention', 'etc_layers.FusedGlobalLocalAttention', ([], {'long_hidden_size': 'hidden_size', 'global_hidden_size': 'hidden_size', 'num_heads': 'num_heads', 'local_radius': 'local_radius', 'share_qkv_projections': '(True)', 'share_att_output_projection': '(True)'}), '(long_hidden_size=hidden_size,\n global_hidden_size=hidden_size, num_heads=num_heads, local_radius=\n local_radius, share_qkv_projections=True, share_att_output_projection=True)\n', (7508, 7689), True, 'from etcmodel import layers as etc_layers\n'), ((7943, 7990), 'tensorflow.concat', 'tf.concat', (['[long_output, global_output]'], {'axis': '(1)'}), 
'([long_output, global_output], axis=1)\n', (7952, 7990), True, 'import tensorflow as tf\n'), ((8131, 8176), 'tensorflow.concat', 'tf.concat', (['[long_input, global_input]'], {'axis': '(1)'}), '([long_input, global_input], axis=1)\n', (8140, 8176), True, 'import tensorflow as tf\n'), ((8203, 8516), 'etcmodel.layers.RelativeAttention', 'etc_layers.RelativeAttention', ([], {'hidden_size': 'hidden_size', 'num_heads': 'num_heads', 'query_projection': 'fused_att_layer.long_query_projection', 'key_projection': 'fused_att_layer.l2l_key_projection', 'value_projection': 'fused_att_layer.l2l_value_projection', 'output_projection': 'fused_att_layer.long_output_projection'}), '(hidden_size=hidden_size, num_heads=num_heads,\n query_projection=fused_att_layer.long_query_projection, key_projection=\n fused_att_layer.l2l_key_projection, value_projection=fused_att_layer.\n l2l_value_projection, output_projection=fused_att_layer.\n long_output_projection)\n', (8231, 8516), True, 'from etcmodel import layers as etc_layers\n'), ((9913, 10224), 'etcmodel.layers.FusedGlobalLocalAttention', 'etc_layers.FusedGlobalLocalAttention', ([], {'long_hidden_size': 'hidden_size', 'global_hidden_size': 'hidden_size', 'num_heads': '(5)', 'local_radius': '(7)', 'relative_vocab_size': '(9)', 'share_kv_projections': 'share_kv_projections', 'share_qkv_projections': 'share_qkv_projections', 'share_att_output_projection': 'share_att_output_projection'}), '(long_hidden_size=hidden_size,\n global_hidden_size=hidden_size, num_heads=5, local_radius=7,\n relative_vocab_size=9, share_kv_projections=share_kv_projections,\n share_qkv_projections=share_qkv_projections,\n share_att_output_projection=share_att_output_projection)\n', (9949, 10224), True, 'from etcmodel import layers as etc_layers\n'), ((12594, 12635), 'tensorflow.compat.v1.random.set_random_seed', 'tf.compat.v1.random.set_random_seed', (['(1234)'], {}), '(1234)\n', (12629, 12635), True, 'import tensorflow as tf\n'), ((12640, 12660), 
'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (12654, 12660), True, 'import numpy as np\n'), ((14270, 14611), 'etcmodel.layers.FusedGlobalLocalAttention', 'etc_layers.FusedGlobalLocalAttention', ([], {'long_hidden_size': 'hidden_size', 'global_hidden_size': 'hidden_size', 'num_heads': 'num_heads', 'local_radius': 'local_radius', 'long_total_att_size': 'total_att_size', 'global_total_att_size': 'total_att_size', 'relative_vocab_size': 'relative_vocab_size', 'share_qkv_projections': '(True)', 'share_att_output_projection': '(True)'}), '(long_hidden_size=hidden_size,\n global_hidden_size=hidden_size, num_heads=num_heads, local_radius=\n local_radius, long_total_att_size=total_att_size, global_total_att_size\n =total_att_size, relative_vocab_size=relative_vocab_size,\n share_qkv_projections=True, share_att_output_projection=True)\n', (14306, 14611), True, 'from etcmodel import layers as etc_layers\n'), ((15447, 15466), 'tensorflow.ones', 'tf.ones', (['[2, 3, 10]'], {}), '([2, 3, 10])\n', (15454, 15466), True, 'import tensorflow as tf\n'), ((15479, 15541), 'etcmodel.layers.ProjectAttentionHeads', 'etc_layers.ProjectAttentionHeads', ([], {'num_heads': '(4)', 'size_per_head': '(5)'}), '(num_heads=4, size_per_head=5)\n', (15511, 15541), True, 'from etcmodel import layers as etc_layers\n'), ((15635, 15657), 'tensorflow.ones', 'tf.ones', (['[2, 3, 4, 10]'], {}), '([2, 3, 4, 10])\n', (15642, 15657), True, 'import tensorflow as tf\n'), ((15670, 15732), 'etcmodel.layers.ProjectAttentionHeads', 'etc_layers.ProjectAttentionHeads', ([], {'num_heads': '(5)', 'size_per_head': '(6)'}), '(num_heads=5, size_per_head=6)\n', (15702, 15732), True, 'from etcmodel import layers as etc_layers\n'), ((16213, 16460), 'tensorflow.constant', 'tf.constant', (['[[[[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], [\n [0.0, 0.0, 1.0], [0.0, 0.0, 1.0]]], [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[1.0, 1.0, 0.0], [1.0, 1.0, 0.0]], [[1.0, 0.0, 0.0], [1.0, 0.0, 
0.0]]]]'], {}), '([[[[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0], [0.0, \n 1.0, 0.0]], [[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]]], [[[1.0, 1.0, 1.0], [\n 1.0, 1.0, 1.0]], [[1.0, 1.0, 0.0], [1.0, 1.0, 0.0]], [[1.0, 0.0, 0.0],\n [1.0, 0.0, 0.0]]]])\n', (16224, 16460), True, 'import tensorflow as tf\n'), ((16636, 16977), 'tensorflow.constant', 'tf.constant', (['[[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[99.0, 0.0, 0.0], [-99.0, 0.0, 0.0]],\n [[0.0, 0.0, 99.0], [0.0, 0.0, -99.0]], [[0.0, 99.0, 0.0], [0.0, -99.0, \n 0.0]]], [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[99.0, 0.0, 0.0], [-99.0,\n 0.0, 0.0]], [[0.0, 99.0, 0.0], [0.0, -99.0, 0.0]], [[0.0, 0.0, 99.0], [\n 0.0, 0.0, -99.0]]]]'], {}), '([[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[99.0, 0.0, 0.0], [-99.0,\n 0.0, 0.0]], [[0.0, 0.0, 99.0], [0.0, 0.0, -99.0]], [[0.0, 99.0, 0.0], [\n 0.0, -99.0, 0.0]]], [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], [[99.0, 0.0, \n 0.0], [-99.0, 0.0, 0.0]], [[0.0, 99.0, 0.0], [0.0, -99.0, 0.0]], [[0.0,\n 0.0, 99.0], [0.0, 0.0, -99.0]]]])\n', (16647, 16977), True, 'import tensorflow as tf\n'), ((17177, 17687), 'tensorflow.constant', 'tf.constant', (['[[[[0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1, -0.1]], [[0.2, 0.2, 0.2,\n 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, -0.2]], [[0.3, 0.3, 0.3, 0.3, 0.3], [\n 0.3, 0.3, 0.3, 0.3, -0.3]], [[0.4, 0.4, 0.4, 0.4, 0.4], [0.4, 0.4, 0.4,\n 0.4, -0.4]]], [[[-0.1, 0.1, 0.1, 0.1, 0.1], [-0.1, 0.1, 0.1, 0.1, -0.1]\n ], [[-0.2, 0.2, 0.2, 0.2, 0.2], [-0.2, 0.2, 0.2, 0.2, -0.2]], [[-0.3, \n 0.3, 0.3, 0.3, 0.3], [-0.3, 0.3, 0.3, 0.3, -0.3]], [[-0.4, 0.4, 0.4, \n 0.4, 0.4], [-0.4, 0.4, 0.4, 0.4, -0.4]]]]'], {}), '([[[[0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1, -0.1]], [[\n 0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, -0.2]], [[0.3, 0.3, 0.3,\n 0.3, 0.3], [0.3, 0.3, 0.3, 0.3, -0.3]], [[0.4, 0.4, 0.4, 0.4, 0.4], [\n 0.4, 0.4, 0.4, 0.4, -0.4]]], [[[-0.1, 0.1, 0.1, 0.1, 0.1], [-0.1, 0.1, \n 0.1, 0.1, -0.1]], [[-0.2, 0.2, 0.2, 0.2, 0.2], [-0.2, 0.2, 0.2, 0.2, -\n 0.2]], [[-0.3, 
0.3, 0.3, 0.3, 0.3], [-0.3, 0.3, 0.3, 0.3, -0.3]], [[-\n 0.4, 0.4, 0.4, 0.4, 0.4], [-0.4, 0.4, 0.4, 0.4, -0.4]]]])\n', (17188, 17687), True, 'import tensorflow as tf\n'), ((17857, 17962), 'tensorflow.constant', 'tf.constant', (['[[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[1, 0, 1, 1], [1, 0, 1, 1], [\n 1, 0, 1, 1]]]'], {}), '([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[1, 0, 1, 1], [1,\n 0, 1, 1], [1, 0, 1, 1]]])\n', (17868, 17962), True, 'import tensorflow as tf\n'), ((18139, 18244), 'tensorflow.constant', 'tf.constant', (['[[[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 5]], [[0, 0, 0, 0], [0, 0, 0, 0], [\n 0, 0, 2, 2]]]'], {}), '([[[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 5]], [[0, 0, 0, 0], [0,\n 0, 0, 0], [0, 0, 2, 2]]])\n', (18150, 18244), True, 'import tensorflow as tf\n'), ((21597, 21638), 'tensorflow.compat.v1.random.set_random_seed', 'tf.compat.v1.random.set_random_seed', (['(1234)'], {}), '(1234)\n', (21632, 21638), True, 'import tensorflow as tf\n'), ((21643, 21663), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (21657, 21663), True, 'import numpy as np\n'), ((23151, 23303), 'etcmodel.layers.QkvRelativeLocalAttention', 'etc_layers.QkvRelativeLocalAttention', ([], {'local_radius': 'local_radius', 'relative_vocab_size': 'relative_vocab_size', 'use_one_hot_lookup': 'use_one_hot_lookup'}), '(local_radius=local_radius,\n relative_vocab_size=relative_vocab_size, use_one_hot_lookup=\n use_one_hot_lookup)\n', (23187, 23303), True, 'from etcmodel import layers as etc_layers\n'), ((25408, 25449), 'tensorflow.compat.v1.random.set_random_seed', 'tf.compat.v1.random.set_random_seed', (['(1234)'], {}), '(1234)\n', (25443, 25449), True, 'import tensorflow as tf\n'), ((25454, 25474), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (25468, 25474), True, 'import numpy as np\n'), ((27057, 27209), 'etcmodel.layers.QkvRelativeLocalAttention', 'etc_layers.QkvRelativeLocalAttention', ([], {'local_radius': 'local_radius', 
'relative_vocab_size': 'relative_vocab_size', 'use_one_hot_lookup': 'use_one_hot_lookup'}), '(local_radius=local_radius,\n relative_vocab_size=relative_vocab_size, use_one_hot_lookup=\n use_one_hot_lookup)\n', (27093, 27209), True, 'from etcmodel import layers as etc_layers\n'), ((28341, 28535), 'tensorflow.constant', 'tf.constant', (['[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15], [16, 17, 18]\n ], [[-1, -2, -3], [-4, -5, -6], [-7, -8, -9], [-10, -11, -12], [-13, -\n 14, -15], [-16, -17, -18]]]'], {}), '([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15],\n [16, 17, 18]], [[-1, -2, -3], [-4, -5, -6], [-7, -8, -9], [-10, -11, -\n 12], [-13, -14, -15], [-16, -17, -18]]])\n', (28352, 28535), True, 'import tensorflow as tf\n'), ((30911, 31168), 'tensorflow.constant', 'tf.constant', (['[[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19,\n 20], [21, 22, 23, 24, 25]], [[-1, -2, -3, -4, -5], [-6, -7, -8, -9, -10\n ], [-11, -12, -13, -14, -15], [-16, -17, -18, -19, -20], [-21, -22, -23,\n -24, -25]]]'], {}), '([[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16,\n 17, 18, 19, 20], [21, 22, 23, 24, 25]], [[-1, -2, -3, -4, -5], [-6, -7,\n -8, -9, -10], [-11, -12, -13, -14, -15], [-16, -17, -18, -19, -20], [-\n 21, -22, -23, -24, -25]]])\n', (30922, 31168), True, 'import tensorflow as tf\n'), ((33610, 33749), 'tensorflow.constant', 'tf.constant', (['[[[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1,\n 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]]'], {}), '([[[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, \n 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]])\n', (33621, 33749), True, 'import tensorflow as tf\n'), ((35584, 35686), 'tensorflow.constant', 'tf.constant', (['[[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], [[-1, -2, -3, -4, -5], [-6, -7, -8, -\n 9, -10]]]'], {}), '([[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], [[-1, -2, -3, -4, -5], [-\n 6, -7, -8, -9, -10]]])\n', (35595, 
35686), True, 'import tensorflow as tf\n'), ((1787, 1862), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.9)', 'size': '[batch_size, from_seq_len, to_seq_len]'}), '(n=1, p=0.9, size=[batch_size, from_seq_len, to_seq_len])\n', (1805, 1862), True, 'import numpy as np\n'), ((3094, 3157), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, seq_len, input_hidden_size]'}), '(size=[batch_size, seq_len, input_hidden_size])\n', (3110, 3157), True, 'import numpy as np\n'), ((3215, 3282), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.9)', 'size': '[batch_size, seq_len, seq_len]'}), '(n=1, p=0.9, size=[batch_size, seq_len, seq_len])\n', (3233, 3282), True, 'import numpy as np\n'), ((3328, 3403), 'numpy.random.randint', 'np.random.randint', (['relative_vocab_size'], {'size': '[batch_size, seq_len, seq_len]'}), '(relative_vocab_size, size=[batch_size, seq_len, seq_len])\n', (3345, 3403), True, 'import numpy as np\n'), ((4064, 4107), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (4105, 4107), True, 'import tensorflow as tf\n'), ((4671, 4738), 'numpy.random.random', 'np.random.random', ([], {'size': '[batch_size, from_seq_len, from_hidden_size]'}), '(size=[batch_size, from_seq_len, from_hidden_size])\n', (4687, 4738), True, 'import numpy as np\n'), ((4774, 4837), 'numpy.random.random', 'np.random.random', ([], {'size': '[batch_size, to_seq_len, to_hidden_size]'}), '(size=[batch_size, to_seq_len, to_hidden_size])\n', (4790, 4837), True, 'import numpy as np\n'), ((4875, 4950), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.9)', 'size': '[batch_size, from_seq_len, to_seq_len]'}), '(n=1, p=0.9, size=[batch_size, from_seq_len, to_seq_len])\n', (4893, 4950), True, 'import numpy as np\n'), ((6439, 6482), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', 
(6480, 6482), True, 'import tensorflow as tf\n'), ((7279, 7341), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, long_seq_len, hidden_size]'}), '(size=[batch_size, long_seq_len, hidden_size])\n', (7295, 7341), True, 'import numpy as np\n'), ((7383, 7447), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, global_seq_len, hidden_size]'}), '(size=[batch_size, global_seq_len, hidden_size])\n', (7399, 7447), True, 'import numpy as np\n'), ((8622, 8665), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (8663, 8665), True, 'import tensorflow as tf\n'), ((12877, 12939), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, long_seq_len, hidden_size]'}), '(size=[batch_size, long_seq_len, hidden_size])\n', (12893, 12939), True, 'import numpy as np\n'), ((12981, 13045), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, global_seq_len, hidden_size]'}), '(size=[batch_size, global_seq_len, hidden_size])\n', (12997, 13045), True, 'import numpy as np\n'), ((13087, 13176), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.9)', 'size': '[batch_size, long_seq_len, 2 * local_radius + 1]'}), '(n=1, p=0.9, size=[batch_size, long_seq_len, 2 *\n local_radius + 1])\n', (13105, 13176), True, 'import numpy as np\n'), ((13227, 13312), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.9)', 'size': '[batch_size, global_seq_len, global_seq_len]'}), '(n=1, p=0.9, size=[batch_size, global_seq_len,\n global_seq_len])\n', (13245, 13312), True, 'import numpy as np\n'), ((13363, 13442), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.9)', 'size': '[batch_size, long_seq_len, global_seq_len]'}), '(n=1, p=0.9, size=[batch_size, long_seq_len, global_seq_len])\n', (13381, 13442), True, 'import numpy as np\n'), ((13497, 13576), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': 
'(1)', 'p': '(0.9)', 'size': '[batch_size, global_seq_len, long_seq_len]'}), '(n=1, p=0.9, size=[batch_size, global_seq_len, long_seq_len])\n', (13515, 13576), True, 'import numpy as np\n'), ((13639, 13736), 'numpy.random.randint', 'np.random.randint', (['relative_vocab_size'], {'size': '[batch_size, long_seq_len, 2 * local_radius + 1]'}), '(relative_vocab_size, size=[batch_size, long_seq_len, 2 *\n local_radius + 1])\n', (13656, 13736), True, 'import numpy as np\n'), ((13807, 13900), 'numpy.random.randint', 'np.random.randint', (['relative_vocab_size'], {'size': '[batch_size, global_seq_len, global_seq_len]'}), '(relative_vocab_size, size=[batch_size, global_seq_len,\n global_seq_len])\n', (13824, 13900), True, 'import numpy as np\n'), ((13971, 14062), 'numpy.random.randint', 'np.random.randint', (['relative_vocab_size'], {'size': '[batch_size, long_seq_len, global_seq_len]'}), '(relative_vocab_size, size=[batch_size, long_seq_len,\n global_seq_len])\n', (13988, 14062), True, 'import numpy as np\n'), ((14133, 14224), 'numpy.random.randint', 'np.random.randint', (['relative_vocab_size'], {'size': '[batch_size, global_seq_len, long_seq_len]'}), '(relative_vocab_size, size=[batch_size, global_seq_len,\n long_seq_len])\n', (14150, 14224), True, 'import numpy as np\n'), ((15123, 15166), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (15164, 15166), True, 'import tensorflow as tf\n'), ((19088, 19131), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (19129, 19131), True, 'import tensorflow as tf\n'), ((21896, 21971), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, long_len, num_heads, key_size_per_head]'}), '(size=[batch_size, long_len, num_heads, key_size_per_head])\n', (21912, 21971), True, 'import numpy as np\n'), ((22038, 22113), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, long_len, 
num_heads, key_size_per_head]'}), '(size=[batch_size, long_len, num_heads, key_size_per_head])\n', (22054, 22113), True, 'import numpy as np\n'), ((22182, 22259), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, long_len, num_heads, value_size_per_head]'}), '(size=[batch_size, long_len, num_heads, value_size_per_head])\n', (22198, 22259), True, 'import numpy as np\n'), ((22330, 22415), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.9)', 'size': '[batch_size, long_len, 2 * local_radius + 1]'}), '(n=1, p=0.9, size=[batch_size, long_len, 2 * local_radius +\n 1])\n', (22348, 22415), True, 'import numpy as np\n'), ((22470, 22563), 'numpy.random.randint', 'np.random.randint', (['relative_vocab_size'], {'size': '[batch_size, long_len, 2 * local_radius + 1]'}), '(relative_vocab_size, size=[batch_size, long_len, 2 *\n local_radius + 1])\n', (22487, 22563), True, 'import numpy as np\n'), ((22624, 22699), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, side_len, num_heads, key_size_per_head]'}), '(size=[batch_size, side_len, num_heads, key_size_per_head])\n', (22640, 22699), True, 'import numpy as np\n'), ((22773, 22850), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, side_len, num_heads, value_size_per_head]'}), '(size=[batch_size, side_len, num_heads, value_size_per_head])\n', (22789, 22850), True, 'import numpy as np\n'), ((22926, 22995), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.9)', 'size': '[batch_size, long_len, side_len]'}), '(n=1, p=0.9, size=[batch_size, long_len, side_len])\n', (22944, 22995), True, 'import numpy as np\n'), ((23046, 23123), 'numpy.random.randint', 'np.random.randint', (['relative_vocab_size'], {'size': '[batch_size, long_len, side_len]'}), '(relative_vocab_size, size=[batch_size, long_len, side_len])\n', (23063, 23123), True, 'import numpy as np\n'), ((24630, 24673), 'tensorflow.compat.v1.global_variables_initializer', 
'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (24671, 24673), True, 'import tensorflow as tf\n'), ((25707, 25782), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, long_len, num_heads, key_size_per_head]'}), '(size=[batch_size, long_len, num_heads, key_size_per_head])\n', (25723, 25782), True, 'import numpy as np\n'), ((25849, 25924), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, long_len, num_heads, key_size_per_head]'}), '(size=[batch_size, long_len, num_heads, key_size_per_head])\n', (25865, 25924), True, 'import numpy as np\n'), ((25993, 26070), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, long_len, num_heads, value_size_per_head]'}), '(size=[batch_size, long_len, num_heads, value_size_per_head])\n', (26009, 26070), True, 'import numpy as np\n'), ((26141, 26226), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.8)', 'size': '[batch_size, long_len, 2 * local_radius + 1]'}), '(n=1, p=0.8, size=[batch_size, long_len, 2 * local_radius +\n 1])\n', (26159, 26226), True, 'import numpy as np\n'), ((26305, 26398), 'numpy.random.randint', 'np.random.randint', (['relative_vocab_size'], {'size': '[batch_size, long_len, 2 * local_radius + 1]'}), '(relative_vocab_size, size=[batch_size, long_len, 2 *\n local_radius + 1])\n', (26322, 26398), True, 'import numpy as np\n'), ((26482, 26557), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, side_len, num_heads, key_size_per_head]'}), '(size=[batch_size, side_len, num_heads, key_size_per_head])\n', (26498, 26557), True, 'import numpy as np\n'), ((26631, 26708), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[batch_size, side_len, num_heads, value_size_per_head]'}), '(size=[batch_size, side_len, num_heads, value_size_per_head])\n', (26647, 26708), True, 'import numpy as np\n'), ((26784, 26853), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.8)', 'size': 
'[batch_size, long_len, side_len]'}), '(n=1, p=0.8, size=[batch_size, long_len, side_len])\n', (26802, 26853), True, 'import numpy as np\n'), ((26928, 27005), 'numpy.random.randint', 'np.random.randint', (['relative_vocab_size'], {'size': '[batch_size, long_len, side_len]'}), '(relative_vocab_size, size=[batch_size, long_len, side_len])\n', (26945, 27005), True, 'import numpy as np\n'), ((27939, 27982), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (27980, 27982), True, 'import tensorflow as tf\n'), ((29661, 29733), 'etcmodel.layers.attention._expand_local_ids_to_blocks', 'attention._expand_local_ids_to_blocks', (['local_ids'], {'mask_padding_ids': '(False)'}), '(local_ids, mask_padding_ids=False)\n', (29698, 29733), False, 'from etcmodel.layers import attention\n'), ((30647, 30695), 'etcmodel.layers.attention._expand_local_ids_to_blocks', 'attention._expand_local_ids_to_blocks', (['local_ids'], {}), '(local_ids)\n', (30684, 30695), False, 'from etcmodel.layers import attention\n'), ((32309, 32381), 'etcmodel.layers.attention._expand_local_ids_to_blocks', 'attention._expand_local_ids_to_blocks', (['local_ids'], {'mask_padding_ids': '(False)'}), '(local_ids, mask_padding_ids=False)\n', (32346, 32381), False, 'from etcmodel.layers import attention\n'), ((33336, 33384), 'etcmodel.layers.attention._expand_local_ids_to_blocks', 'attention._expand_local_ids_to_blocks', (['local_ids'], {}), '(local_ids)\n', (33373, 33384), False, 'from etcmodel.layers import attention\n'), ((34554, 34626), 'etcmodel.layers.attention._expand_local_ids_to_blocks', 'attention._expand_local_ids_to_blocks', (['local_ids'], {'mask_padding_ids': '(False)'}), '(local_ids, mask_padding_ids=False)\n', (34591, 34626), False, 'from etcmodel.layers import attention\n'), ((35316, 35364), 'etcmodel.layers.attention._expand_local_ids_to_blocks', 'attention._expand_local_ids_to_blocks', (['local_ids'], {}), '(local_ids)\n', (35353, 
35364), False, 'from etcmodel.layers import attention\n'), ((36320, 36392), 'etcmodel.layers.attention._expand_local_ids_to_blocks', 'attention._expand_local_ids_to_blocks', (['local_ids'], {'mask_padding_ids': '(False)'}), '(local_ids, mask_padding_ids=False)\n', (36357, 36392), False, 'from etcmodel.layers import attention\n'), ((36926, 36974), 'etcmodel.layers.attention._expand_local_ids_to_blocks', 'attention._expand_local_ids_to_blocks', (['local_ids'], {}), '(local_ids)\n', (36963, 36974), False, 'from etcmodel.layers import attention\n'), ((10358, 10386), 'tensorflow.ones', 'tf.ones', (['[1, 1, hidden_size]'], {}), '([1, 1, hidden_size])\n', (10365, 10386), True, 'import tensorflow as tf\n'), ((10409, 10437), 'tensorflow.ones', 'tf.ones', (['[1, 1, hidden_size]'], {}), '([1, 1, hidden_size])\n', (10416, 10437), True, 'import tensorflow as tf\n'), ((18865, 18909), 'tensorflow.initializers.constant', 'tf.initializers.constant', (['relative_emb_table'], {}), '(relative_emb_table)\n', (18889, 18909), True, 'import tensorflow as tf\n')]
|
"""Manage grid logic"""
import math
from random import random
import numpy as np
from PIL import Image, ImageDraw, ImageFilter
# Defaults (see Grid.__init__ for how these are used)
HNPOLY = 64  # grid resolution: the image height is split into HNPOLY polygons
COEF = 0.560451  # vertex displacement in [0, 1), as a fraction of the cell side
NCOLORS = 128  # reduce the image palette to NCOLORS colors
class Grid():
    """Square grid of randomly perturbed quadrilaterals rendered as a mosaic.

    The input image is cropped to a square, reduced to ``ncolors`` colors
    and partitioned into ``hnpoly`` x ``hnpoly`` cells.  Every interior
    grid vertex is then displaced by a random amount and each cell is
    drawn as a filled quadrilateral in the cell's dominant color.
    """

    def __init__(self, image,
                 hnpoly=HNPOLY, coef=COEF, ncolors=NCOLORS):
        """Build the mosaic immediately.

        :param image: anything accepted by ``PIL.Image.open``
            (file name or file object).
        :param hnpoly: number of polygons along each side of the image.
        :param coef: vertex displacement radius in [0, 1), expressed as a
            fraction of the cell side.
        :param ncolors: size of the palette the image is reduced to.
        """
        # config
        self.__nhpoly = hnpoly
        self.__coef = coef
        self.__ncolors = ncolors
        # grid size, in cells
        self.w = 0
        self.h = 0
        # cell side length, in pixels
        self.__poly_side = 0
        self.image = None   # palette-reduced square source image
        self.mosaic = None  # rendered output image
        self.__assign_image(image)
        # Per-cell data: dominant color plus metrics (currently unused).
        self.__cells = np.zeros(
            (self.h, self.w),
            dtype=[('rgb', int, 3), ('noise', float), ('sharpness', float)]
        )
        # (h+1) x (w+1) lattice of cell-corner coordinates, in pixels.
        self.__dots = np.zeros((self.h + 1, self.w + 1),
                               dtype=[('x', float), ('y', float)])
        # Order matters: cells are parsed from the regular (unshuffled)
        # lattice, then the vertices are perturbed before rendering.
        self.__place_dots()
        self.__parse_image()
        self.__shuffle_dots()
        self.__render_mosaic()

    def width(self):
        """Return the grid width, in cells."""
        return self.w

    def height(self):
        """Return the grid height, in cells."""
        return self.h

    def count(self):
        """Return the total number of cells."""
        return self.w * self.h

    def show_mosaic(self):
        """Open the rendered mosaic in the default image viewer."""
        self.mosaic.show()

    def show_origin(self):
        """Open the (cropped, palette-reduced) source image."""
        self.image.show()

    def __assign_image(self, image):
        """Load *image*, crop it to a square and reduce its palette."""
        # Image.open raises on failure, so no explicit None check is needed.
        im = Image.open(image)
        # TODO rectangular images
        wpx, hpx = im.size
        # Crop to a square using the smaller dimension.
        size = min(wpx, hpx)
        if wpx != hpx:
            im = im.crop((0, 0, size, size))
        h = self.__nhpoly
        # BUGFIX: the cell side must be derived from the *cropped* size;
        # it was previously computed from the original height even when
        # the image had been cropped to its (smaller) width.
        side = size / self.__nhpoly
        w = h
        # TODO work with pre-defined palette
        im = im.convert("P", palette=Image.ADAPTIVE, colors=self.__ncolors)
        im = im.convert("RGB", palette=Image.ADAPTIVE, colors=self.__ncolors)
        self.image = im
        self.h = h
        self.w = w
        self.__poly_side = side

    def __shuffle_dots(self):
        """Randomly displace every grid vertex.

        Interior vertices move inside a disc of radius R; perimeter
        vertices slide only along their edge so the outline stays
        rectangular; the four corner vertices stay fixed.
        """
        R = self.__poly_side * self.__coef
        # Interior vertices: uniform random angle, radius in [0, R).
        for i in range(1, self.h):
            for j in range(1, self.w):
                dot = self.__dots[i, j]
                t = 2 * math.pi * random()
                r = R * random()
                dot['y'] += r * math.sin(t)
                dot['x'] += r * math.cos(t)
        # Perimeter vertices: top/bottom rows move horizontally,
        # left/right columns move vertically.
        for j in range(1, self.w):
            self.__dots[0, j]['x'] += R * random()
            self.__dots[self.h, j]['x'] += R * random()
        for i in range(1, self.h):
            self.__dots[i, 0]['y'] += R * random()
            self.__dots[i, self.w]['y'] += R * random()

    def __render_mosaic(self):
        """Draw every cell as a filled quadrilateral into ``self.mosaic``."""
        img = Image.new('RGB', self.image.size)
        draw = ImageDraw.Draw(img)
        dots = self.__dots
        for y in range(self.h):
            for x in range(self.w):
                cell = self.__cells[y, x]
                # Corner order: up-left, up-right, down-right, down-left.
                xy = [(d['x'], d['y']) for d in (
                    dots[y, x],
                    dots[y, x + 1],
                    dots[y + 1, x + 1],
                    dots[y + 1, x],
                )]
                col = (*cell['rgb'],)
                draw.polygon(xy, fill=col, outline=col)
        # BUGFIX: Image.filter returns a *new* image instead of modifying
        # in place; the smoothed result was previously discarded.
        self.mosaic = img.filter(ImageFilter.SMOOTH_MORE)

    def __parse_cell(self, y, x):
        """Store the dominant color of the image region under cell (y, x)."""
        cell = self.__cells[y, x]
        lu = self.__dots[y, x]          # up-left corner
        rd = self.__dots[y + 1, x + 1]  # down-right corner
        crop = self.image.crop((lu['x'], lu['y'], rd['x'], rd['y']))
        cell['rgb'] = getcolors(crop)[0]
        # TODO compute real metrics
        cell['noise'] = 0
        cell['sharpness'] = 0

    def __place_dots(self):
        """Place the vertex lattice on a regular grid (pixel coordinates)."""
        h, w = self.__dots.shape
        side = self.__poly_side
        for i in range(h):
            for j in range(w):
                d = self.__dots[i, j]
                d['y'] = int(i * side)
                d['x'] = int(j * side)

    def __parse_image(self):
        """Extract per-cell information for the whole grid."""
        for y in range(self.h):
            for x in range(self.w):
                self.__parse_cell(y, x)
def getcolors(img, n=2, ncolors=256):
    """Return the *n* most frequent colors of *img*, most frequent first.

    :param img: a PIL image (anything exposing ``getcolors`` and ``size``).
    :param n: number of colors to return.
    :param ncolors: initial maximum number of distinct colors to consider.
    :return: list of at most *n* color values, sorted by decreasing count.
    """
    cols = img.getcolors(ncolors)
    if cols is None:
        # Image.getcolors returns None when the image contains more than
        # `maxcolors` distinct colors; retry with one slot per pixel so the
        # call cannot fail (previously this crashed in sorted()).
        cols = img.getcolors(img.size[0] * img.size[1])
    cols.sort(key=lambda entry: entry[0], reverse=True)
    return [color for _count, color in cols[:n]]
|
[
"PIL.Image.new",
"numpy.zeros",
"math.sin",
"PIL.Image.open",
"random.random",
"math.cos",
"PIL.ImageDraw.Draw"
] |
[((766, 862), 'numpy.zeros', 'np.zeros', (['(self.h, self.w)'], {'dtype': "[('rgb', int, 3), ('noise', float), ('sharpness', float)]"}), "((self.h, self.w), dtype=[('rgb', int, 3), ('noise', float), (\n 'sharpness', float)])\n", (774, 862), True, 'import numpy as np\n'), ((906, 976), 'numpy.zeros', 'np.zeros', (['(self.h + 1, self.w + 1)'], {'dtype': "[('x', float), ('y', float)]"}), "((self.h + 1, self.w + 1), dtype=[('x', float), ('y', float)])\n", (914, 976), True, 'import numpy as np\n'), ((1394, 1411), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1404, 1411), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((2863, 2896), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'self.image.size'], {}), "('RGB', self.image.size)\n", (2872, 2896), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((2938, 2957), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (2952, 2957), False, 'from PIL import Image, ImageDraw, ImageFilter\n'), ((2540, 2548), 'random.random', 'random', ([], {}), '()\n', (2546, 2548), False, 'from random import random\n'), ((2631, 2639), 'random.random', 'random', ([], {}), '()\n', (2637, 2639), False, 'from random import random\n'), ((2717, 2725), 'random.random', 'random', ([], {}), '()\n', (2723, 2725), False, 'from random import random\n'), ((2808, 2816), 'random.random', 'random', ([], {}), '()\n', (2814, 2816), False, 'from random import random\n'), ((2313, 2321), 'random.random', 'random', ([], {}), '()\n', (2319, 2321), False, 'from random import random\n'), ((2346, 2354), 'random.random', 'random', ([], {}), '()\n', (2352, 2354), False, 'from random import random\n'), ((2387, 2398), 'math.sin', 'math.sin', (['t'], {}), '(t)\n', (2395, 2398), False, 'import math\n'), ((2431, 2442), 'math.cos', 'math.cos', (['t'], {}), '(t)\n', (2439, 2442), False, 'import math\n')]
|
from .particle_filter_base import ParticleFilter
from core.resampling.resampler import Resampler
import copy
import numpy as np
from scipy.stats import multivariate_normal
from scipy import linalg
class KalmanParticleFilter(ParticleFilter):
    """
    Extended Kalman particle filter: every particle carries its own EKF whose
    posterior is used as the importance density when sampling new states.

    Notes:
        * State is (x, y, heading), where x and y are in meters and heading in radians
        * State space assumed limited size in each dimension, world is cyclic (hence leaving at x_max means
          entering at x_min)
        * propagation and measurement models are largely hardcoded (except for standard deviations).

    For convenience reasons this class inherits from the generic particle filter class. Note however, that some of
    the methods are hardcoded and some of the members are unused.
    """

    def __init__(self,
                 number_of_particles,
                 limits,
                 process_noise,
                 measurement_noise):
        """
        Initialize the extended Kalman particle filter. Resampling method is hardcoded hence no argument.

        :param number_of_particles: Number of particles.
        :param limits: List with maximum and minimum values for x and y dimension: [xmin, xmax, ymin, ymax].
        :param process_noise: Process noise parameters (standard deviations): [std_forward, std_angular].
        :param measurement_noise: Measurement noise parameters (standard deviations): [std_range, std_angle].
        """
        # Initialize particle filter base class
        ParticleFilter.__init__(self, number_of_particles, limits, process_noise, measurement_noise)

        # Initialize covariance matrices for the EKF.
        # NOTE(review): these diagonals are built from standard deviations, not
        # variances — confirm this is intentional before changing.
        self.Q = np.diag([process_noise[0], process_noise[0], process_noise[1]])
        self.R = np.diag([measurement_noise[0], measurement_noise[1]])

    def initialize_particles_uniform(self):
        """
        Initialize the particles uniformly over the world assuming a 3D state (x, y, heading). No arguments are
        required and function always succeeds hence no return value.
        """
        # Initialize particles with uniform weight distribution
        super(KalmanParticleFilter, self).initialize_particles_uniform()

        # Add a covariance matrix to each particle; a particle now is the list
        # [weight, particle_state, covariance_matrix].
        for particle in self.particles:
            particle.append(np.eye(3))

    def multinomial_resampling(self):
        """
        Particles are sampled with replacement proportional to their weight and in arbitrary order. This leads
        to a maximum variance on the number of times a particle will be resampled, since any particle will be
        resampled between 0 and N times.

        This function is reimplemented in this class since each particle now contains three elements instead of
        two (the covariance of the extended Kalman filter is added).
        """
        # Cumulative distribution over the particle weights
        weights = [weighted_sample[0] for weighted_sample in self.particles]
        cumulative_weights = np.cumsum(weights).tolist()

        # Draw particles (with replacement) until the new set is full
        n = 0
        new_particles = []
        while n < self.n_particles:
            # Draw a random sample u (strictly positive, so the search below terminates)
            u = np.random.uniform(1e-6, 1, 1)[0]

            # Naive linear search for the first cumulative weight >= u
            m = 0
            while cumulative_weights[m] < u:
                m += 1

            # Add a copy of the selected particle but reset it to uniform weight
            new_sample = copy.deepcopy(self.particles[m])
            new_sample[0] = 1.0 / self.n_particles
            new_particles.append(new_sample)

            # Added another sample
            n += 1
        self.particles = new_particles

    def update(self, robot_forward_motion, robot_angular_motion, measurements, landmarks):
        """
        Process a measurement given the measured robot displacement and resample.

        :param robot_forward_motion: Measured forward robot motion in meters.
        :param robot_angular_motion: Measured angular robot motion in radians.
        :param measurements: Measurements.
        :param landmarks: Landmark positions.
        """
        # Loop over all particles
        new_particles = []
        sum_weights = 0.0
        for par in self.particles:
            # This time an extended Kalman filter prediction is included in the propagation step
            # note: par = [weight, state, EKF_covariance]
            propagated_state = copy.deepcopy(par[1])
            cov_ekf = copy.deepcopy(par[2])

            # Propagate the particle state using the nonlinear process model
            propagated_state[2] += robot_angular_motion
            propagated_state[0] += robot_forward_motion * np.cos(propagated_state[2])
            propagated_state[1] += robot_forward_motion * np.sin(propagated_state[2])
            self.validate_state(propagated_state)

            # Compute Jacobian (df/dx) around the propagated state
            F = np.eye(3)
            F[0, 2] = -robot_forward_motion * np.sin(propagated_state[2])
            F[1, 2] = robot_forward_motion * np.cos(propagated_state[2])

            # EKF time update of the covariance: P = F P F^T + Q.
            # Bug fix: the original used elementwise '*' instead of matrix
            # products, which corrupts the covariance whenever F is not
            # diagonal (i.e. whenever the robot moves forward).
            cov_ekf = np.dot(np.dot(F, cov_ekf), np.transpose(F)) + self.Q

            # Process measurements one by one (EKF measurement update)
            for idx, landmark in enumerate(landmarks):
                # Expected range/bearing measurement for this landmark
                dx = propagated_state[0] - landmark[0]
                dy = propagated_state[1] - landmark[1]
                z1_exp = np.sqrt((dx*dx + dy*dy))
                z2_exp = np.arctan2(dy, dx)

                # Compute Jacobian (dh/dx) around the propagated state
                H11 = dx / (np.sqrt(dx*dx + dy*dy))
                H12 = dy / (np.sqrt(dx*dx + dy*dy))
                H21 = 1 / (1 + (dy / dx)**2) * -dy / dx**2
                H22 = 1 / (1 + (dy / dx)**2) * 1 / dx
                H = np.array([[H11, H12, 0], [H21, H22, 0]])

                # Innovation and its covariance
                y_tilde = np.array([[measurements[idx][0] - z1_exp], [measurements[idx][1] - z2_exp]])
                S = np.dot(np.dot(H, cov_ekf), np.transpose(H)) + self.R

                # Kalman gain (pseudo-inverse for numerical robustness)
                K = np.dot(np.dot(cov_ekf, np.transpose(H)), linalg.pinv(S))

                # Update state vector and covariance
                delta_state = np.dot(K, y_tilde)
                propagated_state[0] += delta_state[0][0]
                propagated_state[1] += delta_state[1][0]
                propagated_state[2] += delta_state[2][0]
                self.validate_state(propagated_state)
                cov_ekf = np.dot((np.eye(3) - np.dot(K, H)), cov_ekf)

            # New particle state: sample from the per-particle EKF posterior
            # (this is the importance density).
            updated_state = np.random.multivariate_normal(propagated_state, cov_ekf)
            self.validate_state(updated_state)

            # Compute likelihood using the propagated state
            likelihood = self.compute_likelihood(propagated_state, measurements, landmarks)

            # Compute prior (mean is zero vector by default)
            prior = multivariate_normal.pdf(updated_state-propagated_state, cov=self.Q)

            # Importance density
            importance_density = multivariate_normal.pdf(updated_state-propagated_state, cov=cov_ekf)

            # Compute current particle's weight
            weight = likelihood * prior / importance_density
            sum_weights += weight

            # Store updated particle
            new_particles.append([weight, propagated_state, cov_ekf])

        # Normalize particle weights
        if sum_weights < 1e-10:
            print("Warning: sum particles weights very low")
        for par in new_particles:
            par[0] /= sum_weights

        # Update particles
        self.particles = new_particles

        # Resample at each time step
        self.multinomial_resampling()
|
[
"numpy.random.uniform",
"copy.deepcopy",
"numpy.arctan2",
"numpy.eye",
"numpy.transpose",
"numpy.cumsum",
"numpy.sin",
"numpy.random.multivariate_normal",
"numpy.array",
"scipy.stats.multivariate_normal.pdf",
"numpy.cos",
"numpy.dot",
"numpy.diag",
"scipy.linalg.pinv",
"numpy.sqrt"
] |
[((1674, 1737), 'numpy.diag', 'np.diag', (['[process_noise[0], process_noise[0], process_noise[1]]'], {}), '([process_noise[0], process_noise[0], process_noise[1]])\n', (1681, 1737), True, 'import numpy as np\n'), ((1755, 1808), 'numpy.diag', 'np.diag', (['[measurement_noise[0], measurement_noise[1]]'], {}), '([measurement_noise[0], measurement_noise[1]])\n', (1762, 1808), True, 'import numpy as np\n'), ((3516, 3548), 'copy.deepcopy', 'copy.deepcopy', (['self.particles[m]'], {}), '(self.particles[m])\n', (3529, 3548), False, 'import copy\n'), ((4493, 4514), 'copy.deepcopy', 'copy.deepcopy', (['par[1]'], {}), '(par[1])\n', (4506, 4514), False, 'import copy\n'), ((4537, 4558), 'copy.deepcopy', 'copy.deepcopy', (['par[2]'], {}), '(par[2])\n', (4550, 4558), False, 'import copy\n'), ((4992, 5001), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4998, 5001), True, 'import numpy as np\n'), ((6770, 6826), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['propagated_state', 'cov_ekf'], {}), '(propagated_state, cov_ekf)\n', (6799, 6826), True, 'import numpy as np\n'), ((7104, 7173), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['(updated_state - propagated_state)'], {'cov': 'self.Q'}), '(updated_state - propagated_state, cov=self.Q)\n', (7127, 7173), False, 'from scipy.stats import multivariate_normal\n'), ((7238, 7308), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['(updated_state - propagated_state)'], {'cov': 'cov_ekf'}), '(updated_state - propagated_state, cov=cov_ekf)\n', (7261, 7308), False, 'from scipy.stats import multivariate_normal\n'), ((2401, 2410), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2407, 2410), True, 'import numpy as np\n'), ((3072, 3090), 'numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (3081, 3090), True, 'import numpy as np\n'), ((3297, 3327), 'numpy.random.uniform', 'np.random.uniform', (['(1e-06)', '(1)', '(1)'], {}), '(1e-06, 1, 1)\n', (3314, 3327), True, 'import 
numpy as np\n'), ((4751, 4778), 'numpy.cos', 'np.cos', (['propagated_state[2]'], {}), '(propagated_state[2])\n', (4757, 4778), True, 'import numpy as np\n'), ((4837, 4864), 'numpy.sin', 'np.sin', (['propagated_state[2]'], {}), '(propagated_state[2])\n', (4843, 4864), True, 'import numpy as np\n'), ((5047, 5074), 'numpy.sin', 'np.sin', (['propagated_state[2]'], {}), '(propagated_state[2])\n', (5053, 5074), True, 'import numpy as np\n'), ((5119, 5146), 'numpy.cos', 'np.cos', (['propagated_state[2]'], {}), '(propagated_state[2])\n', (5125, 5146), True, 'import numpy as np\n'), ((5544, 5570), 'numpy.sqrt', 'np.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (5551, 5570), True, 'import numpy as np\n'), ((5594, 5612), 'numpy.arctan2', 'np.arctan2', (['dy', 'dx'], {}), '(dy, dx)\n', (5604, 5612), True, 'import numpy as np\n'), ((5918, 5958), 'numpy.array', 'np.array', (['[[H11, H12, 0], [H21, H22, 0]]'], {}), '([[H11, H12, 0], [H21, H22, 0]])\n', (5926, 5958), True, 'import numpy as np\n'), ((6015, 6091), 'numpy.array', 'np.array', (['[[measurements[idx][0] - z1_exp], [measurements[idx][1] - z2_exp]]'], {}), '([[measurements[idx][0] - z1_exp], [measurements[idx][1] - z2_exp]])\n', (6023, 6091), True, 'import numpy as np\n'), ((6357, 6375), 'numpy.dot', 'np.dot', (['K', 'y_tilde'], {}), '(K, y_tilde)\n', (6363, 6375), True, 'import numpy as np\n'), ((5216, 5231), 'numpy.transpose', 'np.transpose', (['F'], {}), '(F)\n', (5228, 5231), True, 'import numpy as np\n'), ((5709, 5735), 'numpy.sqrt', 'np.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (5716, 5735), True, 'import numpy as np\n'), ((5761, 5787), 'numpy.sqrt', 'np.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (5768, 5787), True, 'import numpy as np\n'), ((6257, 6271), 'scipy.linalg.pinv', 'linalg.pinv', (['S'], {}), '(S)\n', (6268, 6271), False, 'from scipy import linalg\n'), ((6119, 6137), 'numpy.dot', 'np.dot', (['H', 'cov_ekf'], {}), '(H, cov_ekf)\n', (6125, 6137), 
True, 'import numpy as np\n'), ((6139, 6154), 'numpy.transpose', 'np.transpose', (['H'], {}), '(H)\n', (6151, 6154), True, 'import numpy as np\n'), ((6239, 6254), 'numpy.transpose', 'np.transpose', (['H'], {}), '(H)\n', (6251, 6254), True, 'import numpy as np\n'), ((6635, 6644), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (6641, 6644), True, 'import numpy as np\n'), ((6647, 6659), 'numpy.dot', 'np.dot', (['K', 'H'], {}), '(K, H)\n', (6653, 6659), True, 'import numpy as np\n')]
|
"""
Compare puncta distribution in a channel with synapse distribution.
"""
import os
import copy
import socket
import numpy as np
import pandas as pd
from skimage import measure
from at_synapse_detection import SynapseDetection as syn
from at_synapse_detection import dataAccess as da
from at_synapse_detection import antibodyAnalysis as aa
from at_synapse_detection import SynapseAnalysis as sa
def run_SACT_FXS(synaptic_volumes, query, thresh, resolution, target_antibody_name, result_location, volume_um3):
    """
    Run SACT.

    MEASURES
    - Puncta Density
    - Average punctum size
    - Standard deviation of the size
    - Synapse density
    - Target Specificity Ratio (tsr)
    - Raw data mean/std

    Parameters
    -----------
    synaptic_volumes : dict - has two keys, 'postsynaptic' and 'presynaptic.' Each key contains a list of volumes.
    query : dict
    thresh : float
    resolution : dict
    target_antibody_name : str
    result_location : str - path to a saved .npy result volume
    volume_um3 : float - masked data volume in cubic microns

    Returns
    -----------
    antibody_measure : AntibodyAnalysis()
    """
    antibody_measure = aa.AntibodyAnalysis(query)

    # Get data volume
    antibody_measure.volume_um3 = volume_um3

    # Check to see if user supplied blobsize
    if 'punctumSize' in query.keys():
        blobsize = query['punctumSize']

    # The pre- and postsynaptic channels go through the identical pipeline;
    # run it once per side via the shared helper.
    antibody_measure = _measure_synaptic_side(
        synaptic_volumes['presynaptic'], query['preIF_z'], blobsize,
        antibody_measure, thresh, 'presynaptic')
    antibody_measure = _measure_synaptic_side(
        synaptic_volumes['postsynaptic'], query['postIF_z'], blobsize,
        antibody_measure, thresh, 'postsynaptic')

    # Load result volume and compute whole-volume synapse statistics
    resultVol = np.load(result_location)
    label_vol = measure.label(resultVol > thresh)
    stats = measure.regionprops(label_vol)
    antibody_measure.synapse_density = len(stats) / antibody_measure.volume_um3
    antibody_measure.synapse_count = len(stats)

    antibody_measure = aa.calculuate_target_ratio(
        antibody_measure, target_antibody_name)

    return antibody_measure


def _measure_synaptic_side(volumes, z_spans, blobsize, antibody_measure, thresh, side):
    """Run the shared single-channel measurement pipeline for one synaptic side.

    Mutates `volumes` in place (probability maps replace raw data) and returns
    the updated antibody_measure. `side` is 'presynaptic' or 'postsynaptic'.
    """
    # Compute raw mean and standard deviation
    antibody_measure = aa.compute_raw_measures(volumes, antibody_measure, side)

    # Keep raw copies for the SNR test
    raw_volumes = [np.copy(vol) for vol in volumes]

    for n in range(0, len(volumes)):
        volumes[n] = syn.getProbMap(volumes[n])                # Step 1
        volumes[n] = syn.convolveVolume(volumes[n], blobsize)  # Step 2
        if z_spans[n] > 1:
            # Step 3: blob must span multiple slices
            factor_vol = syn.computeFactor(volumes[n], int(z_spans[n]))
            volumes[n] = volumes[n] * factor_vol

    # Compute single channel measurements
    antibody_measure = aa.compute_single_channel_measurements(
        volumes, antibody_measure, thresh, side)

    # SNR test
    antibody_measure = aa.compute_SNR_synapticside(
        raw_volumes, volumes, thresh, antibody_measure, side)

    print('Computed %s single channel measurements' % side)
    return antibody_measure
def run_blob_synapse(mouse_number, mouse_project_str, base_query_num, channel_name):
    """
    Blob Synapse Ratio. Run SACT for FXS data.

    Only runs on Galicia (the data paths are hardcoded for that host).

    :param mouse_number: mouse id used to build file names and paths
    :param mouse_project_str: project string used to locate the query json
    :param base_query_num: index of the query within the query json file
    :param channel_name: target channel filename suffix, e.g. 'ss_PSD.tif'
    :return: dataframe with one row of measures per region
    """
    query_fn = 'queries/' + mouse_project_str + '_queries.json'

    hostname = socket.gethostname()
    if hostname == 'Galicia':
        data_location = '/data5TB/yi_mice/' + str(mouse_number) + 'ss_stacks'
        dapi_mask_str_base = '/data5TB/yi_mice/dapi-masks/' + \
            str(mouse_number) + 'ss_stacks'
    else:
        # Fail fast with a clear message instead of a NameError further down
        # (the original silently left data_location undefined off-Galicia).
        raise RuntimeError(
            'run_blob_synapse only knows the data layout on Galicia; '
            'host %s is not supported' % hostname)

    listOfQueries = syn.loadQueriesJSON(query_fn)
    resolution = {'res_xy_nm': 100, 'res_z_nm': 70}
    region_name_base = 'F00'
    thresh = 0.9

    foldernames = []
    measure_list = []
    target_filenames = []
    conjugate_filenames = []

    for region_num in range(0, 4):
        region_name = region_name_base + str(region_num)
        data_region_location = os.path.join(data_location, region_name)
        query = listOfQueries[base_query_num]
        # Result files use a flat query index: 12 queries per region
        query_number = base_query_num + 12 * region_num

        foldername = region_name + '-Q' + str(base_query_num)
        foldernames.append(foldername)
        conjugate_filenames.append('Query' + str(base_query_num))

        # Load the data
        synaptic_volumes = da.load_tiff_from_query(
            query, data_region_location)

        # Load DAPI mask
        dapi_mask_str = os.path.join(dapi_mask_str_base, region_name)
        dapi_mask_fn = os.path.join(dapi_mask_str, str(
            mouse_number) + 'ss-DAPI-mask.tiff')
        dapi_mask = da.imreadtiff(dapi_mask_fn)

        # Mask data. Note: np.bool was deprecated and removed from NumPy
        # (1.24); the builtin bool is the correct dtype here.
        dapi_mask = dapi_mask.astype(bool)
        combined_mask = np.logical_not(dapi_mask)  # keep portions without dapi
        synaptic_volumes = sa.mask_synaptic_volumes(
            synaptic_volumes, combined_mask)

        volume_um3 = sa.get_masked_volume(
            synaptic_volumes, combined_mask, resolution)
        print(volume_um3)

        target_antibody_name = str(mouse_number) + channel_name
        target_filenames.append(target_antibody_name)
        result_location = os.path.join(data_location, 'results_' + str(
            mouse_number) + 'ss_fragX', region_name, 'query_' + str(query_number) + '.npy')

        antibody_measure = run_SACT_FXS(
            synaptic_volumes, query, thresh, resolution, target_antibody_name, result_location, volume_um3)
        measure_list.append(antibody_measure)

    mouse_df = aa.create_df(measure_list, foldernames,
                            target_filenames, conjugate_filenames)
    return mouse_df
def iterate_over_mice(query_number, channel_name):
    """Run the blob/synapse measurement for every FXS mouse.

    :param query_number: base query index to evaluate
    :param channel_name: target channel filename suffix
    :return: list of per-mouse dataframes, in mouse order
    """
    mouse_numbers = [1, 2, 3, 4, 5, 6, 7, 22]
    return [
        run_blob_synapse(num, str(num) + 'ss', query_number, channel_name)
        for num in mouse_numbers
    ]
def iterate_over_queries():
    """Evaluate each (channel, query) pair over all mice and write one
    xlsx results file per pair.

    Table-driven version of the original copy-pasted sequence; the runs
    execute in the same order and write the same files.
    """
    # (channel filename suffix, base query number, output xlsx name)
    runs = [
        ('ss_PSD.tif', 4, 'psd_q4_ratio.xlsx'),
        ('ss_VGluT1.tif', 5, 'vglut1_q5_ratio.xlsx'),
        ('ss_Synap.tif', 5, 'synap_q5_ratio.xlsx'),
        ('ss_VGluT2.tif', 6, 'vglut2_q6_ratio.xlsx'),
        ('ss_Synap.tif', 6, 'synap_q6_ratio.xlsx'),
    ]
    for channel_name, query_number, output_fname in runs:
        df_list = iterate_over_mice(query_number, channel_name)
        aa.write_dfs_to_excel(df_list, 'blob_synapse', output_fname)
def main():
    """Entry point: run the full sweep of blob/synapse ratio queries
    (writes one xlsx file per channel/query pair)."""
    iterate_over_queries()
if __name__ == '__main__':
    main()
|
[
"at_synapse_detection.antibodyAnalysis.calculuate_target_ratio",
"numpy.load",
"at_synapse_detection.antibodyAnalysis.write_dfs_to_excel",
"skimage.measure.label",
"at_synapse_detection.SynapseDetection.convolveVolume",
"at_synapse_detection.SynapseAnalysis.mask_synaptic_volumes",
"os.path.join",
"skimage.measure.regionprops",
"at_synapse_detection.antibodyAnalysis.compute_raw_measures",
"numpy.copy",
"numpy.logical_not",
"socket.gethostname",
"at_synapse_detection.SynapseDetection.loadQueriesJSON",
"numpy.ceil",
"at_synapse_detection.SynapseDetection.getProbMap",
"at_synapse_detection.antibodyAnalysis.compute_SNR_synapticside",
"at_synapse_detection.dataAccess.imreadtiff",
"at_synapse_detection.antibodyAnalysis.AntibodyAnalysis",
"at_synapse_detection.antibodyAnalysis.create_df",
"at_synapse_detection.SynapseAnalysis.get_masked_volume",
"at_synapse_detection.antibodyAnalysis.compute_single_channel_measurements",
"at_synapse_detection.dataAccess.load_tiff_from_query"
] |
[((1029, 1055), 'at_synapse_detection.antibodyAnalysis.AntibodyAnalysis', 'aa.AntibodyAnalysis', (['query'], {}), '(query)\n', (1048, 1055), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((1606, 1683), 'at_synapse_detection.antibodyAnalysis.compute_raw_measures', 'aa.compute_raw_measures', (['presynaptic_volumes', 'antibody_measure', '"""presynaptic"""'], {}), "(presynaptic_volumes, antibody_measure, 'presynaptic')\n", (1629, 1683), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((2362, 2466), 'at_synapse_detection.antibodyAnalysis.compute_single_channel_measurements', 'aa.compute_single_channel_measurements', (['presynaptic_volumes', 'antibody_measure', 'thresh', '"""presynaptic"""'], {}), "(presynaptic_volumes,\n antibody_measure, thresh, 'presynaptic')\n", (2400, 2466), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((2511, 2629), 'at_synapse_detection.antibodyAnalysis.compute_SNR_synapticside', 'aa.compute_SNR_synapticside', (['raw_presynaptic_volumes', 'presynaptic_volumes', 'thresh', 'antibody_measure', '"""presynaptic"""'], {}), "(raw_presynaptic_volumes, presynaptic_volumes,\n thresh, antibody_measure, 'presynaptic')\n", (2538, 2629), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((2861, 2940), 'at_synapse_detection.antibodyAnalysis.compute_raw_measures', 'aa.compute_raw_measures', (['postsynaptic_volumes', 'antibody_measure', '"""postsynaptic"""'], {}), "(postsynaptic_volumes, antibody_measure, 'postsynaptic')\n", (2884, 2940), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((3632, 3738), 'at_synapse_detection.antibodyAnalysis.compute_single_channel_measurements', 'aa.compute_single_channel_measurements', (['postsynaptic_volumes', 'antibody_measure', 'thresh', '"""postsynaptic"""'], {}), "(postsynaptic_volumes,\n antibody_measure, thresh, 'postsynaptic')\n", (3670, 3738), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((3783, 
3904), 'at_synapse_detection.antibodyAnalysis.compute_SNR_synapticside', 'aa.compute_SNR_synapticside', (['raw_postsynaptic_volumes', 'postsynaptic_volumes', 'thresh', 'antibody_measure', '"""postsynaptic"""'], {}), "(raw_postsynaptic_volumes, postsynaptic_volumes,\n thresh, antibody_measure, 'postsynaptic')\n", (3810, 3904), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((4108, 4132), 'numpy.load', 'np.load', (['result_location'], {}), '(result_location)\n', (4115, 4132), True, 'import numpy as np\n'), ((4181, 4214), 'skimage.measure.label', 'measure.label', (['(resultVol > thresh)'], {}), '(resultVol > thresh)\n', (4194, 4214), False, 'from skimage import measure\n'), ((4227, 4257), 'skimage.measure.regionprops', 'measure.regionprops', (['label_vol'], {}), '(label_vol)\n', (4246, 4257), False, 'from skimage import measure\n'), ((4410, 4476), 'at_synapse_detection.antibodyAnalysis.calculuate_target_ratio', 'aa.calculuate_target_ratio', (['antibody_measure', 'target_antibody_name'], {}), '(antibody_measure, target_antibody_name)\n', (4436, 4476), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((4771, 4791), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4789, 4791), False, 'import socket\n'), ((5029, 5058), 'at_synapse_detection.SynapseDetection.loadQueriesJSON', 'syn.loadQueriesJSON', (['query_fn'], {}), '(query_fn)\n', (5048, 5058), True, 'from at_synapse_detection import SynapseDetection as syn\n'), ((6977, 7055), 'at_synapse_detection.antibodyAnalysis.create_df', 'aa.create_df', (['measure_list', 'foldernames', 'target_filenames', 'conjugate_filenames'], {}), '(measure_list, foldernames, target_filenames, conjugate_filenames)\n', (6989, 7055), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((7545, 7612), 'at_synapse_detection.antibodyAnalysis.write_dfs_to_excel', 'aa.write_dfs_to_excel', (['df_list', '"""blob_synapse"""', '"""psd_q4_ratio.xlsx"""'], {}), "(df_list, 'blob_synapse', 
'psd_q4_ratio.xlsx')\n", (7566, 7612), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((7734, 7804), 'at_synapse_detection.antibodyAnalysis.write_dfs_to_excel', 'aa.write_dfs_to_excel', (['df_list', '"""blob_synapse"""', '"""vglut1_q5_ratio.xlsx"""'], {}), "(df_list, 'blob_synapse', 'vglut1_q5_ratio.xlsx')\n", (7755, 7804), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((7925, 7994), 'at_synapse_detection.antibodyAnalysis.write_dfs_to_excel', 'aa.write_dfs_to_excel', (['df_list', '"""blob_synapse"""', '"""synap_q5_ratio.xlsx"""'], {}), "(df_list, 'blob_synapse', 'synap_q5_ratio.xlsx')\n", (7946, 7994), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((8116, 8186), 'at_synapse_detection.antibodyAnalysis.write_dfs_to_excel', 'aa.write_dfs_to_excel', (['df_list', '"""blob_synapse"""', '"""vglut2_q6_ratio.xlsx"""'], {}), "(df_list, 'blob_synapse', 'vglut2_q6_ratio.xlsx')\n", (8137, 8186), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((8307, 8376), 'at_synapse_detection.antibodyAnalysis.write_dfs_to_excel', 'aa.write_dfs_to_excel', (['df_list', '"""blob_synapse"""', '"""synap_q6_ratio.xlsx"""'], {}), "(df_list, 'blob_synapse', 'synap_q6_ratio.xlsx')\n", (8328, 8376), True, 'from at_synapse_detection import antibodyAnalysis as aa\n'), ((1914, 1952), 'at_synapse_detection.SynapseDetection.getProbMap', 'syn.getProbMap', (['presynaptic_volumes[n]'], {}), '(presynaptic_volumes[n])\n', (1928, 1952), True, 'from at_synapse_detection import SynapseDetection as syn\n'), ((2009, 2061), 'at_synapse_detection.SynapseDetection.convolveVolume', 'syn.convolveVolume', (['presynaptic_volumes[n]', 'blobsize'], {}), '(presynaptic_volumes[n], blobsize)\n', (2027, 2061), True, 'from at_synapse_detection import SynapseDetection as syn\n'), ((3176, 3215), 'at_synapse_detection.SynapseDetection.getProbMap', 'syn.getProbMap', (['postsynaptic_volumes[n]'], {}), '(postsynaptic_volumes[n])\n', (3190, 3215), True, 
'from at_synapse_detection import SynapseDetection as syn\n'), ((3273, 3326), 'at_synapse_detection.SynapseDetection.convolveVolume', 'syn.convolveVolume', (['postsynaptic_volumes[n]', 'blobsize'], {}), '(postsynaptic_volumes[n], blobsize)\n', (3291, 3326), True, 'from at_synapse_detection import SynapseDetection as syn\n'), ((5429, 5469), 'os.path.join', 'os.path.join', (['data_location', 'region_name'], {}), '(data_location, region_name)\n', (5441, 5469), False, 'import os\n'), ((5794, 5846), 'at_synapse_detection.dataAccess.load_tiff_from_query', 'da.load_tiff_from_query', (['query', 'data_region_location'], {}), '(query, data_region_location)\n', (5817, 5846), True, 'from at_synapse_detection import dataAccess as da\n'), ((5910, 5955), 'os.path.join', 'os.path.join', (['dapi_mask_str_base', 'region_name'], {}), '(dapi_mask_str_base, region_name)\n', (5922, 5955), False, 'import os\n'), ((6081, 6108), 'at_synapse_detection.dataAccess.imreadtiff', 'da.imreadtiff', (['dapi_mask_fn'], {}), '(dapi_mask_fn)\n', (6094, 6108), True, 'from at_synapse_detection import dataAccess as da\n'), ((6200, 6225), 'numpy.logical_not', 'np.logical_not', (['dapi_mask'], {}), '(dapi_mask)\n', (6214, 6225), True, 'import numpy as np\n'), ((6283, 6340), 'at_synapse_detection.SynapseAnalysis.mask_synaptic_volumes', 'sa.mask_synaptic_volumes', (['synaptic_volumes', 'combined_mask'], {}), '(synaptic_volumes, combined_mask)\n', (6307, 6340), True, 'from at_synapse_detection import SynapseAnalysis as sa\n'), ((6376, 6441), 'at_synapse_detection.SynapseAnalysis.get_masked_volume', 'sa.get_masked_volume', (['synaptic_volumes', 'combined_mask', 'resolution'], {}), '(synaptic_volumes, combined_mask, resolution)\n', (6396, 6441), True, 'from at_synapse_detection import SynapseAnalysis as sa\n'), ((1271, 1294), 'numpy.ceil', 'np.ceil', (['(blobsize * 1.5)'], {}), '(blobsize * 1.5)\n', (1278, 1294), True, 'import numpy as np\n'), ((1817, 1829), 'numpy.copy', 'np.copy', (['vol'], {}), '(vol)\n', 
(1824, 1829), True, 'import numpy as np\n'), ((3077, 3089), 'numpy.copy', 'np.copy', (['vol'], {}), '(vol)\n', (3084, 3089), True, 'import numpy as np\n')]
|
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from keras.utils import plot_model
def Autoencoder(series_length):
    """Build and compile an LSTM sequence autoencoder.

    The encoder LSTM compresses the series into a single vector, which is
    repeated `series_length` times and decoded back into a sequence of
    scalars by the second LSTM.

    :param series_length: number of timesteps in each input series
    :return: a compiled keras Sequential model (adam optimizer, MSE loss)
    """
    layers = [
        LSTM(100, activation='relu', input_shape=(series_length, 1)),
        RepeatVector(series_length),
        LSTM(100, activation='relu', return_sequences=True),
        TimeDistributed(Dense(1)),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(optimizer='adam', loss='mse')
    return model
if __name__ == '__main__':
    # Toy demonstration: learn to reconstruct a single 9-step input sequence
    sequence = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    # reshape input into [samples, timesteps, features]
    n_in = len(sequence)
    sequence = sequence.reshape((1, n_in, 1))
    # define model
    model = Autoencoder(series_length=n_in)
    # fit model (autoencoder: input and target are the same sequence)
    model.fit(sequence, sequence, epochs=300, verbose=0)
    # save an architecture diagram next to the script
    plot_model(model, show_shapes=True, to_file='reconstruct_lstm_autoencoder.png')
    # demonstrate recreation: predict the input back and print the reconstruction
    yhat = model.predict(sequence, verbose=0)
    print(yhat[0, :, 0])
|
[
"keras.layers.LSTM",
"keras.utils.plot_model",
"keras.layers.Dense",
"numpy.array",
"keras.models.Sequential",
"keras.layers.RepeatVector"
] |
[((371, 383), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (381, 383), False, 'from keras.models import Sequential\n'), ((748, 803), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]'], {}), '([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n', (756, 803), True, 'import numpy as np\n'), ((1071, 1150), 'keras.utils.plot_model', 'plot_model', (['model'], {'show_shapes': '(True)', 'to_file': '"""reconstruct_lstm_autoencoder.png"""'}), "(model, show_shapes=True, to_file='reconstruct_lstm_autoencoder.png')\n", (1081, 1150), False, 'from keras.utils import plot_model\n'), ((398, 458), 'keras.layers.LSTM', 'LSTM', (['(100)'], {'activation': '"""relu"""', 'input_shape': '(series_length, 1)'}), "(100, activation='relu', input_shape=(series_length, 1))\n", (402, 458), False, 'from keras.layers import LSTM\n'), ((474, 501), 'keras.layers.RepeatVector', 'RepeatVector', (['series_length'], {}), '(series_length)\n', (486, 501), False, 'from keras.layers import RepeatVector\n'), ((517, 568), 'keras.layers.LSTM', 'LSTM', (['(100)'], {'activation': '"""relu"""', 'return_sequences': '(True)'}), "(100, activation='relu', return_sequences=True)\n", (521, 568), False, 'from keras.layers import LSTM\n'), ((600, 608), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (605, 608), False, 'from keras.layers import Dense\n')]
|
import operator
import re
import os
import json
import logging
from collections import Counter
from tqdm import tqdm
import colorlog
from sklearn.feature_extraction.text import TfidfTransformer
import numpy as np
#####################
# Hyperparameters
#####################
CONTEXT_LENGTH = 100
CAPTION_VOCAB_SIZE = 40000
HASHTAG_VOCAB_SIZE = 60000

DATA_ROOT_PATH = os.path.join('..', '..', 'data', 'Instagram')

# For dataset: raw json files, one per task (caption / hashtag) and split
CAPTION_TRAIN_JSON_FNAME = os.path.join(
    DATA_ROOT_PATH, 'json', 'insta-caption-train.json'
)
CAPTION_TEST1_JSON_FNAME = os.path.join(
    DATA_ROOT_PATH, 'json', 'insta-caption-test1.json'
)
CAPTION_TEST2_JSON_FNAME = os.path.join(
    DATA_ROOT_PATH, 'json', 'insta-caption-test2.json'
)
HASHTAG_TRAIN_JSON_FNAME = os.path.join(
    DATA_ROOT_PATH, 'json', 'insta-hashtag-train.json'
)
HASHTAG_TEST1_JSON_FNAME = os.path.join(
    DATA_ROOT_PATH, 'json', 'insta-hashtag-test1.json'
)
HASHTAG_TEST2_JSON_FNAME = os.path.join(
    DATA_ROOT_PATH, 'json', 'insta-hashtag-test2.json'
)

# Output directories; vocab filenames encode the vocabulary size
CAPTION_OUTPUT_PATH = os.path.join(DATA_ROOT_PATH, 'caption_dataset')
HASHTAG_OUTPUT_PATH = os.path.join(DATA_ROOT_PATH, 'hashtag_dataset')
CAPTION_VOCAB_FNAME = os.path.join(
    CAPTION_OUTPUT_PATH, '%d.vocab' % (CAPTION_VOCAB_SIZE)
)
HASHTAG_VOCAB_FNAME = os.path.join(
    HASHTAG_OUTPUT_PATH, '%d.vocab' % (HASHTAG_VOCAB_SIZE)
)

# For vocabulary: special tokens; the *_ID constants must stay in sync with
# the order of _START_VOCAB
_PAD = "_PAD"
_GO = "_GO"
_EOS = "_EOS"
_UNK = "_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]

PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3

# For tokenization: the emoticon pattern depends on whether the interpreter
# was built with wide (UCS-4) or narrow (UCS-2) unicode support — the wide
# pattern raises on narrow builds, hence the fallback.
try:
    # UCS-4
    EMOTICON = re.compile(u'(([\U00002600-\U000027BF])|([\U0001f300-\U0001f64F])|([\U0001f680-\U0001f6FF]))')
except:
    # UCS-2
    EMOTICON = re.compile(u'(([\u2600-\u27BF])|([\uD83C][\uDF00-\uDFFF])|([\uD83D][\uDC00-\uDE4F])|([\uD83D][\uDE80-\uDEFF]))')
# Matches escaped \UXXXXXXXX / \uXXXX sequences (i.e. non-emoticon escapes)
NOT_EMOTICON = re.compile(r'(\\U([0-9A-Fa-f]){8})|(\\u([0-9A-Fa-f]){4})')
def sort_dict(dic):
    """Return dic's items sorted by count descending, ties broken
    alphabetically by key.

    Implemented as two stable sorts: first alphabetically on the key, then
    on the count in descending order.
    """
    alphabetical = sorted(dic.items(), key=lambda pair: pair[0])
    return sorted(alphabetical, key=lambda pair: pair[1], reverse=True)
def load_json(json_fname):
    """Load a JSON file and return the deserialized object.

    :param json_fname: path to the JSON file
    :return: the parsed JSON object (dict/list)
    """
    colorlog.info("Load %s" % (json_fname))
    # JSON is defined as UTF-8: be explicit so the platform default
    # encoding (e.g. cp1252 on Windows) cannot break parsing.
    with open(json_fname, 'r', encoding='utf-8') as f:
        json_object = json.load(f)
    return json_object
def tokenize(sentence):
    """Tokenize a sentence (str, bytes, or list of words) into a list of tokens.

    Pipeline (order matters): hashtags/mentions are split off as their own
    tokens, text is lowercased, all @-mentions are collapsed to the single
    token '@username', emoticons are protected with the '@@byeongchang'
    sentinel so they survive punctuation stripping, escaped unicode
    sequences and punctuation are removed, and the sentinel is finally
    rewritten so emoticon tokens come out backslash-prefixed.

    :param sentence: sentence to tokenize
    :return: list of token strings
    """
    if isinstance(sentence, list):
        sentence = ' '.join(sentence)
    if isinstance(sentence, bytes):
        # NOTE(review): ISO-8859-1 decodes any byte sequence without error —
        # presumably chosen to avoid UnicodeDecodeError on raw data; confirm.
        sentence = sentence.decode('ISO-8859-1')
    sentence = sentence.replace('#', ' #')  # isolate hashtags as tokens
    sentence = sentence.replace('@', ' @')  # isolate mentions as tokens
    sentence = sentence.replace('\n', ' ')
    sentence = sentence.lower()
    sentence = re.sub(r'@[a-zA-Z0-9._]+', '@username', sentence)  # normalize every user mention
    sentence = EMOTICON.sub(r'@@byeongchang\1 ', sentence)  # protect emoticons with a sentinel
    sentence = re.sub(r'@@byeongchang\\', '@@byeongchang', sentence)
    #sentence = sentence.encode('unicode-escape')  # for emoticons
    sentence = NOT_EMOTICON.sub(r' ', sentence)  # drop escaped \uXXXX / \UXXXXXXXX sequences
    sentence = re.sub(r"[\-_]", r"-", sentence)  # unify - and _ into a single dash
    sentence = re.sub(r"([!?,\.\"])", r" ", sentence)  # replace . , ! ? " with spaces
    # drop a dash that has no alphanumeric neighbour on either side
    sentence = re.sub(r"(?<![a-zA-Z0-9])\-(?![a-zA-Z0-9])", r"", sentence)
    sentence = ' '.join(re.split(r'[^a-zA-Z0-9#@\'\-]+', sentence))
    sentence = re.sub(r'@@byeongchang', r' \\', sentence)  # restore sentinel as '\' prefix
    return sentence.split()
def _tokenize_split(split_json, key, desc, counter=None):
    """Tokenize every post of one split; optionally count tokens.

    Args:
        split_json: {user_id: {post_id: post_dict}} raw json object.
        key: field of each post to tokenize ('caption' or 'tags').
        desc: progress-bar label.
        counter: optional Counter updated with every emitted token.
    Returns:
        {user_id: {post_id: [token, ...]}}
    """
    split_tokens = {}
    for user_id, posts in tqdm(split_json.items(), ncols=70, desc=desc):
        split_tokens[user_id] = {}
        for post_id, post in posts.items():
            post_tokens = tokenize(post[key])
            split_tokens[user_id][post_id] = post_tokens
            if counter is not None:
                for post_token in post_tokens:
                    counter[post_token] += 1
    return split_tokens
def tokenize_all(train_json, test1_json, test2_json, key='caption'):
    """
    Tokenize sentences in raw dataset.

    The train/test1/test2 loops were previously three copies of the same
    loop body; they now share _tokenize_split. Only the training split
    contributes to the token counter (the vocabulary must not see test data).

    Args:
        train_json, test1_json, test2_json: raw json object
        key: 'caption' or 'tags'
    Returns:
        (token_counter, train_tokens, test1_tokens, test2_tokens)
    """
    colorlog.info("Tokenize %s data" % (key))
    token_counter = Counter()
    train_tokens = _tokenize_split(train_json, key, "train data",
                                   counter=token_counter)
    test1_tokens = _tokenize_split(test1_json, key, "test1 data")
    test2_tokens = _tokenize_split(test2_json, key, "test2 data")
    return token_counter, train_tokens, test1_tokens, test2_tokens
def get_tfidf_words(train_tokens, test1_tokens, test2_tokens,
        vocab, rev_vocab
        ):
    """Select each user's top CONTEXT_LENGTH tf-idf tokens per split.

    Builds a user-by-vocabulary term-count matrix for every split, fits
    the tf-idf transform on the training counts only, and returns one
    dict per split mapping user_id -> list of highest-weighted tokens.
    """
    colorlog.info("Get tfidf words")
    def _preprocess(all_tokens, rev_vocab):
        # Term-count matrix: one row per user, one column per vocab id.
        counter = np.zeros([len(all_tokens), len(rev_vocab)])
        user_ids = []
        for i, (user_id, posts) in enumerate(
            tqdm(all_tokens.items(), ncols=70, desc="preprocess")
        ):
            user_ids.append(user_id)
            for post_id, tokens in posts.items():
                token_ids = [rev_vocab.get(token, UNK_ID) for token in tokens]
                for token_id in token_ids:
                    counter[i, token_id] += 1
        return counter, user_ids
    train_counter, train_user_ids = _preprocess(train_tokens, rev_vocab)
    test1_counter, test1_user_ids = _preprocess(test1_tokens, rev_vocab)
    test2_counter, test2_user_ids = _preprocess(test2_tokens, rev_vocab)
    colorlog.info("Fit and transform train tfidf")
    # Fit idf statistics on train only; test splits reuse them.
    vectorizer = TfidfTransformer()
    train_tfidf = vectorizer.fit_transform(train_counter).toarray()
    test1_tfidf = vectorizer.transform(test1_counter).toarray()
    test2_tfidf = vectorizer.transform(test2_counter).toarray()
    def _extract_tokens(tfidfs, user_ids, vocab):
        user_tokens = {}
        for i, user_id in enumerate(user_ids):
            # Negate scores so argsort/sort yield descending tf-idf order.
            tfidf = np.argsort(-tfidfs[i])[:CONTEXT_LENGTH]
            weight = np.sort(-tfidfs[i])[:CONTEXT_LENGTH]
            tokens = []
            for j, (index, token_weight) in enumerate(zip(tfidf, weight)):
                token = vocab[index]
                if token_weight < 0.0:
                    # Negative here means the original tf-idf weight was > 0.
                    if index != UNK_ID:
                        tokens.append(token)
                else:
                    # Weights are sorted, so the first zero ends the list.
                    break
            user_tokens[user_id] = tokens
        return user_tokens
    colorlog.info("Extract tokens from tfidf matrix")
    train_user_tokens = _extract_tokens(train_tfidf, train_user_ids, vocab)
    test1_user_tokens = _extract_tokens(test1_tfidf, test1_user_ids, vocab)
    test2_user_tokens = _extract_tokens(test2_tfidf, test2_user_ids, vocab)
    return train_user_tokens, test1_user_tokens, test2_user_tokens
def create_vocabulary(counter, fname, vocab_size):
    """Write a frequency-ranked vocabulary to *fname*, one token per line.

    The four special tokens in _START_VOCAB always come first, followed
    by tokens in sort_dict order (count descending, ties alphabetical),
    truncated to *vocab_size* entries.

    Returns:
        (vocab, rev_vocab): the token list and its token -> id mapping.
    """
    colorlog.info("Create vocabulary %s" % (fname))
    ranked_tokens = [pair[0] for pair in sort_dict(counter)]
    vocab = (_START_VOCAB + ranked_tokens)[:vocab_size]
    with open(fname, 'w') as vocab_file:
        vocab_file.writelines(token + "\n" for token in vocab)
    rev_vocab = {token: index for index, token in enumerate(vocab)}
    return vocab, rev_vocab
def save_data(train_data, test1_data, test2_data, output_path, rev_vocab):
    """
    Write (context, caption) token-id records for each split.

    Data format (one line per post):
        numpyfname,contextlength,captionlength,contexttoken1_contexttoken2,wordtoken1_wordtoken2
        e.g. 12345.npy,4,3,445_24_445_232,134_466_234

    Args:
        train_data, test1_data, test2_data: (tokens, tfidf_tokens) pairs,
            both dicts keyed by user id.
        output_path: directory receiving train.txt / test1.txt / test2.txt.
        rev_vocab: token -> id mapping; unknown tokens map to UNK_ID.
    """
    def _ids(tokens):
        # Map tokens to vocabulary-id strings, falling back to UNK_ID.
        # (Replaces the previous map(...) followed by a redundant
        # copying list comprehension.)
        return [str(rev_vocab.get(token, UNK_ID)) for token in tokens]
    def _save_data(all_tokens, all_tfidf, fname):
        all_strings = []
        for user_id, posts in all_tokens.items():
            # The tf-idf context is shared by all posts of this user.
            context_tokenids = _ids(all_tfidf[user_id])
            context_length = str(len(context_tokenids))
            context_string = '_'.join(context_tokenids)
            for post_id, tokens in posts.items():
                caption_tokenids = _ids(tokens)
                caption_length = str(len(caption_tokenids))
                caption_string = '_'.join(caption_tokenids)
                numpy_string = '%s_@_%s.npy' % (user_id, post_id)
                all_string = ','.join([
                    numpy_string, context_length, caption_length,
                    context_string, caption_string
                ])
                all_strings.append((all_string + '\n', len(caption_tokenids)))
        # sort by caption length (stable, preserving original order on ties)
        all_strings.sort(key=lambda x: x[1])
        with open(fname, 'w') as f:
            for all_string in all_strings:
                f.write(all_string[0])
    _save_data(
        train_data[0], train_data[1], os.path.join(output_path, "train.txt")
    )
    _save_data(
        test1_data[0], test1_data[1], os.path.join(output_path, "test1.txt")
    )
    _save_data(
        test2_data[0], test2_data[1], os.path.join(output_path, "test2.txt")
    )
def main():
    """Run the full preprocessing pipeline for captions and hashtags.

    Loads raw json, tokenizes posts, builds vocabularies, selects
    per-user tf-idf context tokens, and writes train/test files for
    both the caption and the hashtag tasks.
    """
    colorlog.basicConfig(
        filename=None,
        level=logging.INFO,
        format="%(log_color)s[%(levelname)s:%(asctime)s]%(reset)s %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S"
    )
    # Ensure both output directories exist before any file is written.
    if not os.path.exists(CAPTION_OUTPUT_PATH):
        colorlog.info("Create directory %s" % (CAPTION_OUTPUT_PATH))
        os.makedirs(CAPTION_OUTPUT_PATH)
    if not os.path.exists(HASHTAG_OUTPUT_PATH):
        colorlog.info("Create directory %s" % (HASHTAG_OUTPUT_PATH))
        os.makedirs(HASHTAG_OUTPUT_PATH)
    # Load raw data
    caption_train_json = load_json(CAPTION_TRAIN_JSON_FNAME)
    caption_test1_json = load_json(CAPTION_TEST1_JSON_FNAME)
    caption_test2_json = load_json(CAPTION_TEST2_JSON_FNAME)
    hashtag_train_json = load_json(HASHTAG_TRAIN_JSON_FNAME)
    hashtag_test1_json = load_json(HASHTAG_TEST1_JSON_FNAME)
    hashtag_test2_json = load_json(HASHTAG_TEST2_JSON_FNAME)
    # Tokenize all
    caption_counter, caption_train_tokens, caption_test1_tokens, \
        caption_test2_tokens = tokenize_all(
            caption_train_json,
            caption_test1_json,
            caption_test2_json,
            'caption'
        )
    hashtag_counter, hashtag_train_tokens, hashtag_test1_tokens, \
        hashtag_test2_tokens = tokenize_all(
            hashtag_train_json,
            hashtag_test1_json,
            hashtag_test2_json,
            'tags'
        )
    # Create vocabulary (from training counts only)
    caption_vocab, caption_rev_vocab = create_vocabulary(
        caption_counter, CAPTION_VOCAB_FNAME, CAPTION_VOCAB_SIZE
    )
    hashtag_vocab, hashtag_rev_vocab = create_vocabulary(
        hashtag_counter, HASHTAG_VOCAB_FNAME, HASHTAG_VOCAB_SIZE
    )
    # Get tfidf weighted tokens
    caption_train_tfidf_tokens, caption_test1_tfidf_tokens, \
        caption_test2_tfidf_tokens = get_tfidf_words(
            caption_train_tokens,
            caption_test1_tokens,
            caption_test2_tokens,
            caption_vocab,
            caption_rev_vocab
        )
    hashtag_train_tfidf_tokens, hashtag_test1_tfidf_tokens, \
        hashtag_test2_tfidf_tokens = get_tfidf_words(
            hashtag_train_tokens,
            hashtag_test1_tokens,
            hashtag_test2_tokens,
            hashtag_vocab,
            hashtag_rev_vocab
        )
    # Save data
    save_data(
        (caption_train_tokens, caption_train_tfidf_tokens),
        (caption_test1_tokens, caption_test1_tfidf_tokens),
        (caption_test2_tokens, caption_test2_tfidf_tokens),
        CAPTION_OUTPUT_PATH,
        caption_rev_vocab
    )
    save_data(
        (hashtag_train_tokens, hashtag_train_tfidf_tokens),
        (hashtag_test1_tokens, hashtag_test1_tfidf_tokens),
        (hashtag_test2_tokens, hashtag_test2_tfidf_tokens),
        HASHTAG_OUTPUT_PATH,
        hashtag_rev_vocab
    )
if __name__ == '__main__':
    main()
|
[
"colorlog.basicConfig",
"json.load",
"re.split",
"os.makedirs",
"os.path.exists",
"colorlog.info",
"numpy.argsort",
"numpy.sort",
"collections.Counter",
"operator.itemgetter",
"os.path.join",
"sklearn.feature_extraction.text.TfidfTransformer",
"re.sub",
"re.compile"
] |
[((387, 432), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""data"""', '"""Instagram"""'], {}), "('..', '..', 'data', 'Instagram')\n", (399, 432), False, 'import os\n'), ((478, 542), 'os.path.join', 'os.path.join', (['DATA_ROOT_PATH', '"""json"""', '"""insta-caption-train.json"""'], {}), "(DATA_ROOT_PATH, 'json', 'insta-caption-train.json')\n", (490, 542), False, 'import os\n'), ((579, 643), 'os.path.join', 'os.path.join', (['DATA_ROOT_PATH', '"""json"""', '"""insta-caption-test1.json"""'], {}), "(DATA_ROOT_PATH, 'json', 'insta-caption-test1.json')\n", (591, 643), False, 'import os\n'), ((680, 744), 'os.path.join', 'os.path.join', (['DATA_ROOT_PATH', '"""json"""', '"""insta-caption-test2.json"""'], {}), "(DATA_ROOT_PATH, 'json', 'insta-caption-test2.json')\n", (692, 744), False, 'import os\n'), ((781, 845), 'os.path.join', 'os.path.join', (['DATA_ROOT_PATH', '"""json"""', '"""insta-hashtag-train.json"""'], {}), "(DATA_ROOT_PATH, 'json', 'insta-hashtag-train.json')\n", (793, 845), False, 'import os\n'), ((882, 946), 'os.path.join', 'os.path.join', (['DATA_ROOT_PATH', '"""json"""', '"""insta-hashtag-test1.json"""'], {}), "(DATA_ROOT_PATH, 'json', 'insta-hashtag-test1.json')\n", (894, 946), False, 'import os\n'), ((983, 1047), 'os.path.join', 'os.path.join', (['DATA_ROOT_PATH', '"""json"""', '"""insta-hashtag-test2.json"""'], {}), "(DATA_ROOT_PATH, 'json', 'insta-hashtag-test2.json')\n", (995, 1047), False, 'import os\n'), ((1081, 1128), 'os.path.join', 'os.path.join', (['DATA_ROOT_PATH', '"""caption_dataset"""'], {}), "(DATA_ROOT_PATH, 'caption_dataset')\n", (1093, 1128), False, 'import os\n'), ((1152, 1199), 'os.path.join', 'os.path.join', (['DATA_ROOT_PATH', '"""hashtag_dataset"""'], {}), "(DATA_ROOT_PATH, 'hashtag_dataset')\n", (1164, 1199), False, 'import os\n'), ((1225, 1291), 'os.path.join', 'os.path.join', (['CAPTION_OUTPUT_PATH', "('%d.vocab' % CAPTION_VOCAB_SIZE)"], {}), "(CAPTION_OUTPUT_PATH, '%d.vocab' % CAPTION_VOCAB_SIZE)\n", (1237, 1291), 
False, 'import os\n'), ((1325, 1391), 'os.path.join', 'os.path.join', (['HASHTAG_OUTPUT_PATH', "('%d.vocab' % HASHTAG_VOCAB_SIZE)"], {}), "(HASHTAG_OUTPUT_PATH, '%d.vocab' % HASHTAG_VOCAB_SIZE)\n", (1337, 1391), False, 'import os\n'), ((1879, 1940), 're.compile', 're.compile', (['"""(\\\\\\\\U([0-9A-Fa-f]){8})|(\\\\\\\\u([0-9A-Fa-f]){4})"""'], {}), "('(\\\\\\\\U([0-9A-Fa-f]){8})|(\\\\\\\\u([0-9A-Fa-f]){4})')\n", (1889, 1940), False, 'import re\n'), ((1621, 1670), 're.compile', 're.compile', (['u"""(([☀-➿])|([🌀-🙏])|([🚀-\U0001f6ff]))"""'], {}), "(u'(([☀-➿])|([🌀-🙏])|([🚀-\\U0001f6ff]))')\n", (1631, 1670), False, 'import re\n'), ((2225, 2262), 'colorlog.info', 'colorlog.info', (["('Load %s' % json_fname)"], {}), "('Load %s' % json_fname)\n", (2238, 2262), False, 'import colorlog\n'), ((2736, 2784), 're.sub', 're.sub', (['"""@[a-zA-Z0-9._]+"""', '"""@username"""', 'sentence'], {}), "('@[a-zA-Z0-9._]+', '@username', sentence)\n", (2742, 2784), False, 'import re\n'), ((2877, 2931), 're.sub', 're.sub', (['"""@@byeongchang\\\\\\\\"""', '"""@@byeongchang"""', 'sentence'], {}), "('@@byeongchang\\\\\\\\', '@@byeongchang', sentence)\n", (2883, 2931), False, 'import re\n'), ((3058, 3089), 're.sub', 're.sub', (['"""[\\\\-_]"""', '"""-"""', 'sentence'], {}), "('[\\\\-_]', '-', sentence)\n", (3064, 3089), False, 'import re\n'), ((3127, 3165), 're.sub', 're.sub', (['"""([!?,\\\\.\\\\"])"""', '""" """', 'sentence'], {}), '(\'([!?,\\\\.\\\\"])\', \' \', sentence)\n', (3133, 3165), False, 'import re\n'), ((3212, 3270), 're.sub', 're.sub', (['"""(?<![a-zA-Z0-9])\\\\-(?![a-zA-Z0-9])"""', '""""""', 'sentence'], {}), "('(?<![a-zA-Z0-9])\\\\-(?![a-zA-Z0-9])', '', sentence)\n", (3218, 3270), False, 'import re\n'), ((3401, 3443), 're.sub', 're.sub', (['"""@@byeongchang"""', '""" \\\\\\\\"""', 'sentence'], {}), "('@@byeongchang', ' \\\\\\\\', sentence)\n", (3407, 3443), False, 'import re\n'), ((3699, 3738), 'colorlog.info', 'colorlog.info', (["('Tokenize %s data' % key)"], {}), "('Tokenize %s 
data' % key)\n", (3712, 3738), False, 'import colorlog\n'), ((3760, 3769), 'collections.Counter', 'Counter', ([], {}), '()\n', (3767, 3769), False, 'from collections import Counter\n'), ((4867, 4899), 'colorlog.info', 'colorlog.info', (['"""Get tfidf words"""'], {}), "('Get tfidf words')\n", (4880, 4899), False, 'import colorlog\n'), ((5610, 5656), 'colorlog.info', 'colorlog.info', (['"""Fit and transform train tfidf"""'], {}), "('Fit and transform train tfidf')\n", (5623, 5656), False, 'import colorlog\n'), ((5673, 5691), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (5689, 5691), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((6424, 6473), 'colorlog.info', 'colorlog.info', (['"""Extract tokens from tfidf matrix"""'], {}), "('Extract tokens from tfidf matrix')\n", (6437, 6473), False, 'import colorlog\n'), ((6826, 6871), 'colorlog.info', 'colorlog.info', (["('Create vocabulary %s' % fname)"], {}), "('Create vocabulary %s' % fname)\n", (6839, 6871), False, 'import colorlog\n'), ((8979, 9144), 'colorlog.basicConfig', 'colorlog.basicConfig', ([], {'filename': 'None', 'level': 'logging.INFO', 'format': '"""%(log_color)s[%(levelname)s:%(asctime)s]%(reset)s %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(filename=None, level=logging.INFO, format=\n '%(log_color)s[%(levelname)s:%(asctime)s]%(reset)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n", (8999, 9144), False, 'import colorlog\n'), ((1750, 1862), 're.compile', 're.compile', (["u'(([☀-➿])|([\\ud83c][\\udf00-\\udfff])|([\\ud83d][\\udc00-\\ude4f])|([\\ud83d][\\ude80-\\udeff]))'"], {}), "(\n u'(([☀-➿])|([\\ud83c][\\udf00-\\udfff])|([\\ud83d][\\udc00-\\ude4f])|([\\ud83d][\\ude80-\\udeff]))'\n )\n", (1760, 1862), False, 'import re\n'), ((2320, 2332), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2329, 2332), False, 'import json\n'), ((3343, 3386), 're.split', 're.split', (['"""[^a-zA-Z0-9#@\\\\\'\\\\-]+"""', 'sentence'], {}), 
'("[^a-zA-Z0-9#@\\\\\'\\\\-]+", sentence)\n', (3351, 3386), False, 'import re\n'), ((8723, 8761), 'os.path.join', 'os.path.join', (['output_path', '"""train.txt"""'], {}), "(output_path, 'train.txt')\n", (8735, 8761), False, 'import os\n'), ((8819, 8857), 'os.path.join', 'os.path.join', (['output_path', '"""test1.txt"""'], {}), "(output_path, 'test1.txt')\n", (8831, 8857), False, 'import os\n'), ((8915, 8953), 'os.path.join', 'os.path.join', (['output_path', '"""test2.txt"""'], {}), "(output_path, 'test2.txt')\n", (8927, 8953), False, 'import os\n'), ((9181, 9216), 'os.path.exists', 'os.path.exists', (['CAPTION_OUTPUT_PATH'], {}), '(CAPTION_OUTPUT_PATH)\n', (9195, 9216), False, 'import os\n'), ((9223, 9281), 'colorlog.info', 'colorlog.info', (["('Create directory %s' % CAPTION_OUTPUT_PATH)"], {}), "('Create directory %s' % CAPTION_OUTPUT_PATH)\n", (9236, 9281), False, 'import colorlog\n'), ((9289, 9321), 'os.makedirs', 'os.makedirs', (['CAPTION_OUTPUT_PATH'], {}), '(CAPTION_OUTPUT_PATH)\n', (9300, 9321), False, 'import os\n'), ((9332, 9367), 'os.path.exists', 'os.path.exists', (['HASHTAG_OUTPUT_PATH'], {}), '(HASHTAG_OUTPUT_PATH)\n', (9346, 9367), False, 'import os\n'), ((9374, 9432), 'colorlog.info', 'colorlog.info', (["('Create directory %s' % HASHTAG_OUTPUT_PATH)"], {}), "('Create directory %s' % HASHTAG_OUTPUT_PATH)\n", (9387, 9432), False, 'import colorlog\n'), ((9440, 9472), 'os.makedirs', 'os.makedirs', (['HASHTAG_OUTPUT_PATH'], {}), '(HASHTAG_OUTPUT_PATH)\n', (9451, 9472), False, 'import os\n'), ((2031, 2053), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (2050, 2053), False, 'import operator\n'), ((2125, 2147), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2144, 2147), False, 'import operator\n'), ((6017, 6039), 'numpy.argsort', 'np.argsort', (['(-tfidfs[i])'], {}), '(-tfidfs[i])\n', (6027, 6039), True, 'import numpy as np\n'), ((6073, 6092), 'numpy.sort', 'np.sort', (['(-tfidfs[i])'], {}), '(-tfidfs[i])\n', 
(6080, 6092), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# imagecodecs/setup.py
"""Imagecodecs package setuptools script."""
import sys
import re
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
# Derive package metadata (version, description, readme, license) from
# the docstring and constants embedded in the Cython source file.
buildnumber = ''  # e.g. 'post0'

with open('imagecodecs/_imagecodecs.pyx') as stream:
    code = stream.read()

version = re.search(r"__version__ = '(.*?)'", code).groups()[0]
if buildnumber:
    version += '.' + buildnumber

description = re.search(r'"""(.*)\.(?:\r\n|\r|\n)', code).groups()[0]

readme = re.search(
    r'(?:\r\n|\r|\n){2}"""(.*)"""(?:\r\n|\r|\n){2}__version__',
    code, re.MULTILINE | re.DOTALL).groups()[0]
readme = '\n'.join(
    [description, '=' * len(description)] + readme.splitlines()[1:])

# NOTE: 'license' shadows the builtin of the same name (pre-existing).
license = re.search(
    r'(# Copyright.*?(?:\r\n|\r|\n))(?:\r\n|\r|\n)+""',
    code, re.MULTILINE | re.DOTALL).groups()[0]
license = license.replace('# ', '').replace('#', '')

if 'sdist' in sys.argv:
    # Refresh LICENSE and README.rst when building a source distribution.
    with open('LICENSE', 'w') as stream:
        stream.write(license)
    with open('README.rst', 'w') as stream:
        stream.write(readme)
# Compiled sources and include paths shared by the main extension.
sources = [
    'imagecodecs/opj_color.c',
    'imagecodecs/jpeg_sof3.cpp',
    'imagecodecs/_imagecodecs.pyx',
]
include_dirs = [
    'imagecodecs',
]
try:
    # running in Windows development environment
    import _inclib  # noqa
    libraries = [
        'zlib', 'lz4', 'webp', 'png', 'jpeg', 'lzf', 'libbz2',
        'snappy', 'zstd_static', 'lzma-static', 'openjp2',
        'lcms2']
    define_macros = [('WIN32', 1), ('LZMA_API_STATIC', 1),
                     ('OPJ_STATIC', 1), ('OPJ_HAVE_LIBLCMS2', 1),
                     ('CHARLS_STATIC', 1)]
    libraries_jpeg12 = ['jpeg12']
    if sys.version_info < (3, 5):
        # charls-2.0 not compatible with msvc 9 or 10
        libraries_jpegls = []
    else:
        libraries_jpegls = ['charls']
    libraries_zfp = ['zfp']
    openmp_args = ['/openmp']
except ImportError:
    # this works with most recent Debian
    libraries = ['jpeg', 'lz4', 'zstd', 'lzma', 'bz2', 'png', 'webp',
                 'openjp2', 'lcms2', 'z']
    include_dirs.extend(
        ['/usr/include/openjpeg-2.1',
         '/usr/include/openjpeg-2.2',
         '/usr/include/openjpeg-2.3'])
    define_macros = [('OPJ_HAVE_LIBLCMS2', 1)]
    if sys.platform == 'win32':
        # BUG FIX: list.append() takes exactly one argument; the previous
        # define_macros.append(('WIN32', 1), ('CHARLS_STATIC', 1)) raised
        # TypeError on win32. Use extend() to add both macros.
        define_macros.extend([('WIN32', 1), ('CHARLS_STATIC', 1)])
    else:
        libraries.append('m')
    libraries_jpeg12 = []  # 'jpeg12'
    libraries_jpegls = []  # 'charls'
    libraries_zfp = []  # 'zfp'
    openmp_args = ['-fopenmp']
if 'lzf' not in libraries and 'liblzf' not in libraries:
    # use liblzf sources from sdist
    sources.extend(['liblzf-3.6/lzf_c.c', 'liblzf-3.6/lzf_d.c'])
    include_dirs.append('liblzf-3.6')
class build_ext(_build_ext):
    """Delay import numpy until build."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Tell numpy it is not running its own setup, then add its C
        # headers to the include path (numpy may only become importable
        # at build time via setup_requires).
        __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
# Work around "Cython in setup_requires doesn't work"
# https://github.com/pypa/setuptools/issues/1317
try:
import Cython # noqa
ext = '.pyx'
except ImportError:
ext = '.c'
ext_modules = [
Extension(
'imagecodecs._imagecodecs_lite',
['imagecodecs/imagecodecs.c', 'imagecodecs/_imagecodecs_lite' + ext],
include_dirs=['imagecodecs'],
libraries=[] if sys.platform == 'win32' else ['m'],
),
Extension(
'imagecodecs._imagecodecs',
sources,
include_dirs=include_dirs,
libraries=libraries,
define_macros=define_macros,
)
]
if libraries_jpeg12:
ext_modules += [
Extension(
'imagecodecs._jpeg12',
['imagecodecs/_jpeg12' + ext],
include_dirs=['imagecodecs'],
libraries=libraries_jpeg12,
define_macros=[('BITS_IN_JSAMPLE', 12)],
)
]
if libraries_jpegls:
ext_modules += [
Extension(
'imagecodecs._jpegls',
['imagecodecs/_jpegls' + ext],
include_dirs=['imagecodecs'],
libraries=libraries_jpegls,
define_macros=define_macros,
)
]
if libraries_zfp:
ext_modules += [
Extension(
'imagecodecs._zfp',
['imagecodecs/_zfp' + ext],
include_dirs=['imagecodecs'],
libraries=libraries_zfp,
define_macros=define_macros,
extra_compile_args=openmp_args
)
]
# Keyword arguments for setup(); kept in a dict so the '--universal'
# pure-python build below can strip the native-extension entries.
setup_args = dict(
    name='imagecodecs',
    version=version,
    description=description,
    long_description=readme,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://www.lfd.uci.edu/~gohlke/',
    python_requires='>=2.7',
    install_requires=['numpy>=1.11.3'],
    setup_requires=['setuptools>=18.0', 'numpy>=1.11.3'],  # , 'cython>=0.29.0'
    extras_require={'all': ['matplotlib>=2.2', 'tifffile>=2019.5.22']},
    tests_require=['pytest', 'tifffile', 'zstd', 'lz4',
                   'python-lzf', 'scikit-image'],  # zfpy
    packages=['imagecodecs'],
    package_data={'imagecodecs': ['licenses/*']},
    entry_points={
        'console_scripts': ['imagecodecs=imagecodecs.__main__:main']},
    ext_modules=ext_modules,
    cmdclass={'build_ext': build_ext},
    license='BSD',
    zip_safe=False,
    platforms=['any'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: BSD License',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: C',
        'Programming Language :: Cython',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
if '--universal' in sys.argv:
    # Pure-python wheel: drop all native-extension machinery.
    del setup_args['ext_modules']
    del setup_args['cmdclass']
    del setup_args['package_data']
setup(**setup_args)
|
[
"setuptools.Extension",
"setuptools.setup",
"setuptools.command.build_ext.build_ext.finalize_options",
"numpy.get_include",
"re.search"
] |
[((6352, 6371), 'setuptools.setup', 'setup', ([], {}), '(**setup_args)\n', (6357, 6371), False, 'from setuptools import setup, Extension\n'), ((3348, 3551), 'setuptools.Extension', 'Extension', (['"""imagecodecs._imagecodecs_lite"""', "['imagecodecs/imagecodecs.c', 'imagecodecs/_imagecodecs_lite' + ext]"], {'include_dirs': "['imagecodecs']", 'libraries': "([] if sys.platform == 'win32' else ['m'])"}), "('imagecodecs._imagecodecs_lite', ['imagecodecs/imagecodecs.c', \n 'imagecodecs/_imagecodecs_lite' + ext], include_dirs=['imagecodecs'],\n libraries=[] if sys.platform == 'win32' else ['m'])\n", (3357, 3551), False, 'from setuptools import setup, Extension\n'), ((3593, 3720), 'setuptools.Extension', 'Extension', (['"""imagecodecs._imagecodecs"""', 'sources'], {'include_dirs': 'include_dirs', 'libraries': 'libraries', 'define_macros': 'define_macros'}), "('imagecodecs._imagecodecs', sources, include_dirs=include_dirs,\n libraries=libraries, define_macros=define_macros)\n", (3602, 3720), False, 'from setuptools import setup, Extension\n'), ((2968, 3001), 'setuptools.command.build_ext.build_ext.finalize_options', '_build_ext.finalize_options', (['self'], {}), '(self)\n', (2995, 3001), True, 'from setuptools.command.build_ext import build_ext as _build_ext\n'), ((3828, 3999), 'setuptools.Extension', 'Extension', (['"""imagecodecs._jpeg12"""', "['imagecodecs/_jpeg12' + ext]"], {'include_dirs': "['imagecodecs']", 'libraries': 'libraries_jpeg12', 'define_macros': "[('BITS_IN_JSAMPLE', 12)]"}), "('imagecodecs._jpeg12', ['imagecodecs/_jpeg12' + ext],\n include_dirs=['imagecodecs'], libraries=libraries_jpeg12, define_macros\n =[('BITS_IN_JSAMPLE', 12)])\n", (3837, 3999), False, 'from setuptools import setup, Extension\n'), ((4130, 4289), 'setuptools.Extension', 'Extension', (['"""imagecodecs._jpegls"""', "['imagecodecs/_jpegls' + ext]"], {'include_dirs': "['imagecodecs']", 'libraries': 'libraries_jpegls', 'define_macros': 'define_macros'}), "('imagecodecs._jpegls', 
['imagecodecs/_jpegls' + ext],\n include_dirs=['imagecodecs'], libraries=libraries_jpegls, define_macros\n =define_macros)\n", (4139, 4289), False, 'from setuptools import setup, Extension\n'), ((4417, 4599), 'setuptools.Extension', 'Extension', (['"""imagecodecs._zfp"""', "['imagecodecs/_zfp' + ext]"], {'include_dirs': "['imagecodecs']", 'libraries': 'libraries_zfp', 'define_macros': 'define_macros', 'extra_compile_args': 'openmp_args'}), "('imagecodecs._zfp', ['imagecodecs/_zfp' + ext], include_dirs=[\n 'imagecodecs'], libraries=libraries_zfp, define_macros=define_macros,\n extra_compile_args=openmp_args)\n", (4426, 4599), False, 'from setuptools import setup, Extension\n'), ((348, 388), 're.search', 're.search', (['"""__version__ = \'(.*?)\'"""', 'code'], {}), '("__version__ = \'(.*?)\'", code)\n', (357, 388), False, 'import re\n'), ((476, 523), 're.search', 're.search', (['"""""\\"(.*)\\\\.(?:\\\\r\\\\n|\\\\r|\\\\n)"""', 'code'], {}), '(\'"""(.*)\\\\.(?:\\\\r\\\\n|\\\\r|\\\\n)\', code)\n', (485, 523), False, 'import re\n'), ((544, 656), 're.search', 're.search', (['"""(?:\\\\r\\\\n|\\\\r|\\\\n){2}""\\"(.*)""\\"(?:\\\\r\\\\n|\\\\r|\\\\n){2}__version__"""', 'code', '(re.MULTILINE | re.DOTALL)'], {}), '(\'(?:\\\\r\\\\n|\\\\r|\\\\n){2}"""(.*)"""(?:\\\\r\\\\n|\\\\r|\\\\n){2}__version__\',\n code, re.MULTILINE | re.DOTALL)\n', (553, 656), False, 'import re\n'), ((798, 903), 're.search', 're.search', (['"""(# Copyright.*?(?:\\\\r\\\\n|\\\\r|\\\\n))(?:\\\\r\\\\n|\\\\r|\\\\n)+"\\""""', 'code', '(re.MULTILINE | re.DOTALL)'], {}), '(\'(# Copyright.*?(?:\\\\r\\\\n|\\\\r|\\\\n))(?:\\\\r\\\\n|\\\\r|\\\\n)+""\', code, \n re.MULTILINE | re.DOTALL)\n', (807, 903), False, 'import re\n'), ((3104, 3123), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3121, 3123), False, 'import numpy\n')]
|
import os
import re
import sys
import numpy as np
from scipy.io import loadmat
import pandas as pd
# Defaults used when input/output are not given on the command line.
DEFAULT_MAT_FILE = './data/nut_data_reps.mat'
DEFAULT_OUT_DIR = './output'
CSV_FILENAME = 'nes-lter-nutrient.csv'
# Parse the command line: either no arguments (use defaults) or exactly two.
if len(sys.argv) < 3:
    in_mat_file = DEFAULT_MAT_FILE
    out_dir = DEFAULT_OUT_DIR
else:
    assert len(sys.argv) == 3, 'must specify input file and output directory'
    in_mat_file, out_dir = sys.argv[1], sys.argv[2]
assert os.path.exists(in_mat_file), 'input mat file {} not found'.format(in_mat_file)
assert os.path.exists(out_dir), 'output directory {} does not exist'.format(out_dir)
# load the mat file
mat = loadmat(in_mat_file, squeeze_me=True)
# construct dataframe
# column renaming map: mat-file headers -> output CSV column names
COL_MAP = {
    'Event_Number': 'event_number',
    'Event_Number_Niskin': 'event_number_niskin',
    'Latitude': 'latitude',
    'Longitude': 'longitude',
    'Depth': 'depth',
    'Nut_a_uM NO2- + NO3-': 'ntra_a',
    'Nut_b_uM NO2- + NO3-': 'ntra_b',
    'Nut_c_uM NO2- + NO3-': 'ntra_c',
    'Nut_a_uM NH4+': 'amon_a',
    'Nut_b_uM NH4+': 'amon_b',
    'Nut_c_uM NH4+': 'amon_c',
    'Nut_a_uM SiO2-': 'slca_a',
    'Nut_b_uM SiO2-': 'slca_b',
    'Nut_c_uM SiO2-': 'slca_c',
    'Nut_a_uM PO43-': 'phos_a',
    'Nut_b_uM PO43-': 'phos_b',
    'Nut_c_uM PO43-': 'phos_c',
}
# now parse mat file: one pandas Series per header column
cols = mat['header_nut']
d = {}
for i, col in enumerate(cols):
    d[col] = pd.Series(list(mat['MVCO_nut_reps'][:,i]))
df = pd.DataFrame(d, columns=cols)
# compute datetimes from start date and incorrect start time cols
# NOTE(review): the loop variable 'd' shadows the dict 'd' above --
# harmless here because the dict is no longer used, but worth renaming.
dt = []
for d, t in zip(df['Start_Date'], df['Start_Time_UTC']):
    dt.append(pd.to_datetime('{}T{}Z'.format(d[:10],t[11:])))
dt = pd.Series(dt)
# add to dataframe
df['time (UTC)'] = dt
del df['Start_Date']
del df['Start_Time_UTC']
# rename columns
df = df.rename(columns=COL_MAP)
def convert_series_fixed(series, significant_digits=3):
    """Yield each value of *series* as a fixed-precision string.

    NaN values are emitted as the literal string 'NaN'; everything else
    is formatted with *significant_digits* digits after the decimal
    point. Used to avoid spurious extra precision in the output CSV.
    """
    spec = '.{}f'.format(significant_digits)
    for value in series:
        yield 'NaN' if np.isnan(value) else format(value, spec)
SIGNIFICANT_DIGITS = 3
# apply precision formatting to every nutrient replicate column
data_cols = []
for var in ['ntra', 'slca', 'phos', 'amon']:
    for replicate in ['a', 'b', 'c']:
        colname = '{}_{}'.format(var, replicate)
        data_cols.append(colname)
for colname in data_cols:
    df[colname] = list(convert_series_fixed(df[colname], SIGNIFICANT_DIGITS))
# reorder: metadata columns first, then the nutrient data columns
cols = ['time (UTC)', 'latitude', 'longitude', 'depth', 'event_number'] + data_cols
df = df[cols]
# chop off everything before april 2006
df = df[df['time (UTC)'] >= '2006-04-01']
df.to_csv(CSV_FILENAME, index=None)
|
[
"pandas.DataFrame",
"scipy.io.loadmat",
"os.path.exists",
"numpy.isnan",
"pandas.Series"
] |
[((448, 475), 'os.path.exists', 'os.path.exists', (['in_mat_file'], {}), '(in_mat_file)\n', (462, 475), False, 'import os\n'), ((534, 557), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (548, 557), False, 'import os\n'), ((639, 676), 'scipy.io.loadmat', 'loadmat', (['in_mat_file'], {'squeeze_me': '(True)'}), '(in_mat_file, squeeze_me=True)\n', (646, 676), False, 'from scipy.io import loadmat\n'), ((1415, 1444), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {'columns': 'cols'}), '(d, columns=cols)\n', (1427, 1444), True, 'import pandas as pd\n'), ((1645, 1658), 'pandas.Series', 'pd.Series', (['dt'], {}), '(dt)\n', (1654, 1658), True, 'import pandas as pd\n'), ((2159, 2170), 'numpy.isnan', 'np.isnan', (['n'], {}), '(n)\n', (2167, 2170), True, 'import numpy as np\n')]
|
import datetime
import h5py
import numpy as np
import torch
from torch.utils import data
import torch.nn.functional as F
import soundfile as sf
from transformData import mu_law_encode,quan_mu_law_encode
sampleSize = 16384 * 60  # audio samples per training crop (~60 s at 16.384 kHz)
sample_rate = 16384 * 60  # NOTE(review): equals sampleSize, not the 16000 Hz wav rate asserted below -- confirm intent
class Dataset(data.Dataset):
    """Training dataset of random fixed-length crops from paired wav files.

    Each item directory <rootx>/<name>/ holds 0.wav (input) and 1.wav
    (target), both expected at 16 kHz; the pair is mu-law encoded and a
    random aligned window of sampleSize samples is returned.
    """
    def __init__(self, listx, rootx,quan=False, transform=None):
        self.rootx = rootx
        self.quan = quan
        print('quan'+str(quan))
        self.listx = listx
        #self.device=device
        self.transform = transform
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.listx)
    def __getitem__(self, index):
        #print('dataset',np.random.get_state()[1][0])
        # Re-seed from OS entropy so each DataLoader worker crops differently.
        np.random.seed()
        namex = self.listx[index]
        x, _ = sf.read(self.rootx + str(namex) + '/0.wav')
        assert (_ == 16000)
        y, _ = sf.read(self.rootx + str(namex) + '/1.wav')
        assert (_ == 16000)
        assert (x.shape == y.shape)
        x = mu_law_encode(x)
        y = mu_law_encode(y)
        # Tile short clips by repeated doubling until longer than sampleSize.
        if (x.shape[0] <= sampleSize):
            while (x.shape[0] <= sampleSize):
                x = np.concatenate((x, x))
                y = np.concatenate((y, y))
        assert (x.shape == y.shape)
        # print('xy', x.shape, y.shape, namex)
        # Random aligned crop of sampleSize samples from both channels.
        start = np.random.randint(0, x.shape[0] - sampleSize + 1, size=1)[0]
        x = x[start:start + sampleSize]
        y = y[start:start + sampleSize]
        x = torch.from_numpy(x.reshape(1, -1)).type(torch.float32)
        y = torch.from_numpy(y.reshape(1, -1)).type(torch.float32)
        return namex,x, y
class RandomCrop(object):
    """Transform that currently returns the sample unchanged.

    NOTE(review): the random values computed in __call__ are never used
    (the pitch_shift call is commented out), so the only observable
    effect is re-seeding and advancing the global numpy RNG state.
    """
    def __init__(self, pad,output_size=sample_rate):
        self.output_size = output_size
        self.pad=pad
    def __call__(self, sample):
        #print('randomcrop',np.random.get_state()[1][0])
        # Seed from wall-clock time (second + microsecond).
        np.random.seed(datetime.datetime.now().second + datetime.datetime.now().microsecond)
        x, y = sample['x'], sample['y']
        shrink = 0
        #startx = np.random.randint(self.pad + shrink * sampleSize, x.shape[-1] - sampleSize - self.pad - shrink * sampleSize)
        #print(startx)
        #x = x[startx - pad:startx + sampleSize + pad]
        #y = y[startx:startx + sampleSize]
        # Dead draws: l/sp/step/ux/lx feed only the commented-out pitch shift.
        l = np.random.uniform(0.25, 0.5)
        sp = np.random.uniform(0, 1 - l)
        step = np.random.uniform(-0.5, 0.5)
        ux = int(sp * sample_rate)
        lx = int(l * sample_rate)
        # x[ux:ux + lx] = librosa.effects.pitch_shift(x[ux:ux + lx], sample_rate, n_steps=step)
        return {'x': x, 'y': y}
class Valtset(data.Dataset):
    """Validation dataset: same random-crop pipeline as Dataset, without
    the transform hook or the quan print."""
    def __init__(self, listx, rootx,quan=False):
        self.rootx = rootx
        self.listx = listx
        self.quan = quan
        #self.device=device
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.listx)
    def __getitem__(self, index):
        'Generates one sample of data'
        # Re-seed from OS entropy so workers draw independent crops.
        np.random.seed()
        namex = self.listx[index]
        x, _ = sf.read(self.rootx + str(namex) + '/0.wav')
        assert (_ == 16000)
        y, _ = sf.read(self.rootx + str(namex) + '/1.wav')
        assert (_ == 16000)
        assert (x.shape == y.shape)
        x = mu_law_encode(x)
        y = mu_law_encode(y)
        # Tile short clips by doubling until longer than sampleSize.
        if (x.shape[0] <= sampleSize):
            while (x.shape[0] <= sampleSize):
                x = np.concatenate((x, x))
                y = np.concatenate((y, y))
        assert (x.shape == y.shape)
        # Random aligned crop of sampleSize samples.
        start = np.random.randint(0, x.shape[0] - sampleSize + 1, size=1)[0]
        x = x[start:start + sampleSize]
        y = y[start:start + sampleSize]
        x = torch.from_numpy(x.reshape(1, -1)).type(torch.float32)
        y = torch.from_numpy(y.reshape(1, -1)).type(torch.float32)
        return namex,x,y
class Testset(data.Dataset):
    """Test dataset: returns each full clip pair without cropping."""

    def __init__(self, listx, rootx, quan=False):
        self.rootx = rootx
        self.listx = listx
        self.quan = quan

    def __len__(self):
        """Number of clips in the set."""
        return len(self.listx)

    def __getitem__(self, index):
        """Load clip *index*, mu-law encode both files, return (name, x, y)."""
        name = self.listx[index]
        clip_dir = self.rootx + str(name)
        mixture, rate = sf.read(clip_dir + '/0.wav')
        assert (rate == 16000)
        target, rate = sf.read(clip_dir + '/1.wav')
        assert (rate == 16000)
        assert (mixture.shape == target.shape)
        mixture = mu_law_encode(mixture)
        target = mu_law_encode(target)
        x = torch.from_numpy(mixture.reshape(1, -1)).type(torch.float32)
        y = torch.from_numpy(target.reshape(1, -1)).type(torch.float32)
        return name, x, y
|
[
"numpy.random.uniform",
"numpy.random.seed",
"transformData.mu_law_encode",
"numpy.random.randint",
"datetime.datetime.now",
"numpy.concatenate"
] |
[((722, 738), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (736, 738), True, 'import numpy as np\n'), ((997, 1013), 'transformData.mu_law_encode', 'mu_law_encode', (['x'], {}), '(x)\n', (1010, 1013), False, 'from transformData import mu_law_encode, quan_mu_law_encode\n'), ((1026, 1042), 'transformData.mu_law_encode', 'mu_law_encode', (['y'], {}), '(y)\n', (1039, 1042), False, 'from transformData import mu_law_encode, quan_mu_law_encode\n'), ((2270, 2298), 'numpy.random.uniform', 'np.random.uniform', (['(0.25)', '(0.5)'], {}), '(0.25, 0.5)\n', (2287, 2298), True, 'import numpy as np\n'), ((2312, 2339), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1 - l)'], {}), '(0, 1 - l)\n', (2329, 2339), True, 'import numpy as np\n'), ((2355, 2383), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (2372, 2383), True, 'import numpy as np\n'), ((2953, 2969), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (2967, 2969), True, 'import numpy as np\n'), ((3227, 3243), 'transformData.mu_law_encode', 'mu_law_encode', (['x'], {}), '(x)\n', (3240, 3243), False, 'from transformData import mu_law_encode, quan_mu_law_encode\n'), ((3256, 3272), 'transformData.mu_law_encode', 'mu_law_encode', (['y'], {}), '(y)\n', (3269, 3272), False, 'from transformData import mu_law_encode, quan_mu_law_encode\n'), ((4426, 4442), 'transformData.mu_law_encode', 'mu_law_encode', (['x'], {}), '(x)\n', (4439, 4442), False, 'from transformData import mu_law_encode, quan_mu_law_encode\n'), ((4455, 4471), 'transformData.mu_law_encode', 'mu_law_encode', (['y'], {}), '(y)\n', (4468, 4471), False, 'from transformData import mu_law_encode, quan_mu_law_encode\n'), ((1323, 1380), 'numpy.random.randint', 'np.random.randint', (['(0)', '(x.shape[0] - sampleSize + 1)'], {'size': '(1)'}), '(0, x.shape[0] - sampleSize + 1, size=1)\n', (1340, 1380), True, 'import numpy as np\n'), ((3502, 3559), 'numpy.random.randint', 'np.random.randint', (['(0)', 
'(x.shape[0] - sampleSize + 1)'], {'size': '(1)'}), '(0, x.shape[0] - sampleSize + 1, size=1)\n', (3519, 3559), True, 'import numpy as np\n'), ((1149, 1171), 'numpy.concatenate', 'np.concatenate', (['(x, x)'], {}), '((x, x))\n', (1163, 1171), True, 'import numpy as np\n'), ((1192, 1214), 'numpy.concatenate', 'np.concatenate', (['(y, y)'], {}), '((y, y))\n', (1206, 1214), True, 'import numpy as np\n'), ((3379, 3401), 'numpy.concatenate', 'np.concatenate', (['(x, x)'], {}), '((x, x))\n', (3393, 3401), True, 'import numpy as np\n'), ((3422, 3444), 'numpy.concatenate', 'np.concatenate', (['(y, y)'], {}), '((y, y))\n', (3436, 3444), True, 'import numpy as np\n'), ((1881, 1904), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1902, 1904), False, 'import datetime\n'), ((1914, 1937), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1935, 1937), False, 'import datetime\n')]
|
import numpy as np
import os
import sys
import tensorflow as tf
from imutils.video import VideoStream
import cv2
import imutils
import time
from imutils.video import FPS
from sklearn.metrics import pairwise
import copy
import pathlib
from collections import defaultdict
# Palette of 100 random BGR colours (presumably one per detection class —
# not referenced in this chunk; confirm usage against the rest of the file).
colors = np.random.uniform(0, 255, size=(100, 3))
# Font used for all on-frame text annotations below.
font = cv2.FONT_HERSHEY_SIMPLEX
def estimate_stepping(indexesPersons , boxesPersons , image_np , flagPerson , areaPerson , areaDetails):
    """Flag pedestrians that are dangerously close and draw driver warnings.

    indexesPersons: NMS-style index wrappers (each entry's [0] indexes boxesPersons)
    boxesPersons:   candidate person boxes as (x, y, w, h)
    image_np:       frame to annotate (drawn on in place)
    flagPerson:     countdown that keeps the warning overlay alive for a few
                    frames after the last close detection
    areaPerson:     largest close-person box area currently known
    areaDetails:    boxes redrawn while the warning countdown is active

    Returns the annotated frame plus the updated (flagPerson, areaPerson,
    areaDetails) state to feed back in on the next frame.
    """
    found_close_person = False
    close_boxes = []
    # Collect every detection whose box area exceeds the "too close" threshold.
    for wrapped_idx in indexesPersons:
        det = wrapped_idx[0]
        bx, by, bw, bh = boxesPersons[det]
        if bw * bh > 9000:
            areaPerson = bw * bh
            found_close_person = True
            flagPerson = 5  # re-arm the warning countdown
            close_boxes.append([bx, by, bw, bh])
    if not found_close_person:
        flagPerson = flagPerson - 1  # decay the countdown while nobody is close
    else:
        # Outline each close person, label it with its area, and track the max.
        areaPerson = 0
        for bx, by, bw, bh in close_boxes:
            box_area = bw * bh
            cv2.rectangle(image_np, (bx, by), (bx + bw, by + bh), (0, 0, 0), 3)
            cv2.putText(image_np, str(box_area), (bx, by), font, 1.2, [0, 0, 0], 2)
            areaPerson = max(areaPerson, box_area)
        areaDetails = close_boxes
    if flagPerson > 0:
        # Keep drawing the last known boxes while the countdown is active.
        for bx, by, bw, bh in areaDetails:
            cv2.rectangle(image_np, (bx, by), (bx + bw, by + bh), (0, 0, 0), 3)
            cv2.putText(image_np, str(areaPerson), (bx, by), font, 1.2, [0, 0, 0], 2)
        if areaPerson > 15000:
            cv2.putText(image_np,"STOP IT !!! DON'T HIT THE PERSON " ,(270,120), font, 1.2,(0,0,255),2,cv2.LINE_AA)
        else:
            cv2.putText(image_np,"Drive slowly, people are around " ,(290,120), font, 1.2,(0,255,255),2,cv2.LINE_AA)
    return image_np , flagPerson , areaPerson , areaDetails
# a.mp4 100*25 803*25(inside cars) 819*25 842*25 913*25
# k.mp4(30) 22(don't use, night) 83(don't night)
# m.mp4(24) 6 25
# o.mp4(30) 2
# p.mp4(30) 3
# q.mp4(30) 20 0
|
[
"numpy.random.uniform",
"cv2.putText",
"cv2.rectangle"
] |
[((280, 320), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(255)'], {'size': '(100, 3)'}), '(0, 255, size=(100, 3))\n', (297, 320), True, 'import numpy as np\n'), ((864, 937), 'cv2.rectangle', 'cv2.rectangle', (['image_np', '(xmin, ymin)', '(xmin + w, ymin + h)', '(0, 0, 0)', '(3)'], {}), '(image_np, (xmin, ymin), (xmin + w, ymin + h), (0, 0, 0), 3)\n', (877, 937), False, 'import cv2\n'), ((1167, 1240), 'cv2.rectangle', 'cv2.rectangle', (['image_np', '(xmin, ymin)', '(xmin + w, ymin + h)', '(0, 0, 0)', '(3)'], {}), '(image_np, (xmin, ymin), (xmin + w, ymin + h), (0, 0, 0), 3)\n', (1180, 1240), False, 'import cv2\n'), ((1349, 1463), 'cv2.putText', 'cv2.putText', (['image_np', '"""STOP IT !!! DON\'T HIT THE PERSON """', '(270, 120)', 'font', '(1.2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(image_np, "STOP IT !!! DON\'T HIT THE PERSON ", (270, 120), font,\n 1.2, (0, 0, 255), 2, cv2.LINE_AA)\n', (1360, 1463), False, 'import cv2\n'), ((1464, 1579), 'cv2.putText', 'cv2.putText', (['image_np', '"""Drive slowly, people are around """', '(290, 120)', 'font', '(1.2)', '(0, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), "(image_np, 'Drive slowly, people are around ', (290, 120), font,\n 1.2, (0, 255, 255), 2, cv2.LINE_AA)\n", (1475, 1579), False, 'import cv2\n')]
|
import os
import argparse
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from bpe import Config
from bpe.agent import agents_bpe
from bpe.dataset.datasets_bpe import SARADataset
from bpe.functional.utils import cycle, move_to_device
from bpe.model import networks_bpe
torch.backends.cudnn.benchmark = True
def get_markdown_table(dict_data=None):
    """Render a dict as a two-column markdown table (key | value).

    Newlines inside values are replaced with ``<br/>`` so each entry stays on
    one table row.  Rows are joined with CRLF.  With no (or an empty) dict,
    only the header rows are returned.
    """
    rows = ['| key | value |', '| --- | --- |']
    if dict_data:
        rows.extend(
            '| {} | {} |'.format(key, str(value).replace('\n', '<br/>'))
            for key, value in dict_data.items()
        )
    return "\r\n".join(rows)
def add_hps_using(config, train_tb):
    """Log the run's hyperparameters as a markdown table in TensorBoard.

    Merges the class-level defaults from ``Config`` with the instance
    attributes of ``config`` (instance values win), keeps only numeric /
    boolean entries plus a small always-logged whitelist, and writes the
    result under the 'hyperparams' text tag.
    """
    always_log = set(["triplet_distance", "similarity_distance_metric", "dataset"])
    merged = dict(Config.__dict__)
    merged.update(config.__dict__.copy())
    loggable = {k: v for k, v in merged.items()
                if type(v) in (int, float, bool) or k in always_log}
    train_tb.add_text('hyperparams', get_markdown_table(loggable))
def main():
    """Train the BPE autoencoder.

    Parses CLI arguments, builds the model and data pipeline, then runs the
    epoch loop with mid-epoch validation steps, periodic learning-rate
    updates, and best/latest checkpointing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--name', type=str, default='bpe', help='Experiment name')
    parser.add_argument('-g', '--gpu_ids', type=str, default=0, required=False, help="specify gpu ids")
    parser.add_argument('--dataset', choices=["unity", "mixamo"], default="unity",
                        help="whether to use one decoder per one body part")
    parser.add_argument('--data_dir', default="", required=True, help="path to `SARA_released` dataset dir")
    # Experiment arguments
    parser.add_argument('--use_footvel_loss', action='store_true', help="use footvel loss")
    parser.add_argument('--use_invisibility_aug', action='store_true',
                        help="change random joints' visibility to invisible during training")
    parser.add_argument('--use_all_joints_on_each_bp', action='store_true',
                        help="using all joints on each body part as input, as opposed to particular body part")
    parser.add_argument('--triplet_distance', choices=["cosine", "l2"], default=None)
    parser.add_argument('--similarity_distance_metric', choices=["cosine", "l2"], default="cosine")
    parser.add_argument('--sim_loss_weight', type=float, default=None)
    parser.add_argument('--norecon', action='store_true')
    parser.add_argument('--logdir', type=str, default=None, help="change model/logdir")
    args = parser.parse_args()
    config = Config(args)
    # create the network
    net = networks_bpe.AutoEncoder_bpe(config)
    # print(net)
    net = torch.nn.DataParallel(net)
    net.to(config.device)
    # create tensorboard writer
    summary_writer = SummaryWriter(os.path.join(config.log_dir, 'train.events'))
    add_hps_using(config, summary_writer)
    # create dataloader
    # NOTE: worker_init_fn reseeds numpy per worker so workers don't share
    # the parent's RNG state.
    train_dataset = SARADataset('train', config)
    val_dataset = SARADataset('test', config)
    train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers,
                              worker_init_fn=lambda _: np.random.seed(), pin_memory=True)
    val_loader = DataLoader(val_dataset, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers,
                            worker_init_fn=lambda _: np.random.seed(), pin_memory=True)
    # validation is performed in the middle of training epoch
    # as a single step, rather than a full val data pass
    val_loader = cycle(val_loader)
    # create training agent
    tr_agent = agents_bpe.Agent3x_bpe(config, net)
    clock = tr_agent.clock
    summary_writer.add_scalar('learning_rate', config.lr, 0)
    min_val_loss = np.inf
    # start training
    for e in range(config.nr_epochs):
        epoch_val_loss = []
        # begin iteration
        pbar = tqdm(train_loader)
        for b, data_input in enumerate(pbar):
            # training
            # move data to appropriate device
            data_input = move_to_device(data_input, config.device, non_blocking=True)
            # train step
            losses = tr_agent.train_func(data_input)
            losses_values = {k: v.item() for k, v in losses.items()}
            # record loss to tensorboard
            for k, v in losses_values.items():
                summary_writer.add_scalar("train/" + k, v, clock.step)
            summary_writer.add_scalar("train/total_loss", sum(losses_values.values()), clock.step)
            pbar.set_description("EPOCH[{}][{}/{}]".format(e, b, len(train_loader)))
            # validation step (single val batch every val_frequency steps)
            if clock.step % config.val_frequency == 0:
                data_input_val = next(val_loader)
                # move data to appropriate device
                data_input_val = move_to_device(data_input_val, config.device)
                losses = tr_agent.val_func(data_input_val)
                losses_values = {k: v.item() for k, v in losses.items()}
                for k, v in losses_values.items():
                    summary_writer.add_scalar("valid/" + k, v, clock.step)
                summary_writer.add_scalar("valid/total_loss", sum(losses_values.values()), clock.step)
                epoch_val_loss.append(sum(losses_values.values()))
            # learning-rate schedule: step the LR a fixed number of times
            # per epoch, based on the minibatch counter kept by the clock
            if clock.lr_minibatch >= (len(pbar) // config.lr_update_frequency_per_epoch) - 1:
                clock.lr_step_update()
                tr_agent.update_learning_rate()
                clock.lr_minibatch = 0
                summary_writer.add_scalar('learning_rate', tr_agent.optimizer.param_groups[-1]['lr'], clock.step + 1)
            clock.tick()
        # periodic numbered checkpoint plus a rolling 'latest' checkpoint
        if clock.epoch % config.save_frequency == 0:
            tr_agent.save_network()
        tr_agent.save_network('latest.pth.tar')
        # track the best model by mean validation loss over this epoch
        mean_epoch_val_loss = sum(epoch_val_loss) / len(epoch_val_loss)
        if min_val_loss > mean_epoch_val_loss:
            print("saving model model_best.pth.tar")
            tr_agent.save_network('model_best.pth.tar')
            min_val_loss = mean_epoch_val_loss
        clock.tock()
    # close tensorboard writers
    if summary_writer is not None:
        summary_writer.close()
if __name__ == '__main__':
    main()
|
[
"tqdm.tqdm",
"numpy.random.seed",
"bpe.dataset.datasets_bpe.SARADataset",
"argparse.ArgumentParser",
"bpe.agent.agents_bpe.Agent3x_bpe",
"bpe.Config",
"bpe.functional.utils.cycle",
"bpe.Config.__dict__.items",
"bpe.model.networks_bpe.AutoEncoder_bpe",
"torch.nn.DataParallel",
"bpe.functional.utils.move_to_device",
"os.path.join"
] |
[((1162, 1187), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1185, 1187), False, 'import argparse\n'), ((2575, 2587), 'bpe.Config', 'Config', (['args'], {}), '(args)\n', (2581, 2587), False, 'from bpe import Config\n'), ((2624, 2660), 'bpe.model.networks_bpe.AutoEncoder_bpe', 'networks_bpe.AutoEncoder_bpe', (['config'], {}), '(config)\n', (2652, 2660), False, 'from bpe.model import networks_bpe\n'), ((2688, 2714), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (2709, 2714), False, 'import torch\n'), ((2942, 2970), 'bpe.dataset.datasets_bpe.SARADataset', 'SARADataset', (['"""train"""', 'config'], {}), "('train', config)\n", (2953, 2970), False, 'from bpe.dataset.datasets_bpe import SARADataset\n'), ((2989, 3016), 'bpe.dataset.datasets_bpe.SARADataset', 'SARADataset', (['"""test"""', 'config'], {}), "('test', config)\n", (3000, 3016), False, 'from bpe.dataset.datasets_bpe import SARADataset\n'), ((3571, 3588), 'bpe.functional.utils.cycle', 'cycle', (['val_loader'], {}), '(val_loader)\n', (3576, 3588), False, 'from bpe.functional.utils import cycle, move_to_device\n'), ((3633, 3668), 'bpe.agent.agents_bpe.Agent3x_bpe', 'agents_bpe.Agent3x_bpe', (['config', 'net'], {}), '(config, net)\n', (3655, 3668), False, 'from bpe.agent import agents_bpe\n'), ((2809, 2853), 'os.path.join', 'os.path.join', (['config.log_dir', '"""train.events"""'], {}), "(config.log_dir, 'train.events')\n", (2821, 2853), False, 'import os\n'), ((3924, 3942), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (3928, 3942), False, 'from tqdm import tqdm\n'), ((860, 883), 'bpe.Config.__dict__.items', 'Config.__dict__.items', ([], {}), '()\n', (881, 883), False, 'from bpe import Config\n'), ((4084, 4144), 'bpe.functional.utils.move_to_device', 'move_to_device', (['data_input', 'config.device'], {'non_blocking': '(True)'}), '(data_input, config.device, non_blocking=True)\n', (4098, 4144), False, 'from bpe.functional.utils import 
cycle, move_to_device\n'), ((3193, 3209), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (3207, 3209), True, 'import numpy as np\n'), ((3399, 3415), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (3413, 3415), True, 'import numpy as np\n'), ((4858, 4903), 'bpe.functional.utils.move_to_device', 'move_to_device', (['data_input_val', 'config.device'], {}), '(data_input_val, config.device)\n', (4872, 4903), False, 'from bpe.functional.utils import cycle, move_to_device\n')]
|
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.utils.extmath import cartesian
# import matplotlib.pyplot as plt
class MPHP:
    '''Multidimensional Periodic Hawkes Process.

    Captures event rates with a periodic component depending on the day of
    week.  With time measured in days and d(t) the day of week of t, the
    conditional intensity of dimension u is

        lambda_u(t) = mu[u] * mu_day[d(t)]
                      + sum_{t_j < t} alpha[u, u_j] * omega1 * exp(-omega2 * (t - t_j))

    Event sequences are arrays of rows [time (float, days), type (0..dim-1)].
    '''

    def __init__(self, alpha=[[0.5]], mu=[0.1], mu_day=np.ones(7), omega1=5.0, omega2=5.0, verbose=False):
        '''params should be of form:
        alpha: numpy.array((u,u)), mu: numpy.array((,u)), omega: float

        The mutable defaults are safe here: np.array() copies them, so
        instances never share state through the default objects.
        '''
        self.data = []
        self.alpha, self.mu, self.mu_day, self.omega1, self.omega2 = np.array(alpha), np.array(mu), np.array(mu_day), omega1, omega2
        self.dim = self.mu.shape[0]
        # self.check_stability(verbose)

    def check_stability(self, verbose):
        ''' check stability of process (max alpha eigenvalue < 1)'''
        # NOTE(review): `me` is computed but unused while the prints below
        # remain commented out.
        w, v = np.linalg.eig(self.alpha)
        me = np.amax(np.abs(w))
        # if verbose:
        #     print('Max eigenvalue: %1.5f' % me)
        # if me >= 1.:
        #     print('(WARNING) Unstable.')

    def generate_seq(self, window=np.inf, N_events=np.inf, last_rates=[], seq=None):
        '''Generate a sequence based on mu, alpha, omega values.
        Uses Ogata's thinning method, with some speedups, noted below
        Specify either window or N_events
        window: time period for which to simulate (in days)
        N_events: generate N_events events
        Start simulation from previous history
        last_rates: np.array of last rates
        seq: for last event, np.array([time, event type])
        '''
        mu_day_max = np.max(self.mu_day)
        last_rates_given = True
        if len(last_rates) == 0:
            # no history: draw the first event from the background rate alone
            seq = []
            M = np.sum(self.mu)
            Dstar = np.sum(self.mu_day)
            while True:
                s = np.random.exponential(scale=1. / M)
                day = int(np.floor(s) % 7)
                # attribute (weighted random sample, since sum(self.mu)==M)
                U = np.random.uniform()
                if U <= self.mu_day[day]/Dstar:
                    event_type = np.random.choice(np.arange(self.dim), 1, p=(self.mu / M))
                    seq.append([s, event_type])
                    break
            last_rates_given = False
            last_rates = self.mu * self.mu_day[day]
            last_day = day
            horizon = window
        else:
            # resume from the supplied last event; the seed event is removed
            # from the returned sequence at the end
            seq = [tuple(seq)]
            s = seq[0][0]
            horizon = s + window
            last_day = int(np.floor(seq[0][0]) % 7)
            last_rates_given = True
        num_events = 1
        while True:
            tj, uj = seq[-1][0], int(seq[-1][1])  # tj, uj - time, type of last event
            # recalculate upper bound M (inclusive of last event)
            M = mu_day_max*np.sum(self.mu) + np.sum(last_rates) + self.omega1 * np.sum(self.alpha[:, uj])
            # generate new candidate event time
            s += np.random.exponential(scale=1. / M)
            day = int(np.floor(s) % 7)
            # calc rates at time s (use trick to take advantage of rates at last event)
            rates = self.mu*self.mu_day[day] + np.exp(-self.omega2*(s - tj)) * \
                (self.omega1*self.alpha[:, uj].flatten() + last_rates - self.mu*self.mu_day[last_day])
            # attribution/rejection test
            # handle attribution and thinning in one step as weighted random sample
            diff = M - np.sum(rates)
            event_type = np.random.choice(np.arange(self.dim + 1), 1,
                                          p=(np.append(rates, diff) / M))
            if event_type < self.dim:
                # accepted: index self.dim would mean rejection
                seq.append([s, event_type])
                last_day = day
                last_rates = rates.copy()
                num_events += 1
            # if past horizon, done
            if (s >= horizon) or (num_events >= N_events):
                if last_rates_given:
                    seq.pop(0)
                seq = np.array(seq)
                if len(seq) > 0:
                    seq = seq[seq[:, 0] < horizon]
                return seq

    def EM(self, Ahat, mhat, mhatday, omega1, omega2, seq=[], a=np.ones(7), smx=[], tmx=[],
           Tm=-1, maxiter=100, epsilon=0.01, verbose=True, return_latent=False):
        '''implements MAP EM.
        seq[0, :] Time of event in days (float)
        seq[1, :] Event type, indexed 0 to dim-1
        Optional regularization:
        - On excitation matrix Ahat:
            `smx` and `tmx` matrix (shape=(dim,dim)).
            In general, the `tmx` matrix is a pseudocount of parent events from column j,
            and the `smx` matrix is a pseudocount of child events from column j -> i,
            however, for more details/usage see https://stmorse.github.io/docs/orc-thesis.pdf
        - On day of week parameter mhatday:
            a[i] is a pseudocount of events on the ith day of the week
            a[i] = 1 corresponds to no regularization for ith day
        '''
        # if no sequence passed, uses class instance data:
        if len(seq) == 0:
            seq = self.data
        N = len(seq)
        day = (np.floor(seq[:, 0]) % 7).astype(int)
        self.dim = mhat.shape[0]
        Tm = float(seq[-1, 0]) if Tm < 0 else float(Tm)
        sequ = seq[:, 1].astype(int)
        smx = np.array(smx); tmx = np.array(tmx)
        # random initialization of the latent parent probabilities
        p_ii = np.random.uniform(0.01, 0.99, size=N)
        p_ij = np.random.uniform(0.01, 0.99, size=(N, N))
        # PRECOMPUTATIONS
        # diffs[i,j] = t_i - t_j for j < i (o.w. zero)
        diffs = pairwise_distances(np.array([seq[:, 0]]).T, metric='euclidean')
        diffs[np.triu_indices(N)] = 0
        # kern[i,j] = omega1*np.exp(-omega2*diffs[i,j])
        kern = omega1 * np.exp(-omega2 * diffs)
        colidx = np.tile(sequ.reshape((1, N)), (N, 1))
        rowidx = np.tile(sequ.reshape((N, 1)), (1, N))
        # approx of Gt sum in a_{uu'} denom **
        seqcnts = np.array([len(np.where(sequ == i)[0]) for i in range(self.dim)])
        seqcnts = np.tile(seqcnts, (self.dim, 1))

        # returns sum of all pmat vals where u_i=a, u_j=b
        # *IF* pmat upper tri set to zero, this is
        # \sum_{u_i=u}\sum_{u_j=u', j<i} p_{ij}
        def sum_pij(a, b):
            c = cartesian([np.where(seq[:, 1] == int(a))[0], np.where(seq[:, 1] == int(b))[0]])
            return np.sum(p_ij[c[:, 0], c[:, 1]])
        vp = np.vectorize(sum_pij)

        # \int_0^t g(t') dt' with g(t)=we^{-wt}
        # def G(t): return 1 - np.exp(-omega * t)
        # vg = np.vectorize(G)
        # Gdenom = np.array([np.sum(vg(diffs[-1,np.where(seq[:,1]==i)])) for i in range(dim)])
        k = 0
        old_LL = -10000
        while k < maxiter:
            Auu = Ahat[rowidx, colidx]  # Auu[i, j] = a_{u_i, u_j}
            ag = np.multiply(Auu, kern)
            ag[np.triu_indices(N)] = 0
            # compute m_{u_i}
            self.mu = mhat[sequ]
            # compute delta_{d_i}
            self.mu_day = mhatday[day]
            # compute rates of u_i at time i for all times i
            rates = self.mu*self.mu_day + np.sum(ag, axis=1)
            # E step: compute matrix of p_ii and p_ij (keep separate for later computations)
            p_ij = np.divide(ag, np.tile(np.array([rates]).T, (1, N)))
            p_ii = np.divide(self.mu*self.mu_day, rates)
            # M step: compute mhat: mhat_u = (\sum_{u_i=u} p_ii) / T
            mhat = np.array([np.sum(p_ii[np.where(seq[:, 1] == i)])
                              for i in range(self.dim)]) / Tm
            mhatday = np.array([np.divide(np.sum(p_ii[np.where(day == i)]) + a[i] - 1,
                                                np.sum(p_ii)/7 + a[i] - 1) for i in range(7)])
            # ahat_{u,u'} = (\sum_{u_i=u}\sum_{u_j=u', j<i} p_ij) / \sum_{u_j=u'} G(T-t_j)
            # approximate with G(T-T_j) = 1
            if len(smx) > 0:
                Ahat = np.divide(np.fromfunction(lambda i, j: vp(i, j), (self.dim, self.dim)) + (smx - 1),
                                 seqcnts + tmx)
            else:
                Ahat = np.divide(np.fromfunction(lambda i, j: vp(i, j), (self.dim, self.dim)),
                                 seqcnts)
            # check the (per-event) log-likelihood every 10 iterations
            if k % 10 == 0:
                term1 = np.sum(np.log(rates))
                term2 = Tm * np.sum(mhat)
                term3 = np.sum(np.sum(Ahat[u, int(seq[j, 1])] for j in range(N)) for u in range(self.dim))
                new_LL = (1./N) * (term1 - term2 - term3)
                #new_LL = (1. / N) * (term1 - term3)
                if abs(new_LL - old_LL) <= epsilon:
                    if verbose:
                        print('Reached stopping criterion. (Old: %1.3f New: %1.3f)' % (old_LL, new_LL))
                    self.alpha = Ahat
                    self.mu = mhat
                    self.mu_day = mhatday
                    if return_latent:
                        return Ahat, mhat, mhatday, p_ii, p_ij
                    return Ahat, mhat, mhatday
                if verbose:
                    print('After ITER %d (old: %1.3f new: %1.3f)' % (k, old_LL, new_LL))
                    print(' terms %1.4f, %1.4f, %1.4f' % (term1, term2, term3))
                old_LL = new_LL
            k += 1
        if verbose:
            print('Reached max iter (%d).' % maxiter)
        self.alpha = Ahat
        self.mu = mhat
        self.mu_day = mhatday
        if return_latent:
            return Ahat, mhat, mhatday, p_ii, p_ij
        return Ahat, mhat, mhatday

    def get_ll(self, omega1, omega2, ahat, mhat, mhatday, seq = [], Tm = 0):
        '''Per-event log-likelihood of `seq` under the given parameters.

        Falls back to self.data when `seq` is empty; when Tm==0 the horizon
        is taken as the ceiling of the last event time.
        '''
        if len(seq) == 0:
            seq = self.data
        N = len(seq)
        day = (np.floor(seq[:, 0]) % 7).astype(int)
        if Tm==0:
            Tm = np.ceil(seq[-1, 0])
        sequ = seq[:, 1].astype(int)
        dim = mhat.shape[0]
        # diffs[i,j] = t_i - t_j for j < i (o.w. zero)
        diffs = pairwise_distances(np.array([seq[:, 0]]).T, metric='euclidean')
        diffs[np.triu_indices(N)] = 0
        # kern[i,j] = omega*np.exp(-omega*diffs[i,j])
        kern = omega1 * np.exp(-omega2 * diffs)
        colidx = np.tile(sequ.reshape((1, N)), (N, 1))
        rowidx = np.tile(sequ.reshape((N, 1)), (1, N))
        Auu = ahat[rowidx, colidx]
        ag = np.multiply(Auu, kern)
        ag[np.triu_indices(N)] = 0
        # compute total rates of u_i at time i
        rates = mhat[sequ]*mhatday[day] + np.sum(ag, axis=1)
        term1 = np.sum(np.log(rates))
        term2 = Tm * np.sum(mhat)
        term3 = np.sum(np.sum(ahat[u, int(seq[j, 1])] for j in range(N)) for u in range(dim))
        loglik = (1./N) * (term1 - term2 - term3)
        return loglik

    def get_rate(self, ct, d, seq=None):
        '''Return the intensity of dimension `d` at time `ct` given the
        history `seq`; falls back to self.data when seq is None or empty.
        Events at or after `ct` are excluded from the excitation sum.
        '''
        # Fix: the previous `len(seq)` check raised TypeError for the
        # default seq=None; now None falls back to self.data as intended.
        if seq is None or len(seq) == 0:
            seq = np.array(self.data)
        else:
            seq = np.array(seq)
        if not np.all(ct > seq[:, 0]):
            seq = seq[seq[:, 0] < ct]
        day = int(np.floor(ct) % 7)
        return self.mu[d]*self.mu_day[day] + \
            np.sum([self.alpha[d, int(j)] * self.omega1 * np.exp(-self.omega2 * (ct - t)) for t, j in seq])
|
[
"numpy.abs",
"numpy.sum",
"numpy.floor",
"numpy.random.exponential",
"numpy.ones",
"numpy.arange",
"numpy.tile",
"numpy.exp",
"numpy.multiply",
"numpy.linalg.eig",
"numpy.append",
"numpy.max",
"numpy.divide",
"numpy.vectorize",
"numpy.ceil",
"numpy.triu_indices",
"numpy.all",
"numpy.random.uniform",
"numpy.log",
"numpy.where",
"numpy.array"
] |
[((354, 364), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (361, 364), True, 'import numpy as np\n'), ((875, 900), 'numpy.linalg.eig', 'np.linalg.eig', (['self.alpha'], {}), '(self.alpha)\n', (888, 900), True, 'import numpy as np\n'), ((1641, 1660), 'numpy.max', 'np.max', (['self.mu_day'], {}), '(self.mu_day)\n', (1647, 1660), True, 'import numpy as np\n'), ((4193, 4203), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (4200, 4203), True, 'import numpy as np\n'), ((5335, 5348), 'numpy.array', 'np.array', (['smx'], {}), '(smx)\n', (5343, 5348), True, 'import numpy as np\n'), ((5356, 5369), 'numpy.array', 'np.array', (['tmx'], {}), '(tmx)\n', (5364, 5369), True, 'import numpy as np\n'), ((5386, 5423), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(0.99)'], {'size': 'N'}), '(0.01, 0.99, size=N)\n', (5403, 5423), True, 'import numpy as np\n'), ((5439, 5481), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(0.99)'], {'size': '(N, N)'}), '(0.01, 0.99, size=(N, N))\n', (5456, 5481), True, 'import numpy as np\n'), ((6048, 6079), 'numpy.tile', 'np.tile', (['seqcnts', '(self.dim, 1)'], {}), '(seqcnts, (self.dim, 1))\n', (6055, 6079), True, 'import numpy as np\n'), ((6424, 6445), 'numpy.vectorize', 'np.vectorize', (['sum_pij'], {}), '(sum_pij)\n', (6436, 6445), True, 'import numpy as np\n'), ((10328, 10350), 'numpy.multiply', 'np.multiply', (['Auu', 'kern'], {}), '(Auu, kern)\n', (10339, 10350), True, 'import numpy as np\n'), ((610, 625), 'numpy.array', 'np.array', (['alpha'], {}), '(alpha)\n', (618, 625), True, 'import numpy as np\n'), ((627, 639), 'numpy.array', 'np.array', (['mu'], {}), '(mu)\n', (635, 639), True, 'import numpy as np\n'), ((641, 657), 'numpy.array', 'np.array', (['mu_day'], {}), '(mu_day)\n', (649, 657), True, 'import numpy as np\n'), ((922, 931), 'numpy.abs', 'np.abs', (['w'], {}), '(w)\n', (928, 931), True, 'import numpy as np\n'), ((1765, 1780), 'numpy.sum', 'np.sum', (['self.mu'], {}), '(self.mu)\n', (1771, 1780), True, 
'import numpy as np\n'), ((1801, 1820), 'numpy.sum', 'np.sum', (['self.mu_day'], {}), '(self.mu_day)\n', (1807, 1820), True, 'import numpy as np\n'), ((2956, 2992), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': '(1.0 / M)'}), '(scale=1.0 / M)\n', (2977, 2992), True, 'import numpy as np\n'), ((5659, 5677), 'numpy.triu_indices', 'np.triu_indices', (['N'], {}), '(N)\n', (5674, 5677), True, 'import numpy as np\n'), ((5764, 5787), 'numpy.exp', 'np.exp', (['(-omega2 * diffs)'], {}), '(-omega2 * diffs)\n', (5770, 5787), True, 'import numpy as np\n'), ((6380, 6410), 'numpy.sum', 'np.sum', (['p_ij[c[:, 0], c[:, 1]]'], {}), '(p_ij[c[:, 0], c[:, 1]])\n', (6386, 6410), True, 'import numpy as np\n'), ((6819, 6841), 'numpy.multiply', 'np.multiply', (['Auu', 'kern'], {}), '(Auu, kern)\n', (6830, 6841), True, 'import numpy as np\n'), ((7320, 7359), 'numpy.divide', 'np.divide', (['(self.mu * self.mu_day)', 'rates'], {}), '(self.mu * self.mu_day, rates)\n', (7329, 7359), True, 'import numpy as np\n'), ((9797, 9816), 'numpy.ceil', 'np.ceil', (['seq[-1, 0]'], {}), '(seq[-1, 0])\n', (9804, 9816), True, 'import numpy as np\n'), ((10040, 10058), 'numpy.triu_indices', 'np.triu_indices', (['N'], {}), '(N)\n', (10055, 10058), True, 'import numpy as np\n'), ((10143, 10166), 'numpy.exp', 'np.exp', (['(-omega2 * diffs)'], {}), '(-omega2 * diffs)\n', (10149, 10166), True, 'import numpy as np\n'), ((10362, 10380), 'numpy.triu_indices', 'np.triu_indices', (['N'], {}), '(N)\n', (10377, 10380), True, 'import numpy as np\n'), ((10476, 10494), 'numpy.sum', 'np.sum', (['ag'], {'axis': '(1)'}), '(ag, axis=1)\n', (10482, 10494), True, 'import numpy as np\n'), ((10519, 10532), 'numpy.log', 'np.log', (['rates'], {}), '(rates)\n', (10525, 10532), True, 'import numpy as np\n'), ((10555, 10567), 'numpy.sum', 'np.sum', (['mhat'], {}), '(mhat)\n', (10561, 10567), True, 'import numpy as np\n'), ((10899, 10918), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (10907, 
10918), True, 'import numpy as np\n'), ((10951, 10964), 'numpy.array', 'np.array', (['seq'], {}), '(seq)\n', (10959, 10964), True, 'import numpy as np\n'), ((10980, 11002), 'numpy.all', 'np.all', (['(ct > seq[:, 0])'], {}), '(ct > seq[:, 0])\n', (10986, 11002), True, 'import numpy as np\n'), ((1866, 1902), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': '(1.0 / M)'}), '(scale=1.0 / M)\n', (1887, 1902), True, 'import numpy as np\n'), ((2042, 2061), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2059, 2061), True, 'import numpy as np\n'), ((3454, 3467), 'numpy.sum', 'np.sum', (['rates'], {}), '(rates)\n', (3460, 3467), True, 'import numpy as np\n'), ((3511, 3534), 'numpy.arange', 'np.arange', (['(self.dim + 1)'], {}), '(self.dim + 1)\n', (3520, 3534), True, 'import numpy as np\n'), ((4000, 4013), 'numpy.array', 'np.array', (['seq'], {}), '(seq)\n', (4008, 4013), True, 'import numpy as np\n'), ((5600, 5621), 'numpy.array', 'np.array', (['[seq[:, 0]]'], {}), '([seq[:, 0]])\n', (5608, 5621), True, 'import numpy as np\n'), ((6857, 6875), 'numpy.triu_indices', 'np.triu_indices', (['N'], {}), '(N)\n', (6872, 6875), True, 'import numpy as np\n'), ((7124, 7142), 'numpy.sum', 'np.sum', (['ag'], {'axis': '(1)'}), '(ag, axis=1)\n', (7130, 7142), True, 'import numpy as np\n'), ((9981, 10002), 'numpy.array', 'np.array', (['[seq[:, 0]]'], {}), '([seq[:, 0]])\n', (9989, 10002), True, 'import numpy as np\n'), ((11061, 11073), 'numpy.floor', 'np.floor', (['ct'], {}), '(ct)\n', (11069, 11073), True, 'import numpy as np\n'), ((2554, 2573), 'numpy.floor', 'np.floor', (['seq[0][0]'], {}), '(seq[0][0])\n', (2562, 2573), True, 'import numpy as np\n'), ((2844, 2862), 'numpy.sum', 'np.sum', (['last_rates'], {}), '(last_rates)\n', (2850, 2862), True, 'import numpy as np\n'), ((2879, 2904), 'numpy.sum', 'np.sum', (['self.alpha[:, uj]'], {}), '(self.alpha[:, uj])\n', (2885, 2904), True, 'import numpy as np\n'), ((3014, 3025), 'numpy.floor', 'np.floor', 
(['s'], {}), '(s)\n', (3022, 3025), True, 'import numpy as np\n'), ((3168, 3199), 'numpy.exp', 'np.exp', (['(-self.omega2 * (s - tj))'], {}), '(-self.omega2 * (s - tj))\n', (3174, 3199), True, 'import numpy as np\n'), ((5158, 5177), 'numpy.floor', 'np.floor', (['seq[:, 0]'], {}), '(seq[:, 0])\n', (5166, 5177), True, 'import numpy as np\n'), ((8264, 8277), 'numpy.log', 'np.log', (['rates'], {}), '(rates)\n', (8270, 8277), True, 'import numpy as np\n'), ((8308, 8320), 'numpy.sum', 'np.sum', (['mhat'], {}), '(mhat)\n', (8314, 8320), True, 'import numpy as np\n'), ((9725, 9744), 'numpy.floor', 'np.floor', (['seq[:, 0]'], {}), '(seq[:, 0])\n', (9733, 9744), True, 'import numpy as np\n'), ((1928, 1939), 'numpy.floor', 'np.floor', (['s'], {}), '(s)\n', (1936, 1939), True, 'import numpy as np\n'), ((2161, 2180), 'numpy.arange', 'np.arange', (['self.dim'], {}), '(self.dim)\n', (2170, 2180), True, 'import numpy as np\n'), ((2826, 2841), 'numpy.sum', 'np.sum', (['self.mu'], {}), '(self.mu)\n', (2832, 2841), True, 'import numpy as np\n'), ((3580, 3602), 'numpy.append', 'np.append', (['rates', 'diff'], {}), '(rates, diff)\n', (3589, 3602), True, 'import numpy as np\n'), ((5979, 5998), 'numpy.where', 'np.where', (['(sequ == i)'], {}), '(sequ == i)\n', (5987, 5998), True, 'import numpy as np\n'), ((7271, 7288), 'numpy.array', 'np.array', (['[rates]'], {}), '([rates])\n', (7279, 7288), True, 'import numpy as np\n'), ((11184, 11215), 'numpy.exp', 'np.exp', (['(-self.omega2 * (ct - t))'], {}), '(-self.omega2 * (ct - t))\n', (11190, 11215), True, 'import numpy as np\n'), ((7462, 7486), 'numpy.where', 'np.where', (['(seq[:, 1] == i)'], {}), '(seq[:, 1] == i)\n', (7470, 7486), True, 'import numpy as np\n'), ((7681, 7693), 'numpy.sum', 'np.sum', (['p_ii'], {}), '(p_ii)\n', (7687, 7693), True, 'import numpy as np\n'), ((7605, 7623), 'numpy.where', 'np.where', (['(day == i)'], {}), '(day == i)\n', (7613, 7623), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
# Aggregate per-run Bayesian-optimization regret logs for one method into
# mean/std curves, save them to disk and plot mean +/- one std deviation.
n_files = 100
path = './experiments/test9/PES'
name = '/test9_PES_f_'
n_iterations = 100
log_regret = np.zeros((n_iterations, n_files))
# Per-iteration wall-clock times; only the commented-out code below uses it.
time = np.zeros((n_iterations, n_files))
# Known optimum of the objective.  Values for other test problems:
# test7: 4.389940124468381  test5: -0.5369910241891562  test: -0.42973174
# test_linear2: 1.382850185589923  test_linear4: 0.90579135  test_linear8: 0.84455888
real_opt = 0.
for run in range(n_files):
    print(run)
    # presumably the file holds best-so-far f values (row 1 would be time,
    # per the commented line below) — confirm against the writer script
    f_vals = np.loadtxt(path + name + str(run) + '.txt', unpack=True)
    log_regret[:, run] = np.log10(np.abs(real_opt - f_vals))[0:n_iterations]
    #time[:, run] = np.loadtxt(path + name + str(run) + '.txt', unpack=True)[1, 0:n_iterations]/60
# Column 0 = mean log10 regret per iteration, column 1 = std across runs.
log_regret_stats = np.column_stack((np.mean(log_regret, axis=1), np.std(log_regret, axis=1)))
#time_stats = np.column_stack((np.mean(time, axis=1), np.std(time, axis=1)))
np.savetxt(path + name + 'log_regret_stats.txt', log_regret_stats)
#np.savetxt(path + name + '_time_stats.txt', time_stats)
plt.figure()
plt.plot(log_regret_stats[:, 0], label=name)
plt.plot(log_regret_stats[:, 0] + log_regret_stats[:, 1])
plt.plot(log_regret_stats[:, 0] - log_regret_stats[:, 1])
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.std",
"numpy.savetxt",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.mean"
] |
[((154, 187), 'numpy.zeros', 'np.zeros', (['(n_iterations, n_files)'], {}), '((n_iterations, n_files))\n', (162, 187), True, 'import numpy as np\n'), ((195, 228), 'numpy.zeros', 'np.zeros', (['(n_iterations, n_files)'], {}), '((n_iterations, n_files))\n', (203, 228), True, 'import numpy as np\n'), ((720, 747), 'numpy.zeros', 'np.zeros', (['(n_iterations, 2)'], {}), '((n_iterations, 2))\n', (728, 747), True, 'import numpy as np\n'), ((773, 800), 'numpy.mean', 'np.mean', (['log_regret'], {'axis': '(1)'}), '(log_regret, axis=1)\n', (780, 800), True, 'import numpy as np\n'), ((826, 852), 'numpy.std', 'np.std', (['log_regret'], {'axis': '(1)'}), '(log_regret, axis=1)\n', (832, 852), True, 'import numpy as np\n'), ((984, 1050), 'numpy.savetxt', 'np.savetxt', (["(path + name + 'log_regret_stats.txt')", 'log_regret_stats'], {}), "(path + name + 'log_regret_stats.txt', log_regret_stats)\n", (994, 1050), True, 'import numpy as np\n'), ((1108, 1120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1118, 1120), True, 'import matplotlib.pyplot as plt\n'), ((1121, 1165), 'matplotlib.pyplot.plot', 'plt.plot', (['log_regret_stats[:, 0]'], {'label': 'name'}), '(log_regret_stats[:, 0], label=name)\n', (1129, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1223), 'matplotlib.pyplot.plot', 'plt.plot', (['(log_regret_stats[:, 0] + log_regret_stats[:, 1])'], {}), '(log_regret_stats[:, 0] + log_regret_stats[:, 1])\n', (1174, 1223), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1281), 'matplotlib.pyplot.plot', 'plt.plot', (['(log_regret_stats[:, 0] - log_regret_stats[:, 1])'], {}), '(log_regret_stats[:, 0] - log_regret_stats[:, 1])\n', (1232, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1430, 1440), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1438, 1440), True, 'import matplotlib.pyplot as plt\n')]
|
import pygame
from random import randint
import numpy as np
# Programa por Magnus e Rudigus
pygame.init()
screen = pygame.display.set_mode((620, 620))
myfont = pygame.font.SysFont("monospace", 30, 1)
done = False
is_blue = 0
quantBlocos = [10, 10]
blocos = []
minas = np.zeros((quantBlocos[0], quantBlocos[1]))
minas[:2,] = 1
minas = minas.ravel()
np.random.shuffle(minas)
minas = minas.reshape(10, 10)
aux = []
for i in range(quantBlocos[0]):
for j in range(quantBlocos[1]):
aux.append(pygame.Rect(60 * i + 10, 60 * j + 10, 59, 59))
blocos.append(aux)
aux = []
blocosAcertados = np.zeros((quantBlocos[0], quantBlocos[1]))
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
posMouse = pygame.mouse.get_pos()
for i in range(quantBlocos[0]):
for j in range(quantBlocos[1]):
if(blocos[i][j].collidepoint(posMouse)):
is_blue = (i,j)
botoes = pygame.mouse.get_pressed()
mouse = botoes[0]
minaEscolhida = None
if(mouse):
minaEscolhida = minas[is_blue[0]][is_blue[1]]
print("Deu certo doido da maconha")
if(minaEscolhida == 1):
print("Booooooooom")
done = True
else:
blocosAcertados[is_blue[0]][is_blue[1]] = 1
print("Ahhhh safado")
for i in range(quantBlocos[0]):
for j in range(quantBlocos[1]):
if (blocosAcertados[i][j] == 1):
color = (0, 255, 0)
elif not((is_blue) == (i, j)):
color = (0, 128, 255)
elif (minaEscolhida == 1):
color = (255, 0, 0)
else:
color = (255, 100, 0)
pygame.draw.rect(screen, color, blocos[i][j])
if (blocosAcertados[i][j] == 1):
label = myfont.render("5", 0, (0, 0, 0))
screen.blit(label, (60 * i + 30, 60 * j + 25))
minaEscolhida = None
pygame.display.flip()
|
[
"pygame.mouse.get_pressed",
"pygame.font.SysFont",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.draw.rect",
"pygame.Rect",
"numpy.zeros",
"pygame.init",
"pygame.display.flip",
"pygame.mouse.get_pos",
"numpy.random.shuffle"
] |
[((94, 107), 'pygame.init', 'pygame.init', ([], {}), '()\n', (105, 107), False, 'import pygame\n'), ((117, 152), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(620, 620)'], {}), '((620, 620))\n', (140, 152), False, 'import pygame\n'), ((162, 201), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""monospace"""', '(30)', '(1)'], {}), "('monospace', 30, 1)\n", (181, 201), False, 'import pygame\n'), ((271, 313), 'numpy.zeros', 'np.zeros', (['(quantBlocos[0], quantBlocos[1])'], {}), '((quantBlocos[0], quantBlocos[1]))\n', (279, 313), True, 'import numpy as np\n'), ((351, 375), 'numpy.random.shuffle', 'np.random.shuffle', (['minas'], {}), '(minas)\n', (368, 375), True, 'import numpy as np\n'), ((603, 645), 'numpy.zeros', 'np.zeros', (['(quantBlocos[0], quantBlocos[1])'], {}), '((quantBlocos[0], quantBlocos[1]))\n', (611, 645), True, 'import numpy as np\n'), ((680, 698), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (696, 698), False, 'import pygame\n'), ((2112, 2133), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (2131, 2133), False, 'import pygame\n'), ((781, 803), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (801, 803), False, 'import pygame\n'), ((998, 1024), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (1022, 1024), False, 'import pygame\n'), ((502, 547), 'pygame.Rect', 'pygame.Rect', (['(60 * i + 10)', '(60 * j + 10)', '(59)', '(59)'], {}), '(60 * i + 10, 60 * j + 10, 59, 59)\n', (513, 547), False, 'import pygame\n'), ((1856, 1901), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'color', 'blocos[i][j]'], {}), '(screen, color, blocos[i][j])\n', (1872, 1901), False, 'import pygame\n')]
|
#!/usr/bin/env python
"""Script to run RM2 ALM case."""
import argparse
import os
import subprocess
from subprocess import call, check_output
import numpy as np
import pandas as pd
import glob
import foampy
from foampy.dictionaries import replace_value
import shutil
from pyrm2tf import processing as pr
def get_mesh_dims():
"""Get mesh dimensions by grepping `blockMeshDict`."""
raw = check_output("grep blocks system/blockMeshDict -A3",
shell=True).decode().split("\n")[3]
raw = raw.replace("(", "").replace(")", "").split()
return {"nx": int(raw[0]), "ny": int(raw[1]), "nz": int(raw[2])}
def get_dt():
"""Read ``deltaT`` from ``controlDict``."""
return foampy.dictionaries.read_single_line_value("controlDict",
keyword="deltaT")
def log_perf(param="tsr", append=True):
"""Log performance to file."""
if not os.path.isdir("processed"):
os.mkdir("processed")
fpath = "processed/{}_sweep.csv".format(param)
if append and os.path.isfile(fpath):
df = pd.read_csv(fpath)
else:
df = pd.DataFrame(columns=["nx", "ny", "nz", "dt", "tsr", "cp", "cd"])
d = pr.calc_perf(t1=3.0)
d.update(get_mesh_dims())
d["dt"] = get_dt()
df = df.append(d, ignore_index=True)
df.to_csv(fpath, index=False)
def set_blockmesh_resolution(nx=48, ny=None, nz=None):
"""Set mesh resolution in `blockMeshDict`.
If only ``nx`` is provided, the default resolutions for other dimensions are
scaled proportionally.
"""
defaults = {"nx": 48, "ny": 48, "nz": 32}
if ny is None:
ny = nx
if nz is None:
nz = int(nx*defaults["nz"]/defaults["nx"])
print("Setting blockMesh resolution to ({} {} {})".format(nx, ny, nz))
foampy.fill_template("system/blockMeshDict.template", nx=nx, ny=ny, nz=nz)
def set_dt(dt=0.005, tsr=None, tsr_0=3.1):
"""Set `deltaT` in `controlDict`. Will scale proportionally if `tsr` and
`tsr_0` are supplied, such that steps-per-rev is consistent with `tsr_0`.
"""
if tsr is not None:
dt = dt*tsr_0/tsr
print("Setting deltaT = dt*tsr_0/tsr = {:.3f}".format(dt))
dt = str(dt)
foampy.fill_template("system/controlDict.template", dt=dt)
def set_talpha(val=6.25):
"""Set `TAlpha` value for the Leishman--Beddoes SGC dynamic stall model."""
foampy.dictionaries.replace_value("system/fvOptions", "TAlpha", str(val))
def gen_sets_file():
"""Generate ``sets`` file for post-processing."""
# Input parameters
setformat = "raw"
interpscheme = "cellPoint"
fields = ["UMean", "UPrime2Mean", "kMean"]
x = 1.0
ymax = 1.5
ymin = -1.5
ny = 51
z_H_max = 1.25
z_H_min = -1.25
nz = 19
H = 0.807
zmax = z_H_max*H
zmin = z_H_min*H
z_array = np.linspace(zmin, zmax, nz)
txt = "\ntype sets;\n"
txt +='libs ("libsampling.so");\n'
txt += "setFormat " + setformat + ";\n"
txt += "interpolationScheme " + interpscheme + ";\n\n"
txt += "sets \n ( \n"
for z in z_array:
# Fix interpolation issues if directly on a face
if z == 0.0:
z += 1e-5
txt += " " + "profile_" + str(z) + "\n"
txt += " { \n"
txt += " type uniform; \n"
txt += " axis y; \n"
txt += " start (" + str(x) + " " + str(ymin) + " " \
+ str(z) + ");\n"
txt += " end (" + str(x) + " " + str(ymax) + " " \
+ str(z) + ");\n"
txt += " nPoints " + str(ny) + ";\n }\n\n"
txt += ");\n\n"
txt += "fields \n(\n"
for field in fields:
txt += " " + field + "\n"
txt += "); \n\n"
txt += "//\
*********************************************************************** //\
\n"
with open("system/sets", "w") as f:
f.write(txt)
def post_process(parallel=False, tee=False, overwrite=True):
"""Execute all post-processing."""
gen_sets_file()
foampy.run("postProcess", args="-func -vorticity", parallel=parallel,
logname="log.vorticity", tee=tee, overwrite=overwrite)
foampy.run("postProcess", args="-dict system/controlDict.recovery "
" -latestTime", parallel=parallel, logname="log.recovery",
tee=tee, overwrite=overwrite)
foampy.run("postProcess", args="-func sets -latestTime",
logname="log.sample", parallel=parallel, overwrite=overwrite,
tee=tee)
foampy.run("funkyDoCalc", args="system/funkyDoCalcDict -latestTime",
parallel=parallel, tee=tee, overwrite=overwrite)
def param_sweep(param="tsr", start=None, stop=None, step=None, dtype=float,
append=False, parallel=True, tee=False, **kwargs):
"""Run multiple simulations, varying ``quantity``.
``step`` is not included.
"""
print("Running {} sweep".format(param))
fpath = "processed/{}_sweep.csv".format(param)
if not append and os.path.isfile(fpath):
os.remove(fpath)
if param == "nx":
dtype = int
param_list = np.arange(start, stop, step, dtype=dtype)
for p in param_list:
print("Running with {} = {}".format(param, p))
if param == "talpha":
set_talpha(p)
if p == param_list[0] or param == "nx":
foampy.clean(remove_zero=True)
mesh = True
else:
mesh = False
# Update kwargs for this value
kwargs.update({param: p})
run(parallel=parallel, tee=tee, mesh=mesh, reconstruct=False,
post=False, **kwargs)
os.rename("log.pimpleFoam", "log.pimpleFoam." + str(p))
log_perf(param=param, append=True)
foampy.clean(leave_mesh=True, remove_zero=True)
# Set parameters back to defaults
if param == "talpha":
set_talpha()
def set_turbine_op_params(tsr=3.1, tsr_amp=0.0, tsr_phase=1.8):
"""Write file defining turbine operating parameters.
``tsr_phase`` is in radians.
"""
txt="""
tipSpeedRatio {tsr};
tsrAmplitude {tsr_amp};
tsrPhase {tsr_phase};
""".format(tsr=tsr, tsr_amp=tsr_amp, tsr_phase=tsr_phase)
with open("system/turbineOperatingParams", "w") as f:
f.write(txt)
def run(tsr=3.1, tsr_amp=0.0, tsr_phase=1.8, nx=48, mesh=True, parallel=False,
dt=0.005, tee=False, reconstruct=True, overwrite=False, post=True):
"""Run simulation once."""
print("Setting TSR to", tsr)
set_turbine_op_params(tsr=tsr, tsr_amp=tsr_amp, tsr_phase=tsr_phase)
set_dt(dt=dt, tsr=tsr)
if mesh:
# Create blockMeshDict
set_blockmesh_resolution(nx=nx)
foampy.run("blockMesh", tee=tee)
# Copy over initial conditions
subprocess.call("cp -rf 0.orig 0 > /dev/null 2>&1", shell=True)
if parallel and not glob.glob("processor*"):
foampy.run("decomposePar", tee=tee)
subprocess.call("for PROC in processor*; do cp -rf 0.orig/* $PROC/0; "
" done", shell=True)
if mesh:
foampy.run("snappyHexMesh", args="-overwrite", tee=tee,
parallel=parallel)
foampy.run("topoSet", parallel=parallel, tee=tee)
if parallel:
foampy.run("reconstructParMesh", args="-constant -time 0", tee=tee)
foampy.run("pimpleFoam", parallel=parallel, tee=tee, overwrite=overwrite)
if parallel and reconstruct:
foampy.run("reconstructPar", tee=tee, overwrite=overwrite)
if post:
post_process(overwrite=overwrite, parallel=parallel)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run RM2 ALM case")
parser.add_argument("--tsr", "-t", default=3.1, type=float, help="TSR")
parser.add_argument("--nx", "-x", default=48, type=int, help="Number of "
"cells in the x-direction for the base mesh")
parser.add_argument("--dt", default=0.005, type=float, help="Time step")
parser.add_argument("--leave-mesh", "-l", default=False,
action="store_true", help="Leave existing mesh")
parser.add_argument("--post", "-P", default=False, action="store_true",
help="Run post-processing (done by default at end of "
" run)")
parser.add_argument("--param-sweep", "-p",
help="Run multiple simulations varying a parameter",
choices=["tsr", "nx", "dt", "talpha"])
parser.add_argument("--start", default=1.1, type=float)
parser.add_argument("--stop", default=4.7, type=float)
parser.add_argument("--step", default=0.5, type=float)
parser.add_argument("--serial", "-S", default=False, action="store_true")
parser.add_argument("--append", "-a", default=False, action="store_true")
parser.add_argument("--tee", "-T", default=False, action="store_true",
help="Print log files to terminal while running")
args = parser.parse_args()
if args.param_sweep:
param_sweep(args.param_sweep, args.start, args.stop, args.step,
append=args.append, parallel=not args.serial,
tee=args.tee, nx=args.nx, dt=args.dt, tsr=args.tsr)
elif not args.post:
run(tsr=args.tsr, nx=args.nx, dt=args.dt, parallel=not args.serial,
tee=args.tee, mesh=not args.leave_mesh, overwrite=args.leave_mesh)
if args.post:
post_process(parallel=not args.serial)
|
[
"pandas.DataFrame",
"os.mkdir",
"os.remove",
"argparse.ArgumentParser",
"os.path.isdir",
"pandas.read_csv",
"foampy.clean",
"subprocess.check_output",
"pyrm2tf.processing.calc_perf",
"foampy.dictionaries.read_single_line_value",
"os.path.isfile",
"numpy.arange",
"subprocess.call",
"numpy.linspace",
"glob.glob",
"foampy.run",
"foampy.fill_template"
] |
[((709, 784), 'foampy.dictionaries.read_single_line_value', 'foampy.dictionaries.read_single_line_value', (['"""controlDict"""'], {'keyword': '"""deltaT"""'}), "('controlDict', keyword='deltaT')\n", (751, 784), False, 'import foampy\n'), ((1206, 1226), 'pyrm2tf.processing.calc_perf', 'pr.calc_perf', ([], {'t1': '(3.0)'}), '(t1=3.0)\n', (1218, 1226), True, 'from pyrm2tf import processing as pr\n'), ((1806, 1880), 'foampy.fill_template', 'foampy.fill_template', (['"""system/blockMeshDict.template"""'], {'nx': 'nx', 'ny': 'ny', 'nz': 'nz'}), "('system/blockMeshDict.template', nx=nx, ny=ny, nz=nz)\n", (1826, 1880), False, 'import foampy\n'), ((2227, 2285), 'foampy.fill_template', 'foampy.fill_template', (['"""system/controlDict.template"""'], {'dt': 'dt'}), "('system/controlDict.template', dt=dt)\n", (2247, 2285), False, 'import foampy\n'), ((2848, 2875), 'numpy.linspace', 'np.linspace', (['zmin', 'zmax', 'nz'], {}), '(zmin, zmax, nz)\n', (2859, 2875), True, 'import numpy as np\n'), ((4057, 4185), 'foampy.run', 'foampy.run', (['"""postProcess"""'], {'args': '"""-func -vorticity"""', 'parallel': 'parallel', 'logname': '"""log.vorticity"""', 'tee': 'tee', 'overwrite': 'overwrite'}), "('postProcess', args='-func -vorticity', parallel=parallel,\n logname='log.vorticity', tee=tee, overwrite=overwrite)\n", (4067, 4185), False, 'import foampy\n'), ((4201, 4363), 'foampy.run', 'foampy.run', (['"""postProcess"""'], {'args': '"""-dict system/controlDict.recovery -latestTime"""', 'parallel': 'parallel', 'logname': '"""log.recovery"""', 'tee': 'tee', 'overwrite': 'overwrite'}), "('postProcess', args=\n '-dict system/controlDict.recovery -latestTime', parallel=parallel,\n logname='log.recovery', tee=tee, overwrite=overwrite)\n", (4211, 4363), False, 'import foampy\n'), ((4392, 4524), 'foampy.run', 'foampy.run', (['"""postProcess"""'], {'args': '"""-func sets -latestTime"""', 'logname': '"""log.sample"""', 'parallel': 'parallel', 'overwrite': 'overwrite', 'tee': 'tee'}), 
"('postProcess', args='-func sets -latestTime', logname=\n 'log.sample', parallel=parallel, overwrite=overwrite, tee=tee)\n", (4402, 4524), False, 'import foampy\n'), ((4554, 4675), 'foampy.run', 'foampy.run', (['"""funkyDoCalc"""'], {'args': '"""system/funkyDoCalcDict -latestTime"""', 'parallel': 'parallel', 'tee': 'tee', 'overwrite': 'overwrite'}), "('funkyDoCalc', args='system/funkyDoCalcDict -latestTime',\n parallel=parallel, tee=tee, overwrite=overwrite)\n", (4564, 4675), False, 'import foampy\n'), ((5150, 5191), 'numpy.arange', 'np.arange', (['start', 'stop', 'step'], {'dtype': 'dtype'}), '(start, stop, step, dtype=dtype)\n', (5159, 5191), True, 'import numpy as np\n'), ((6789, 6852), 'subprocess.call', 'subprocess.call', (['"""cp -rf 0.orig 0 > /dev/null 2>&1"""'], {'shell': '(True)'}), "('cp -rf 0.orig 0 > /dev/null 2>&1', shell=True)\n", (6804, 6852), False, 'import subprocess\n'), ((7348, 7421), 'foampy.run', 'foampy.run', (['"""pimpleFoam"""'], {'parallel': 'parallel', 'tee': 'tee', 'overwrite': 'overwrite'}), "('pimpleFoam', parallel=parallel, tee=tee, overwrite=overwrite)\n", (7358, 7421), False, 'import foampy\n'), ((7638, 7693), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run RM2 ALM case"""'}), "(description='Run RM2 ALM case')\n", (7661, 7693), False, 'import argparse\n'), ((927, 953), 'os.path.isdir', 'os.path.isdir', (['"""processed"""'], {}), "('processed')\n", (940, 953), False, 'import os\n'), ((963, 984), 'os.mkdir', 'os.mkdir', (['"""processed"""'], {}), "('processed')\n", (971, 984), False, 'import os\n'), ((1054, 1075), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (1068, 1075), False, 'import os\n'), ((1090, 1108), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {}), '(fpath)\n', (1101, 1108), True, 'import pandas as pd\n'), ((1132, 1197), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['nx', 'ny', 'nz', 'dt', 'tsr', 'cp', 'cd']"}), "(columns=['nx', 'ny', 'nz', 'dt', 'tsr', 'cp', 
'cd'])\n", (1144, 1197), True, 'import pandas as pd\n'), ((5043, 5064), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (5057, 5064), False, 'import os\n'), ((5074, 5090), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (5083, 5090), False, 'import os\n'), ((5774, 5821), 'foampy.clean', 'foampy.clean', ([], {'leave_mesh': '(True)', 'remove_zero': '(True)'}), '(leave_mesh=True, remove_zero=True)\n', (5786, 5821), False, 'import foampy\n'), ((6717, 6749), 'foampy.run', 'foampy.run', (['"""blockMesh"""'], {'tee': 'tee'}), "('blockMesh', tee=tee)\n", (6727, 6749), False, 'import foampy\n'), ((6910, 6945), 'foampy.run', 'foampy.run', (['"""decomposePar"""'], {'tee': 'tee'}), "('decomposePar', tee=tee)\n", (6920, 6945), False, 'import foampy\n'), ((6954, 7046), 'subprocess.call', 'subprocess.call', (['"""for PROC in processor*; do cp -rf 0.orig/* $PROC/0; done"""'], {'shell': '(True)'}), "('for PROC in processor*; do cp -rf 0.orig/* $PROC/0; done',\n shell=True)\n", (6969, 7046), False, 'import subprocess\n'), ((7091, 7165), 'foampy.run', 'foampy.run', (['"""snappyHexMesh"""'], {'args': '"""-overwrite"""', 'tee': 'tee', 'parallel': 'parallel'}), "('snappyHexMesh', args='-overwrite', tee=tee, parallel=parallel)\n", (7101, 7165), False, 'import foampy\n'), ((7193, 7242), 'foampy.run', 'foampy.run', (['"""topoSet"""'], {'parallel': 'parallel', 'tee': 'tee'}), "('topoSet', parallel=parallel, tee=tee)\n", (7203, 7242), False, 'import foampy\n'), ((7463, 7521), 'foampy.run', 'foampy.run', (['"""reconstructPar"""'], {'tee': 'tee', 'overwrite': 'overwrite'}), "('reconstructPar', tee=tee, overwrite=overwrite)\n", (7473, 7521), False, 'import foampy\n'), ((5388, 5418), 'foampy.clean', 'foampy.clean', ([], {'remove_zero': '(True)'}), '(remove_zero=True)\n', (5400, 5418), False, 'import foampy\n'), ((6877, 6900), 'glob.glob', 'glob.glob', (['"""processor*"""'], {}), "('processor*')\n", (6886, 6900), False, 'import glob\n'), ((7276, 7343), 'foampy.run', 
'foampy.run', (['"""reconstructParMesh"""'], {'args': '"""-constant -time 0"""', 'tee': 'tee'}), "('reconstructParMesh', args='-constant -time 0', tee=tee)\n", (7286, 7343), False, 'import foampy\n'), ((397, 461), 'subprocess.check_output', 'check_output', (['"""grep blocks system/blockMeshDict -A3"""'], {'shell': '(True)'}), "('grep blocks system/blockMeshDict -A3', shell=True)\n", (409, 461), False, 'from subprocess import call, check_output\n')]
|
import numpy as np
import argparse
from matplotlib import pyplot as plt
rewards = []
EPOSIDES = 250
lineStyle = ['-b','--r','.g']
def plot(f, arr, strLabel):
strLine = f.readline()
start = strLine.find('INFO')
if start != -1:
start += len('INFO:')
tittle = strLine[start:-1]
else :
tittle = 'tittle'
while True:
strLine = f.readline()
if strLine == '':
break
start = strLine.find('reward')
if start != -1:
start += len('reward:')
rewards.append(strLine[start:-1])
f.close()
y1 = np.asarray(rewards, dtype=np.float)
rewardLen = len(rewards)
x = np.arange(0,rewardLen)
plt.title(tittle)
plt.xlabel("eposides")
plt.ylabel("rewards")
i = arr % 3
plt.plot(x,y1,lineStyle[i],label= strLabel)
plt.legend()
rewards.clear()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Plot from log")
parser.add_argument('--file','-f', action='append', dest='files', help='input the log file',type=argparse.FileType('r'))
args = parser.parse_args()
num =len(args.files)
i=0
for f in args.files:
plot(f, i, f.name)
f.close
i+=1
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"argparse.FileType"
] |
[((601, 636), 'numpy.asarray', 'np.asarray', (['rewards'], {'dtype': 'np.float'}), '(rewards, dtype=np.float)\n', (611, 636), True, 'import numpy as np\n'), ((675, 698), 'numpy.arange', 'np.arange', (['(0)', 'rewardLen'], {}), '(0, rewardLen)\n', (684, 698), True, 'import numpy as np\n'), ((703, 720), 'matplotlib.pyplot.title', 'plt.title', (['tittle'], {}), '(tittle)\n', (712, 720), True, 'from matplotlib import pyplot as plt\n'), ((725, 747), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""eposides"""'], {}), "('eposides')\n", (735, 747), True, 'from matplotlib import pyplot as plt\n'), ((752, 773), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""rewards"""'], {}), "('rewards')\n", (762, 773), True, 'from matplotlib import pyplot as plt\n'), ((794, 839), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1', 'lineStyle[i]'], {'label': 'strLabel'}), '(x, y1, lineStyle[i], label=strLabel)\n', (802, 839), True, 'from matplotlib import pyplot as plt\n'), ((842, 854), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (852, 854), True, 'from matplotlib import pyplot as plt\n'), ((918, 970), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot from log"""'}), "(description='Plot from log')\n", (941, 970), False, 'import argparse\n'), ((1245, 1255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1253, 1255), True, 'from matplotlib import pyplot as plt\n'), ((1072, 1094), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (1089, 1094), False, 'import argparse\n')]
|
# -*- coding: utf-8 -*-
"""
Module that loads data distributed at http://jmcauley.ucsd.edu/data/amazon/
The dataset was presented on the following papers:
<NAME>, <NAME>. 2016. Ups and downs: Modeling the visual evolution of fashion
trends with one-class collaborative filtering. WWW.
<NAME>, <NAME>, <NAME>, <NAME>. 2015. Image-based
recommendations on styles and substitutes. SIGIR.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import json
import logging
import nltk
import six
nltk.download(info_or_id='punkt') # need it before importing nltk.tokenize
import numpy as np
from nltk.tokenize import word_tokenize
from multidomain_sentiment.word_embedding import load_word_embedding
from multidomain_sentiment.dataset.common import create_dataset
logger = logging.getLogger(__name__)
def read_amazon_reviews(
domain_paths, vocab, label_dict, domain_dict=None, max_tokens=10000):
texts = []
labels = []
domains = []
create_domain = domain_dict is None
domain_dict = domain_dict or {}
for domain_name, path in domain_paths:
if create_domain:
domain_dict[domain_name] = len(domain_dict)
d = domain_dict[domain_name]
for t, l in read_single_review(path, vocab, label_dict, max_tokens):
texts.append(t)
labels.append(l)
domains.append(d)
labels = np.asarray(labels, np.int32)
domains = np.asarray(domains, np.int32)
return create_dataset(texts, labels, domains), domain_dict
def prepare_mcauley_data(train_paths, test_paths, word2vec_path):
logger.info("Preparing data")
label_dict, label_inv_dict = get_sentiment_label_dict()
logger.info("Loading word embedding")
w2v, vocab = load_word_embedding(word2vec_path, max_vocab=100000)
logger.info("Creating dataset")
train, domain_dict = read_amazon_reviews(train_paths, vocab, label_dict, max_tokens=50)
if len(test_paths) > 0:
assert len(train_paths) == len(test_paths)
test, _ = read_amazon_reviews(train_paths, vocab, label_dict,
domain_dict=domain_dict, max_tokens=50)
else:
test = None
# Reverse domain_dict
domain_dict = {v: k for k, v in six.iteritems(domain_dict)}
return w2v, vocab, train, test, label_inv_dict, domain_dict
def read_single_review(path, vocab, label_dict, max_tokens):
with open(path) as fin:
for line in fin:
data = json.loads(line.strip())
label = int(data['overall'])
if label in label_dict:
words = []
for i, w in enumerate(word_tokenize(data['reviewText'])):
if i >= max_tokens:
break
words.append(vocab.get(w, vocab['<unk>']))
if len(words) == 0:
continue
yield np.array(words, np.int32), label_dict[label]
def get_sentiment_label_dict():
return ({
1: 0,
2: 0,
3: 0,
5: 1,
}, {0: "neg", 1: "pos"})
|
[
"nltk.tokenize.word_tokenize",
"numpy.asarray",
"numpy.array",
"multidomain_sentiment.dataset.common.create_dataset",
"nltk.download",
"multidomain_sentiment.word_embedding.load_word_embedding",
"six.iteritems",
"logging.getLogger"
] |
[((534, 567), 'nltk.download', 'nltk.download', ([], {'info_or_id': '"""punkt"""'}), "(info_or_id='punkt')\n", (547, 567), False, 'import nltk\n'), ((814, 841), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (831, 841), False, 'import logging\n'), ((1410, 1438), 'numpy.asarray', 'np.asarray', (['labels', 'np.int32'], {}), '(labels, np.int32)\n', (1420, 1438), True, 'import numpy as np\n'), ((1453, 1482), 'numpy.asarray', 'np.asarray', (['domains', 'np.int32'], {}), '(domains, np.int32)\n', (1463, 1482), True, 'import numpy as np\n'), ((1769, 1821), 'multidomain_sentiment.word_embedding.load_word_embedding', 'load_word_embedding', (['word2vec_path'], {'max_vocab': '(100000)'}), '(word2vec_path, max_vocab=100000)\n', (1788, 1821), False, 'from multidomain_sentiment.word_embedding import load_word_embedding\n'), ((1494, 1532), 'multidomain_sentiment.dataset.common.create_dataset', 'create_dataset', (['texts', 'labels', 'domains'], {}), '(texts, labels, domains)\n', (1508, 1532), False, 'from multidomain_sentiment.dataset.common import create_dataset\n'), ((2271, 2297), 'six.iteritems', 'six.iteritems', (['domain_dict'], {}), '(domain_dict)\n', (2284, 2297), False, 'import six\n'), ((2666, 2699), 'nltk.tokenize.word_tokenize', 'word_tokenize', (["data['reviewText']"], {}), "(data['reviewText'])\n", (2679, 2699), False, 'from nltk.tokenize import word_tokenize\n'), ((2922, 2947), 'numpy.array', 'np.array', (['words', 'np.int32'], {}), '(words, np.int32)\n', (2930, 2947), True, 'import numpy as np\n')]
|
import os
import numpy as np
import sys
import SimpleITK as sitk
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from data_io_utils import DataIO
class MaskBoundingUtils:
def __init__(self):
print('init MaskBoundingUtils class')
@staticmethod
def extract_mask_file_bounding(infile, is_dcm=False, is_print=False):
if is_dcm:
data = DataIO.load_dicom_series(infile)
else:
data = DataIO.load_nii_image(infile)
arr = data['image']
return MaskBoundingUtils.extract_mask_arr_bounding(arr, is_print)
@staticmethod
def extract_mask_arr_bounding(in_arr, is_print=False):
ranges = np.where(in_arr > 0)
[z_min, y_min, x_min] = np.min(np.array(ranges), axis=1)
[z_max, y_max, x_max] = np.max(np.array(ranges), axis=1)
if is_print:
print('mask shape:\t', in_arr.shape)
print('z ranges: [{}\t{}], len:\t{}'.format(z_min, z_max, z_max-z_min))
print('y ranges: [{}\t{}], len:\t{}'.format(y_min, y_max, y_max-y_min))
print('x ranges: [{}\t{}], len:\t{}'.format(x_min, x_max, x_max-x_min))
print('mask valid bounding shape:\t[{}, {}, {}]'.format(z_max-z_min, y_max-y_min, x_max-x_min))
return z_min, y_min, x_min, z_max, y_max, x_max
@staticmethod
def extract_target_area_by_boundary_info(infile, out_file, boundary_info, is_dcm=False, padding=None):
'''
boudary_info: [min_z, min_y, min_x, max_z, max_y, max_x], make sure, boundary_info is valid!!!!!
'''
if is_dcm:
data = DataIO.load_dicom_series(infile)
else:
data = DataIO.load_nii_image(infile)
arr = data['image']
[min_z, min_y, min_x, max_z, max_y, max_x] = boundary_info
if isinstance(padding, list) and len(padding) == 3:
min_z = max(0, min_z-padding[2])
min_y = max(0, min_z-padding[1])
min_x = max(0, min_z-padding[0])
target_arr = arr[min_z:max_z+1+padding[2], min_y:max_y+1+padding[1], min_x:max_x+1+padding[0]]
elif isinstance(padding, int):
min_z = max(0, min_z-padding)
min_y = max(0, min_z-padding)
min_x = max(0, min_z-padding)
target_arr = arr[min_z:max_z+1+padding, min_y:max_y+1+padding, min_x:max_x+1+padding]
else:
target_arr = arr[min_z:max_z+1, min_y:max_y+1, min_x:max_x+1]
if out_file is not None:
os.makedirs(os.path.dirname(out_file), exist_ok=True)
DataIO.save_medical_info_and_data(target_arr, data['origin'], data['spacing'], data['direction'], out_file)
@staticmethod
def extract_segmentation_pairs_by_boundary_info(in_image_file, in_mask_file,
out_image_file, out_mask_file, boundary_info, is_dcm=False, padding=None):
'''
boudary_info: [min_z, min_y, min_x, max_z, max_y, max_x], make sure, boundary_info is valid!!!!!
'''
if is_dcm:
image_data = DataIO.load_dicom_series(in_image_file)
else:
image_data = DataIO.load_nii_image(in_image_file)
mask_data = DataIO.load_nii_image(in_mask_file)
[min_z, min_y, min_x, max_z, max_y, max_x] = boundary_info
arr = image_data['image']
if isinstance(padding, list) and len(padding) == 3:
min_z = max(0, min_z-padding[2])
min_y = max(0, min_z-padding[1])
min_x = max(0, min_z-padding[0])
image_target_arr = arr[min_z:max_z+1+padding[2], min_y:max_y+1+padding[1], min_x:max_x+1+padding[0]]
elif isinstance(padding, int):
min_z = max(0, min_z-padding)
min_y = max(0, min_z-padding)
min_x = max(0, min_z-padding)
if isinstance(padding, list) and len(padding) == 3:
image_target_arr = arr[min_z:max_z+1+padding[2], min_y:max_y+1+padding[1], min_x:max_x+1+padding[0]]
elif isinstance(padding, int):
image_target_arr = arr[min_z:max_z+1+padding, min_y:max_y+1+padding, min_x:max_x+1+padding]
else:
image_target_arr = arr[min_z:max_z+1, min_y:max_y+1, min_x:max_x+1]
out_sitk_image = sitk.GetImageFromArray(image_target_arr)
out_sitk_image.SetSpacing(image_data['sitk_image'].GetSpacing())
out_sitk_image.SetOrigin(image_data['sitk_image'].GetOrigin())
out_sitk_image.SetDirection(image_data['sitk_image'].GetDirection())
if out_image_file is not None:
os.makedirs(os.path.dirname(out_image_file), exist_ok=True)
sitk.WriteImage(out_sitk_image, out_image_file)
arr = mask_data['image']
if isinstance(padding, list) and len(padding) == 3:
mask_target_arr = arr[min_z:max_z+1+padding[2], min_y:max_y+1+padding[1], min_x:max_x+1+padding[0]]
elif isinstance(padding, int):
mask_target_arr = arr[min_z:max_z+1+padding, min_y:max_y+1+padding, min_x:max_x+1+padding]
else:
mask_target_arr = arr[min_z:max_z+1, min_y:max_y+1, min_x:max_x+1]
out_sitk_mask = sitk.GetImageFromArray(mask_target_arr)
out_sitk_mask.CopyInformation(out_sitk_image)
if out_mask_file is not None:
os.makedirs(os.path.dirname(out_mask_file), exist_ok=True)
sitk.WriteImage(out_sitk_mask, out_mask_file)
'''
extract images region from z-min to z-max
'''
@staticmethod
def extract_target_area_by_mask_zboundary(in_image, in_mask, padding=0):
mask_arr = sitk.GetArrayFromImage(in_mask)
z_min, y_min, x_min, z_max, y_max, x_max = MaskBoundingUtils.extract_mask_arr_bounding(mask_arr)
z_min = max(0, z_min-padding)
z_max = z_max+padding
image_arr = sitk.GetArrayFromImage(in_image)
out_image_arr = image_arr[z_min:z_max+1]
out_image = sitk.GetImageFromArray(out_image_arr)
out_image.SetDirection(in_image.GetDirection())
out_image.SetSpacing(in_image.GetSpacing())
out_image.SetOrigin(in_image.GetOrigin())
mask_arr = sitk.GetArrayFromImage(in_mask)
out_mask_arr = mask_arr[z_min:z_max+1]
out_mask = sitk.GetImageFromArray(out_mask_arr)
out_mask.CopyInformation(out_image)
return out_image, out_mask
@staticmethod
def fix_mask_sitk_info(root='/data/medical/lung/airway/segmentation'):
image_root = os.path.join(root, 'images')
mask_root = os.path.join(root, 'masks')
out_mask_root = os.path.join(root, 'masks_x')
os.makedirs(out_mask_root, exist_ok=True)
for f in tqdm(os.listdir(mask_root)):
basename = f.replace('.nii.gz', '')
image_file = os.path.join(image_root, '{}.nii.gz'.format(basename))
mask_file = os.path.join(mask_root, '{}.nii.gz'.format(basename))
out_mask_file = os.path.join(out_mask_root, '{}.nii.gz'.format(basename))
image = sitk.ReadImage(image_file)
mask = sitk.ReadImage(mask_file)
mask.CopyInformation(image)
sitk.WriteImage(mask, out_mask_file)
def test_extract_target_area_by_mask_zboundary():
image_file = '/data/medical/brain/gan/cta2dwi_multi_classified/3.sorted_mask/5016897/CTA/CTA.nii.gz'
mask_file = '/data/medical/brain/gan/cta2dwi_multi_classified/3.sorted_mask/5016897/CTA/CTA_MASK_connected.nii.gz'
in_image = sitk.ReadImage(image_file)
in_mask = sitk.ReadImage(mask_file)
out_image, out_mask = MaskBoundingUtils.extract_target_area_by_mask_zboundary(in_image, in_mask)
out_dir = '/data/medical/tmp/5016897'
os.makedirs(out_dir, exist_ok=True)
out_image_file = os.path.join(out_dir, 'image.nii.gz')
sitk.WriteImage(out_image, out_image_file)
out_mask_file = os.path.join(out_dir, 'mask.nii.gz')
sitk.WriteImage(out_mask, out_mask_file)
if __name__ == '__main__':
print(__file__)
test_extract_target_area_by_mask_zboundary()
|
[
"os.path.abspath",
"os.makedirs",
"SimpleITK.ReadImage",
"os.path.dirname",
"data_io_utils.DataIO.load_dicom_series",
"data_io_utils.DataIO.load_nii_image",
"SimpleITK.GetArrayFromImage",
"data_io_utils.DataIO.save_medical_info_and_data",
"numpy.where",
"numpy.array",
"SimpleITK.WriteImage",
"SimpleITK.GetImageFromArray",
"os.path.join",
"os.listdir"
] |
[((7520, 7546), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['image_file'], {}), '(image_file)\n', (7534, 7546), True, 'import SimpleITK as sitk\n'), ((7561, 7586), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['mask_file'], {}), '(mask_file)\n', (7575, 7586), True, 'import SimpleITK as sitk\n'), ((7734, 7769), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (7745, 7769), False, 'import os\n'), ((7791, 7828), 'os.path.join', 'os.path.join', (['out_dir', '"""image.nii.gz"""'], {}), "(out_dir, 'image.nii.gz')\n", (7803, 7828), False, 'import os\n'), ((7833, 7875), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['out_image', 'out_image_file'], {}), '(out_image, out_image_file)\n', (7848, 7875), True, 'import SimpleITK as sitk\n'), ((7896, 7932), 'os.path.join', 'os.path.join', (['out_dir', '"""mask.nii.gz"""'], {}), "(out_dir, 'mask.nii.gz')\n", (7908, 7932), False, 'import os\n'), ((7937, 7977), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['out_mask', 'out_mask_file'], {}), '(out_mask, out_mask_file)\n', (7952, 7977), True, 'import SimpleITK as sitk\n'), ((99, 124), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (114, 124), False, 'import os\n'), ((685, 705), 'numpy.where', 'np.where', (['(in_arr > 0)'], {}), '(in_arr > 0)\n', (693, 705), True, 'import numpy as np\n'), ((3199, 3234), 'data_io_utils.DataIO.load_nii_image', 'DataIO.load_nii_image', (['in_mask_file'], {}), '(in_mask_file)\n', (3220, 3234), False, 'from data_io_utils import DataIO\n'), ((4296, 4336), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['image_target_arr'], {}), '(image_target_arr)\n', (4318, 4336), True, 'import SimpleITK as sitk\n'), ((5202, 5241), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['mask_target_arr'], {}), '(mask_target_arr)\n', (5224, 5241), True, 'import SimpleITK as sitk\n'), ((5640, 5671), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['in_mask'], {}), '(in_mask)\n', 
(5662, 5671), True, 'import SimpleITK as sitk\n'), ((5873, 5905), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['in_image'], {}), '(in_image)\n', (5895, 5905), True, 'import SimpleITK as sitk\n'), ((5975, 6012), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['out_image_arr'], {}), '(out_image_arr)\n', (5997, 6012), True, 'import SimpleITK as sitk\n'), ((6199, 6230), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['in_mask'], {}), '(in_mask)\n', (6221, 6230), True, 'import SimpleITK as sitk\n'), ((6297, 6333), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['out_mask_arr'], {}), '(out_mask_arr)\n', (6319, 6333), True, 'import SimpleITK as sitk\n'), ((6529, 6557), 'os.path.join', 'os.path.join', (['root', '"""images"""'], {}), "(root, 'images')\n", (6541, 6557), False, 'import os\n'), ((6578, 6605), 'os.path.join', 'os.path.join', (['root', '"""masks"""'], {}), "(root, 'masks')\n", (6590, 6605), False, 'import os\n'), ((6630, 6659), 'os.path.join', 'os.path.join', (['root', '"""masks_x"""'], {}), "(root, 'masks_x')\n", (6642, 6659), False, 'import os\n'), ((6668, 6709), 'os.makedirs', 'os.makedirs', (['out_mask_root'], {'exist_ok': '(True)'}), '(out_mask_root, exist_ok=True)\n', (6679, 6709), False, 'import os\n'), ((388, 420), 'data_io_utils.DataIO.load_dicom_series', 'DataIO.load_dicom_series', (['infile'], {}), '(infile)\n', (412, 420), False, 'from data_io_utils import DataIO\n'), ((454, 483), 'data_io_utils.DataIO.load_nii_image', 'DataIO.load_nii_image', (['infile'], {}), '(infile)\n', (475, 483), False, 'from data_io_utils import DataIO\n'), ((745, 761), 'numpy.array', 'np.array', (['ranges'], {}), '(ranges)\n', (753, 761), True, 'import numpy as np\n'), ((810, 826), 'numpy.array', 'np.array', (['ranges'], {}), '(ranges)\n', (818, 826), True, 'import numpy as np\n'), ((1615, 1647), 'data_io_utils.DataIO.load_dicom_series', 'DataIO.load_dicom_series', (['infile'], {}), '(infile)\n', (1639, 1647), False, 'from 
data_io_utils import DataIO\n'), ((1681, 1710), 'data_io_utils.DataIO.load_nii_image', 'DataIO.load_nii_image', (['infile'], {}), '(infile)\n', (1702, 1710), False, 'from data_io_utils import DataIO\n'), ((2594, 2706), 'data_io_utils.DataIO.save_medical_info_and_data', 'DataIO.save_medical_info_and_data', (['target_arr', "data['origin']", "data['spacing']", "data['direction']", 'out_file'], {}), "(target_arr, data['origin'], data[\n 'spacing'], data['direction'], out_file)\n", (2627, 2706), False, 'from data_io_utils import DataIO\n'), ((3063, 3102), 'data_io_utils.DataIO.load_dicom_series', 'DataIO.load_dicom_series', (['in_image_file'], {}), '(in_image_file)\n', (3087, 3102), False, 'from data_io_utils import DataIO\n'), ((3142, 3178), 'data_io_utils.DataIO.load_nii_image', 'DataIO.load_nii_image', (['in_image_file'], {}), '(in_image_file)\n', (3163, 3178), False, 'from data_io_utils import DataIO\n'), ((4681, 4728), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['out_sitk_image', 'out_image_file'], {}), '(out_sitk_image, out_image_file)\n', (4696, 4728), True, 'import SimpleITK as sitk\n'), ((5417, 5462), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['out_sitk_mask', 'out_mask_file'], {}), '(out_sitk_mask, out_mask_file)\n', (5432, 5462), True, 'import SimpleITK as sitk\n'), ((6732, 6753), 'os.listdir', 'os.listdir', (['mask_root'], {}), '(mask_root)\n', (6742, 6753), False, 'import os\n'), ((7068, 7094), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['image_file'], {}), '(image_file)\n', (7082, 7094), True, 'import SimpleITK as sitk\n'), ((7114, 7139), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['mask_file'], {}), '(mask_file)\n', (7128, 7139), True, 'import SimpleITK as sitk\n'), ((7192, 7228), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['mask', 'out_mask_file'], {}), '(mask, out_mask_file)\n', (7207, 7228), True, 'import SimpleITK as sitk\n'), ((2540, 2565), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (2555, 2565), False, 'import 
os\n'), ((4621, 4652), 'os.path.dirname', 'os.path.dirname', (['out_image_file'], {}), '(out_image_file)\n', (4636, 4652), False, 'import os\n'), ((5358, 5388), 'os.path.dirname', 'os.path.dirname', (['out_mask_file'], {}), '(out_mask_file)\n', (5373, 5388), False, 'import os\n')]
|
import numpy as np
class Network:
def __init__(self):
self.L1 = Layer(layer_width=2, input_width=1, bias=[0, -1])
def forward(self, x):
x = self.L1.forward(x)
return np.sum(x)
def update_weights(self, adj):
self.L1.update_weights(adj)
class Layer:
def __init__(self, layer_width=1, input_width=1, bias=None):
if bias is None:
bias = np.zeros(layer_width)
else:
bias = np.array(bias)
if bias.shape[0] != layer_width:
raise Exception("Bias must be same shape as layer")
self.units = []
for u in range(layer_width):
self.units.append(Unit(input_width, bias[u]))
def update_weights(self, adj):
for u in self.units:
u.weight = u.weight + adj
def forward(self, x):
ret = []
for u in self.units:
ret.append(u.activation(x))
return ret
class Unit:
def __init__(self, input_shape=2, bias=0):
self.weight = np.random.uniform(0, 1, input_shape)
self.bias = bias
def activation(self, x):
return self.ReLu((x * self.weight) + self.bias)
def ReLu(self, x):
ret = []
if len(x.shape) > 1:
for x_i in x:
ret.append(self.ReLu(x_i))
else:
for x_i in x:
ret.append(max(0, x_i))
return ret
|
[
"numpy.zeros",
"numpy.random.uniform",
"numpy.sum",
"numpy.array"
] |
[((201, 210), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (207, 210), True, 'import numpy as np\n'), ((1019, 1055), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'input_shape'], {}), '(0, 1, input_shape)\n', (1036, 1055), True, 'import numpy as np\n'), ((407, 428), 'numpy.zeros', 'np.zeros', (['layer_width'], {}), '(layer_width)\n', (415, 428), True, 'import numpy as np\n'), ((462, 476), 'numpy.array', 'np.array', (['bias'], {}), '(bias)\n', (470, 476), True, 'import numpy as np\n')]
|
# download data from here: https://press.liacs.nl/mirflickr/mirdownload.html
# import hashlib
# with open("mirflickr25k.zip","rb") as f:
# md5_obj = hashlib.md5()
# md5_obj.update(f.read())
# hash_code = md5_obj.hexdigest()
# print(str(hash_code).upper() == "A23D0A8564EE84CDA5622A6C2F947785")
import os
import numpy as np
all_label_data = np.zeros((25000, 38), dtype=np.int8)
label_index = -1
label_dir_name = "mirflickr25k_annotations_v080"
for label_file in os.listdir(label_dir_name):
if "README.txt" != label_file:
label_index += 1
with open(os.path.join(label_dir_name, label_file), "r") as f:
for line in f.readlines():
all_label_data[int(line.strip()) - 1][label_index] = 1
train_num = 4000
test_num = 1000
perm_index = np.random.permutation(all_label_data.shape[0])
train_data_index = perm_index[:train_num]
test_data_index = perm_index[train_num:train_num + test_num]
database_data_index = perm_index[train_num + test_num:]
with open("database.txt", "w") as f:
for index in database_data_index:
line = "im" + str(index + 1) + ".jpg " + str(all_label_data[index].tolist())[1:-1].replace(", ", " ") + "\n"
f.write(line)
with open("train.txt", "w") as f:
for index in train_data_index:
line = "im" + str(index + 1) + ".jpg " + str(all_label_data[index].tolist())[1:-1].replace(", ", " ") + "\n"
f.write(line)
with open("test.txt", "w") as f:
for index in test_data_index:
line = "im" + str(index + 1) + ".jpg " + str(all_label_data[index].tolist())[1:-1].replace(", ", " ") + "\n"
f.write(line)
|
[
"numpy.random.permutation",
"numpy.zeros",
"os.listdir",
"os.path.join"
] |
[((359, 395), 'numpy.zeros', 'np.zeros', (['(25000, 38)'], {'dtype': 'np.int8'}), '((25000, 38), dtype=np.int8)\n', (367, 395), True, 'import numpy as np\n'), ((480, 506), 'os.listdir', 'os.listdir', (['label_dir_name'], {}), '(label_dir_name)\n', (490, 506), False, 'import os\n'), ((796, 842), 'numpy.random.permutation', 'np.random.permutation', (['all_label_data.shape[0]'], {}), '(all_label_data.shape[0])\n', (817, 842), True, 'import numpy as np\n'), ((586, 626), 'os.path.join', 'os.path.join', (['label_dir_name', 'label_file'], {}), '(label_dir_name, label_file)\n', (598, 626), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 01:03:24 2021
@author: Mahfuz_Shazol
"""
import numpy as np
X=np.array([
[4,2],
[-5,-3]
])
result =np.linalg.det(X)
print(result)
N=np.array([
[-4,1],
[-8,2]
])
result =np.linalg.det(N)
print(result)
|
[
"numpy.linalg.det",
"numpy.array"
] |
[((115, 143), 'numpy.array', 'np.array', (['[[4, 2], [-5, -3]]'], {}), '([[4, 2], [-5, -3]])\n', (123, 143), True, 'import numpy as np\n'), ((165, 181), 'numpy.linalg.det', 'np.linalg.det', (['X'], {}), '(X)\n', (178, 181), True, 'import numpy as np\n'), ((201, 229), 'numpy.array', 'np.array', (['[[-4, 1], [-8, 2]]'], {}), '([[-4, 1], [-8, 2]])\n', (209, 229), True, 'import numpy as np\n'), ((251, 267), 'numpy.linalg.det', 'np.linalg.det', (['N'], {}), '(N)\n', (264, 267), True, 'import numpy as np\n')]
|
"""
https://circuitdigest.com/microcontroller-projects/license-plate-recognition-using-raspberry-pi-and-opencv
"""
import logging
import typing as t
import imutils
import numpy as np
import pytesseract
from cv2 import cv2
from car_plate_recognizer.handlers.base import BaseHandler, Plate, save_img
logger = logging.getLogger(__name__)
class CircuitDigestHandler(BaseHandler):
def get_plates(self, image: np.ndarray, frame_index: int) -> t.List[Plate]:
plates = []
image = cv2.resize(image, (620, 480)) # move to top level
img_edged, img_gray = preprocess(image)
contours = get_contours(img_edged)
mask = np.zeros(img_gray.shape, np.uint8)
for index, contour in enumerate(contours):
new_image = cv2.drawContours(mask, [contour], 0, 255, -1)
save_img(new_image, f"Circuit{frame_index}-{index}-new_image-1.jpg")
new_image = cv2.bitwise_and(image, image, mask=mask)
save_img(new_image, f"Circuit{frame_index}-{index}-new_image-2.jpg")
# Now crop
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x), np.max(y))
cropped = img_gray[topx : bottomx + 1, topy : bottomy + 1]
# Read the number plate
text = pytesseract.image_to_string(cropped, config="--psm 11")
print("Detected Number is:", text)
save_img(image, f"Circuit{frame_index}-{index}-origin.jpg")
save_img(cropped, f"Circuit{frame_index}-{index}-cropped.jpg")
return plates
def preprocess(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert to grey scale
gray = cv2.bilateralFilter(gray, 11, 17, 17) # Blur to reduce noise
return cv2.Canny(gray, 30, 200), gray # Perform Edge detection
def get_contours(image):
contours = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
filtered_contours = []
# loop over our contours
for contour in contours:
# approximate the contour
peri = cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, 0.018 * peri, True)
# if our approximated contour has four points, then
# we can assume that we have found our screen
if len(approx) == 4:
filtered_contours.append(approx)
# save_img(image, f'Circuit-before.jpg')
for index, contour in enumerate(filtered_contours):
cv2.drawContours(image, [contour], -1, (0, 255, 0), 3)
# save_img(image, f'Circuit-after-{index}.jpg')
# save_img(image, f'Circuit-after.jpg')
# if detected == 1:
# cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)
print("filtered_contours count", len(filtered_contours))
return filtered_contours
|
[
"cv2.cv2.Canny",
"cv2.cv2.arcLength",
"cv2.cv2.drawContours",
"cv2.cv2.bitwise_and",
"cv2.cv2.bilateralFilter",
"numpy.zeros",
"cv2.cv2.findContours",
"cv2.cv2.resize",
"pytesseract.image_to_string",
"cv2.cv2.approxPolyDP",
"numpy.min",
"numpy.where",
"numpy.max",
"imutils.grab_contours",
"car_plate_recognizer.handlers.base.save_img",
"cv2.cv2.cvtColor",
"logging.getLogger"
] |
[((311, 338), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (328, 338), False, 'import logging\n'), ((1650, 1689), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1662, 1689), False, 'from cv2 import cv2\n'), ((1726, 1763), 'cv2.cv2.bilateralFilter', 'cv2.bilateralFilter', (['gray', '(11)', '(17)', '(17)'], {}), '(gray, 11, 17, 17)\n', (1745, 1763), False, 'from cv2 import cv2\n'), ((1898, 1961), 'cv2.cv2.findContours', 'cv2.findContours', (['image', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1914, 1961), False, 'from cv2 import cv2\n'), ((1977, 2008), 'imutils.grab_contours', 'imutils.grab_contours', (['contours'], {}), '(contours)\n', (1998, 2008), False, 'import imutils\n'), ((498, 527), 'cv2.cv2.resize', 'cv2.resize', (['image', '(620, 480)'], {}), '(image, (620, 480))\n', (508, 527), False, 'from cv2 import cv2\n'), ((657, 691), 'numpy.zeros', 'np.zeros', (['img_gray.shape', 'np.uint8'], {}), '(img_gray.shape, np.uint8)\n', (665, 691), True, 'import numpy as np\n'), ((1799, 1823), 'cv2.cv2.Canny', 'cv2.Canny', (['gray', '(30)', '(200)'], {}), '(gray, 30, 200)\n', (1808, 1823), False, 'from cv2 import cv2\n'), ((2216, 2244), 'cv2.cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (2229, 2244), False, 'from cv2 import cv2\n'), ((2262, 2307), 'cv2.cv2.approxPolyDP', 'cv2.approxPolyDP', (['contour', '(0.018 * peri)', '(True)'], {}), '(contour, 0.018 * peri, True)\n', (2278, 2307), False, 'from cv2 import cv2\n'), ((2607, 2661), 'cv2.cv2.drawContours', 'cv2.drawContours', (['image', '[contour]', '(-1)', '(0, 255, 0)', '(3)'], {}), '(image, [contour], -1, (0, 255, 0), 3)\n', (2623, 2661), False, 'from cv2 import cv2\n'), ((767, 812), 'cv2.cv2.drawContours', 'cv2.drawContours', (['mask', '[contour]', '(0)', '(255)', '(-1)'], {}), '(mask, [contour], 0, 255, -1)\n', (783, 812), False, 
'from cv2 import cv2\n'), ((825, 893), 'car_plate_recognizer.handlers.base.save_img', 'save_img', (['new_image', 'f"""Circuit{frame_index}-{index}-new_image-1.jpg"""'], {}), "(new_image, f'Circuit{frame_index}-{index}-new_image-1.jpg')\n", (833, 893), False, 'from car_plate_recognizer.handlers.base import BaseHandler, Plate, save_img\n'), ((918, 958), 'cv2.cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (933, 958), False, 'from cv2 import cv2\n'), ((971, 1039), 'car_plate_recognizer.handlers.base.save_img', 'save_img', (['new_image', 'f"""Circuit{frame_index}-{index}-new_image-2.jpg"""'], {}), "(new_image, f'Circuit{frame_index}-{index}-new_image-2.jpg')\n", (979, 1039), False, 'from car_plate_recognizer.handlers.base import BaseHandler, Plate, save_img\n'), ((1085, 1106), 'numpy.where', 'np.where', (['(mask == 255)'], {}), '(mask == 255)\n', (1093, 1106), True, 'import numpy as np\n'), ((1340, 1395), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['cropped'], {'config': '"""--psm 11"""'}), "(cropped, config='--psm 11')\n", (1367, 1395), False, 'import pytesseract\n'), ((1456, 1515), 'car_plate_recognizer.handlers.base.save_img', 'save_img', (['image', 'f"""Circuit{frame_index}-{index}-origin.jpg"""'], {}), "(image, f'Circuit{frame_index}-{index}-origin.jpg')\n", (1464, 1515), False, 'from car_plate_recognizer.handlers.base import BaseHandler, Plate, save_img\n'), ((1528, 1590), 'car_plate_recognizer.handlers.base.save_img', 'save_img', (['cropped', 'f"""Circuit{frame_index}-{index}-cropped.jpg"""'], {}), "(cropped, f'Circuit{frame_index}-{index}-cropped.jpg')\n", (1536, 1590), False, 'from car_plate_recognizer.handlers.base import BaseHandler, Plate, save_img\n'), ((1135, 1144), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1141, 1144), True, 'import numpy as np\n'), ((1146, 1155), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (1152, 1155), True, 'import numpy as np\n'), ((1191, 1200), 
'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1197, 1200), True, 'import numpy as np\n'), ((1202, 1211), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (1208, 1211), True, 'import numpy as np\n')]
|
"""
<NAME>
<NAME>
March 2021
Final project for Climate Dynamics
presented to Kyle Armour and <NAME>
Ocean 2-layer model
"""
## Import packages ##
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
pht = os.path.abspath('/Users/jadesauve/Documents/Python/scripts/2_layer_carbon/')
if pht not in sys.path:
sys.path.append(pht)
from modules import *
#### Parameters ####
directory_out_fig = ''
# model constants
rho = 1025 # density of sea water kg/m3
cw = 3985 # specific heat of sea water J/Kg/C
secondsperyear = 30.25*24*60*60*12
# parameters
gamma = 0.7 # ocean heat uptake efficiency parameter (W/m2/K), strength of coupling between layers
lambda0 = -1.2 #r adiative feedback parameter (W/m2/K)
e = 1.3 # ocean heat uptake efficacy (unitless)
hsfc = 100 # upper ocean depth, meters
hdeep = 1000 # deep ocean depth, meters
# R = 4 #forcing for CO2 doubling, (W/m2)
### uses R from kyle_model - UPDATE
num_years = 300 # length of run in years
###################
# coefficients
csfc = rho * cw * hsfc # upper ocean, units J/m2/C
cdeep = rho * cw * hdeep # deep ocean, units J/m2/C
# define time variable
years = np.arange(num_years)
# define dataframe to hold Tsfc and Tdeep
df = pd.DataFrame(index=years,columns=['Tsfc','Tdeep'])
# this assumes that the initial temperature anomalies for the surface ocean and the deep ocean are 0C
for i in np.arange(0,num_years-1):
df.loc[i+1,'Tsfc'] = df.loc[i,'Tsfc'] + ((lambda0 * df.loc[i,'Tsfc'] + R[i] + gamma*e*(df.loc[i,'Tdeep'] - df.loc[i,'Tsfc']))/csfc * secondsperyear)
df.loc[i+1,'Tdeep'] = df.loc[i,'Tdeep'] + ((gamma * (df.loc[i,'Tsfc'] - df.loc[i,'Tdeep']))/ cdeep * secondsperyear)
toa_imbalance = R + lambda0*df['Tsfc'] - (e-1)*gamma*(df['Tsfc'] - df['Tdeep']) # top of atmosphere radiative imbalance
title = ''
plt.figure()
plt.plot(years, df['Tsfc'], label='Surface Layer')
plt.plot(years, df['Tdeep'],label='Deep Ocean Layer')
plt.xlabel('Years', size = 'xx-large')
plt.ylabel('Temperature Anomaly (K)', size = 'xx-large')
plt.legend(legend)
plt.title(title)
file_out = ''
# plt.savefig(directory_out_fig + file_out,format='eps',dpi=200)
plt.show()
title = ''
plt.figure()
plt.plot(df['Tsfc'], toa_imbalance)
plt.xlabel('Temperature Anomaly (K)', size = 'xx-large')
plt.ylabel('TOA imbalance (W/m2/K)', size = 'xx-large')
plt.title(title)
file_out = ''
# plt.savefig(directory_out_fig + file_out,format='eps',dpi=200)
plt.show()
|
[
"pandas.DataFrame",
"matplotlib.pyplot.title",
"os.path.abspath",
"sys.path.append",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((249, 325), 'os.path.abspath', 'os.path.abspath', (['"""/Users/jadesauve/Documents/Python/scripts/2_layer_carbon/"""'], {}), "('/Users/jadesauve/Documents/Python/scripts/2_layer_carbon/')\n", (264, 325), False, 'import os\n'), ((1174, 1194), 'numpy.arange', 'np.arange', (['num_years'], {}), '(num_years)\n', (1183, 1194), True, 'import numpy as np\n'), ((1243, 1295), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'years', 'columns': "['Tsfc', 'Tdeep']"}), "(index=years, columns=['Tsfc', 'Tdeep'])\n", (1255, 1295), True, 'import pandas as pd\n'), ((1406, 1433), 'numpy.arange', 'np.arange', (['(0)', '(num_years - 1)'], {}), '(0, num_years - 1)\n', (1415, 1433), True, 'import numpy as np\n'), ((1839, 1851), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1849, 1851), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1902), 'matplotlib.pyplot.plot', 'plt.plot', (['years', "df['Tsfc']"], {'label': '"""Surface Layer"""'}), "(years, df['Tsfc'], label='Surface Layer')\n", (1860, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1903, 1957), 'matplotlib.pyplot.plot', 'plt.plot', (['years', "df['Tdeep']"], {'label': '"""Deep Ocean Layer"""'}), "(years, df['Tdeep'], label='Deep Ocean Layer')\n", (1911, 1957), True, 'import matplotlib.pyplot as plt\n'), ((1957, 1993), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Years"""'], {'size': '"""xx-large"""'}), "('Years', size='xx-large')\n", (1967, 1993), True, 'import matplotlib.pyplot as plt\n'), ((1996, 2050), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Temperature Anomaly (K)"""'], {'size': '"""xx-large"""'}), "('Temperature Anomaly (K)', size='xx-large')\n", (2006, 2050), True, 'import matplotlib.pyplot as plt\n'), ((2053, 2071), 'matplotlib.pyplot.legend', 'plt.legend', (['legend'], {}), '(legend)\n', (2063, 2071), True, 'import matplotlib.pyplot as plt\n'), ((2072, 2088), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2081, 2088), True, 'import matplotlib.pyplot as plt\n'), 
((2168, 2178), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2176, 2178), True, 'import matplotlib.pyplot as plt\n'), ((2193, 2205), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2203, 2205), True, 'import matplotlib.pyplot as plt\n'), ((2206, 2241), 'matplotlib.pyplot.plot', 'plt.plot', (["df['Tsfc']", 'toa_imbalance'], {}), "(df['Tsfc'], toa_imbalance)\n", (2214, 2241), True, 'import matplotlib.pyplot as plt\n'), ((2242, 2296), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature Anomaly (K)"""'], {'size': '"""xx-large"""'}), "('Temperature Anomaly (K)', size='xx-large')\n", (2252, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2299, 2352), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TOA imbalance (W/m2/K)"""'], {'size': '"""xx-large"""'}), "('TOA imbalance (W/m2/K)', size='xx-large')\n", (2309, 2352), True, 'import matplotlib.pyplot as plt\n'), ((2355, 2371), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2364, 2371), True, 'import matplotlib.pyplot as plt\n'), ((2451, 2461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2459, 2461), True, 'import matplotlib.pyplot as plt\n'), ((354, 374), 'sys.path.append', 'sys.path.append', (['pht'], {}), '(pht)\n', (369, 374), False, 'import sys\n')]
|
"""Contains the logic for handling read model corruption invocation"""
from multiprocessing import Process, Queue
import time
import pysam
import numpy as np
from mitty.simulation.sequencing.writefastq import writer, load_qname_sidecar, parse_qname
import logging
logger = logging.getLogger(__name__)
SEED_MAX = (1 << 32) - 1 # Used for seeding rng
__process_stop_code__ = 'SETECASTRONOMY'
def multi_process(read_module, read_model, fastq1_in, fastq1_out, sidecar_in, sidecar_out,
fastq2_in=None, fastq2_out=None, processes=2, seed=7):
"""
:param read_module:
:param read_model:
:param fastq1_in:
:param fastq1_out:
:param sidecar_in:
:param sidecar_out:
:param fastq2_in:
:param fastq2_out:
:param processes:
:param seed:
:return:
"""
long_qname_table = load_qname_sidecar(sidecar_in)
seed_rng = np.random.RandomState(seed)
logger.debug('Starting {} workers'.format(processes))
in_queue, out_queue = Queue(10000), Queue(10000)
p_list = [Process(target=worker,
args=(i, read_module, read_model, long_qname_table, in_queue, out_queue, seed_rng.randint(SEED_MAX)))
for i in range(processes)]
for p in p_list:
p.start()
logger.debug('Starting writer process')
wr = Process(target=writer, args=(fastq1_out, sidecar_out, fastq2_out, out_queue))
wr.start()
t0 = time.time()
# Burn through file
logger.debug('Starting to read FASTQ file')
fastq_l = [pysam.FastxFile(fastq1_in)]
if fastq2_in is not None: fastq_l += [pysam.FastxFile(fastq2_in)]
cnt = 0
for cnt, reads in enumerate(zip(*fastq_l)):
# [(qname, seq, seq) ... ]
in_queue.put((reads[0].name,) + tuple(r.sequence for r in reads))
if cnt % 100000 == 0:
logger.debug('Read {} templates'.format(cnt))
logger.debug('Stopping child processes')
for i in range(processes):
in_queue.put(__process_stop_code__)
for p in p_list:
p.join()
logger.debug('Stopping writer')
out_queue.put(__process_stop_code__)
wr.join()
t1 = time.time()
logger.debug('Processed {} templates in {:0.2f}s ({:0.2f} t/s)'.format(cnt, t1 - t0, cnt/(t1 - t0)))
def worker(worker_id, read_module, read_model, long_qname_table, in_queue, out_queue, seed):
"""Worker grabs templates from the in_queue, passes them through the read corrupter and then back to the
out_queue
:param worker_id: Just an int to help us identify things if needed
:param read_model:
:param long_qname_table: a dict mapping qname index to full qname for qnames > 254 chars in length
:param in_queue:
:param out_queue:
:param seed:
:return:
"""
corrupt_rng = np.random.RandomState(seed)
logger.debug('Starting worker {} ...'.format(worker_id))
cnt, t0 = -1, time.time()
for cnt, template in enumerate(iter(in_queue.get, __process_stop_code__)):
# template - (qname, seq, seq)
ri = parse_qname(template[0], long_qname_table)
# read_module.corrupt_template gets and returns data in the form
# (
# index, - same as the original index from the perfect reads
# sample_name,
# chrom,
# copy,
# (
# (strand, pos, cigar, (v1,v2,...), MD, seq, qual)
# ... [repeated as for as many reads in this template]
# )
# )
out_queue.put(read_module.corrupt_template(
read_model,
(
ri[0].index,
ri[0].sample,
ri[0].chrom,
ri[0].cpy,
((r.strand, r.pos, r.special_cigar or r.cigar, r.v_list, r.md, seq, None) for r, seq in zip(ri, template[1:]))
),
corrupt_rng))
if cnt % 100000 == 0:
t1 = time.time()
logger.debug('Worker {}: Processed {} templates ({} t/s)'.format(worker_id, cnt + 1, (cnt + 1) / (t1 - t0)))
t1 = time.time()
logger.debug('... worker {} processed {} templates ({} t/s)'.format(worker_id, cnt + 1, (cnt + 1) / (t1 - t0)))
|
[
"pysam.FastxFile",
"mitty.simulation.sequencing.writefastq.load_qname_sidecar",
"numpy.random.RandomState",
"time.time",
"mitty.simulation.sequencing.writefastq.parse_qname",
"multiprocessing.Queue",
"multiprocessing.Process",
"logging.getLogger"
] |
[((276, 303), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (293, 303), False, 'import logging\n'), ((811, 841), 'mitty.simulation.sequencing.writefastq.load_qname_sidecar', 'load_qname_sidecar', (['sidecar_in'], {}), '(sidecar_in)\n', (829, 841), False, 'from mitty.simulation.sequencing.writefastq import writer, load_qname_sidecar, parse_qname\n'), ((856, 883), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (877, 883), True, 'import numpy as np\n'), ((1271, 1348), 'multiprocessing.Process', 'Process', ([], {'target': 'writer', 'args': '(fastq1_out, sidecar_out, fastq2_out, out_queue)'}), '(target=writer, args=(fastq1_out, sidecar_out, fastq2_out, out_queue))\n', (1278, 1348), False, 'from multiprocessing import Process, Queue\n'), ((1370, 1381), 'time.time', 'time.time', ([], {}), '()\n', (1379, 1381), False, 'import time\n'), ((2035, 2046), 'time.time', 'time.time', ([], {}), '()\n', (2044, 2046), False, 'import time\n'), ((2643, 2670), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2664, 2670), True, 'import numpy as np\n'), ((3744, 3755), 'time.time', 'time.time', ([], {}), '()\n', (3753, 3755), False, 'import time\n'), ((965, 977), 'multiprocessing.Queue', 'Queue', (['(10000)'], {}), '(10000)\n', (970, 977), False, 'from multiprocessing import Process, Queue\n'), ((979, 991), 'multiprocessing.Queue', 'Queue', (['(10000)'], {}), '(10000)\n', (984, 991), False, 'from multiprocessing import Process, Queue\n'), ((1464, 1490), 'pysam.FastxFile', 'pysam.FastxFile', (['fastq1_in'], {}), '(fastq1_in)\n', (1479, 1490), False, 'import pysam\n'), ((2746, 2757), 'time.time', 'time.time', ([], {}), '()\n', (2755, 2757), False, 'import time\n'), ((2879, 2921), 'mitty.simulation.sequencing.writefastq.parse_qname', 'parse_qname', (['template[0]', 'long_qname_table'], {}), '(template[0], long_qname_table)\n', (2890, 2921), False, 'from mitty.simulation.sequencing.writefastq 
import writer, load_qname_sidecar, parse_qname\n'), ((1532, 1558), 'pysam.FastxFile', 'pysam.FastxFile', (['fastq2_in'], {}), '(fastq2_in)\n', (1547, 1558), False, 'import pysam\n'), ((3610, 3621), 'time.time', 'time.time', ([], {}), '()\n', (3619, 3621), False, 'import time\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions to deal sample split test."""
import copy
import numpy as np
from scipy.spatial import cKDTree
from scipy.optimize import curve_fit
from astropy.table import Column, vstack
from . import wlensing
from . import visual
__all__ = ["get_mask_strait_line", "strait_line", "get_mask_rank_split", "sample_split_test",
"dsig_compare_matched_sample"]
def strait_line(x, A, B):
"""Simple strait line model."""
return A * x + B
def get_mask_strait_line(x_arr, y_arr, mask=None, sigma=1.5, return_result=True):
    """Split the sample into points above/below a best-fit straight line.

    A line ``y = A * x + B`` is fitted to the (optionally masked) data;
    points more than ``sigma`` intercept-errors above the line go into the
    "upper" mask, points the same distance below into the "lower" mask.

    Returns ``[mask_low, mask_upp]`` and, when ``return_result`` is True,
    also a dict with the best-fit slope/intercept and their 1-sigma errors.
    """
    if mask is None:
        mask = np.isfinite(x_arr)
    # curve_fit returns (popt, pcov); popt unpacks as (A, B) = (slope, inter)
    (slope, inter), pcov = curve_fit(strait_line, x_arr[mask], y_arr[mask])
    slope_err, inter_err = np.sqrt(np.diag(pcov))
    baseline = x_arr * slope + inter
    offset = sigma * inter_err
    mask_upp = mask & (y_arr > baseline + offset)
    mask_low = mask & (y_arr < baseline - offset)
    masks = [mask_low, mask_upp]
    if return_result:
        return masks, {"slope": slope, "inter": inter,
                       "slope_err": slope_err, "inter_err": inter_err,
                       "sigma": sigma}
    return masks
def get_mask_rank_split(cat, X_col, Y_col, n_bins=5, n_sample=2, X_min=None, X_max=None,
                        X_bins=None, select=1, return_data=False, mask=None):
    """Split sample into N_sample with fixed distribution in X, but different
    rank orders in Y.

    The catalog is binned in ``X``; within each X-bin the objects are sorted
    by ``Y`` and dealt into ``n_sample`` equal-size chunks, so every chunk
    shares the same X distribution but a different Y rank.  The returned
    masks select the lowest-``select`` and highest-``select`` chunks.

    Parameters
    ----------
    cat : astropy Table (or similar) — copied, not modified in place.
    X_col, Y_col : str column names, or arrays aligned with ``cat``.
    n_bins : number of X bins (ignored when ``X_bins`` is given).
    n_sample : number of rank chunks per X bin; must be > ``select``.
    X_min, X_max, X_bins : optional explicit binning of X.
    select : how many chunks from each end go into the two masks.
    return_data : also return the re-sorted working table.
    mask : optional boolean pre-selection applied to ``cat`` first.

    Returns
    -------
    [mask_low, mask_upp] (and the working table when ``return_data``);
    the masks index the working table, which is sorted back to the input
    row order via ``index_ori``.
    """
    if mask is not None:
        data = copy.deepcopy(cat[mask])
    else:
        data = copy.deepcopy(cat)
    if isinstance(X_col, str):
        X = data[X_col]
    else:
        if mask is None:
            X = X_col
        else:
            X = X_col[mask]
        data.add_column(Column(data=X, name='X'))
    if isinstance(Y_col, str):
        data.rename_column(Y_col, 'Y')
    else:
        if mask is None:
            data.add_column(Column(data=Y_col, name='Y'))
        else:
            data.add_column(Column(data=Y_col[mask], name='Y'))
    X_len = len(X)
    if X_bins is None:
        if X_min is None:
            X_min = np.nanmin(X)
        if X_max is None:
            X_max = np.nanmax(X)
        msg = '# Sample size should be much larger than number of bins in X'
        assert X_len > (2 * n_bins), msg
        # Pad the range slightly so the extreme points fall inside the bins
        X_bins = np.linspace(X_min * 0.95, X_max * 1.05, (n_bins + 1))
    else:
        n_bins = (len(X_bins) - 1)
    # Place holder for sample ID
    data.add_column(Column(data=(np.arange(X_len) * 0), name='sample_id'))
    data.add_column(Column(data=np.arange(X_len), name='index_ori'))
    # Create index array for object in each bin
    X_idxbins = np.digitize(X, X_bins, right=True)
    bin_list = []
    for ii in range(n_bins):
        subbin = data[X_idxbins == (ii + 1)]
        if isinstance(Y_col, str):
            subbin.sort(Y_col)
        else:
            subbin.sort('Y')
        subbin_len = len(subbin)
        # Chunk size so that n_sample chunks cover the whole bin
        subbin_size = int(np.ceil(subbin_len / n_sample))
        idx_start, idx_end = 0, subbin_size
        for jj in range(n_sample):
            if idx_end > subbin_len:
                idx_end = subbin_len
            # Chunks are labelled 1..n_sample in increasing Y order
            subbin['sample_id'][idx_start:idx_end] = (jj + 1)
            idx_start = idx_end
            idx_end += subbin_size
        bin_list.append(subbin)
    new_data = vstack(bin_list)
    # Restore the original row order so masks align with the input catalog
    new_data.sort('index_ori')
    new_data.meta = cat.meta
    if n_sample <= select:
        raise ValueError("n_sample needs to be larger than select!")
    mask_1 = (new_data['sample_id'] <= select)
    mask_2 = (new_data['sample_id'] > (n_sample - select))
    if return_data:
        return [mask_1, mask_2], new_data
    return [mask_1, mask_2]
def sample_split_test(cat, x_arr, y_arr, rand, mask=None, n_rand=150000, n_boot=0,
                      bootstrap=False, sigma=1.5, n_bins=30, n_sample=5, select=2,
                      n_jk=45, plot=True, rank=True, input_masks=None, **plot_kwargs):
    """Sample split DeltaSigma test.

    Splits the catalog two ways — around the best-fit X-Y scaling relation
    (``get_mask_strait_line``) and, optionally, by Y-rank at fixed X
    (``get_mask_rank_split``) — and measures the DeltaSigma profile of each
    sub-sample via ``wlensing.batch_dsigma_profiles``.

    ``x_arr``/``y_arr`` may be column names in ``cat`` or arrays; when
    ``input_masks`` is given the line fit is skipped and ``rank`` is
    disabled.  Returns ``dsig_line`` (and ``dsig_rank`` when ``rank``).
    """
    if isinstance(x_arr, str) and isinstance(y_arr, str):
        x_arr = cat[x_arr]
        y_arr = cat[y_arr]
    # Using the best-fit scaling relation
    if input_masks is None:
        mask_list, line_result = get_mask_strait_line(x_arr, y_arr, mask=mask, sigma=sigma)
    else:
        mask_list, line_result = input_masks, None
        if mask is not None:
            mask_list = [(m & mask) for m in mask_list]
        rank = False
    dsig_line = wlensing.batch_dsigma_profiles(
        cat, rand, mask_list, n_rand=n_rand, n_boot=n_boot, bootstrap=bootstrap,
        n_jk=n_jk, verbose=True, n_jobs=None)
    if plot:
        _ = visual.show_split_result(
            dsig_line, x_arr, y_arr, mask_list, mask=mask, line_result=line_result, **plot_kwargs)
    # Rank order splitting
    if rank:
        mask_list, data_new = get_mask_rank_split(
            cat, x_arr, y_arr, n_bins=n_bins, n_sample=n_sample, select=select,
            mask=mask, return_data=True)
        dsig_rank = wlensing.batch_dsigma_profiles(
            data_new, rand, mask_list, n_rand=n_rand, n_boot=n_boot, bootstrap=bootstrap,
            n_jk=n_jk, verbose=True, n_jobs=None)
        if plot:
            # NOTE(review): x_arr[mask] assumes mask is a boolean array here;
            # when rank=True and mask is None this adds an axis instead of
            # selecting rows — confirm callers always pass a mask with rank.
            _ = visual.show_split_result(
                dsig_rank, x_arr[mask], y_arr[mask], mask_list, mask=None, **plot_kwargs)
        return dsig_line, dsig_rank
    return dsig_line
def dsig_compare_matched_sample(sample, target, col_1, col_2, leaf_size=9, query_size=2,
                                unique=True):
    """Compare the DSigma profiles of two samples after matching them.

    For every ``target`` object the ``query_size`` nearest neighbours in the
    (``col_1``, ``col_2``) plane of ``sample`` are found with a KD-tree; the
    matched ``sample`` rows are stacked below ``target`` with a ``type``
    column ('target' / 'match') telling them apart.

    Note: mutates ``target`` in place by adding the ``type`` column.
    Returns the flat match-index array and the stacked table.
    """
    ref_xy = np.column_stack(
        [np.asarray(sample[col_1]), np.asarray(sample[col_2])])
    tar_xy = np.column_stack(
        [np.asarray(target[col_1]), np.asarray(target[col_2])])
    kdtree = cKDTree(ref_xy, leafsize=leaf_size)
    index_match = kdtree.query(tar_xy, k=query_size)[1].flatten()
    match = sample[np.unique(index_match) if unique else index_match]
    target.add_column(Column(
        data=(['target'] * len(target)), name='type'))
    match.add_column(Column(
        data=(['match'] * len(match)), name='type'))
    combined = vstack([target, match])
    combined.meta = sample.meta
    return index_match, combined
|
[
"copy.deepcopy",
"numpy.ceil",
"numpy.asarray",
"numpy.unique",
"numpy.isfinite",
"numpy.nanmin",
"scipy.optimize.curve_fit",
"astropy.table.vstack",
"numpy.arange",
"numpy.linspace",
"scipy.spatial.cKDTree",
"astropy.table.Column",
"numpy.diag",
"numpy.digitize",
"numpy.nanmax"
] |
[((713, 761), 'scipy.optimize.curve_fit', 'curve_fit', (['strait_line', 'x_arr[mask]', 'y_arr[mask]'], {}), '(strait_line, x_arr[mask], y_arr[mask])\n', (722, 761), False, 'from scipy.optimize import curve_fit\n'), ((2775, 2809), 'numpy.digitize', 'np.digitize', (['X', 'X_bins'], {'right': '(True)'}), '(X, X_bins, right=True)\n', (2786, 2809), True, 'import numpy as np\n'), ((3436, 3452), 'astropy.table.vstack', 'vstack', (['bin_list'], {}), '(bin_list)\n', (3442, 3452), False, 'from astropy.table import Column, vstack\n'), ((5876, 5913), 'scipy.spatial.cKDTree', 'cKDTree', (['data_ref'], {'leafsize': 'leaf_size'}), '(data_ref, leafsize=leaf_size)\n', (5883, 5913), False, 'from scipy.spatial import cKDTree\n'), ((6294, 6317), 'astropy.table.vstack', 'vstack', (['[target, match]'], {}), '([target, match])\n', (6300, 6317), False, 'from astropy.table import Column, vstack\n'), ((676, 694), 'numpy.isfinite', 'np.isfinite', (['x_arr'], {}), '(x_arr)\n', (687, 694), True, 'import numpy as np\n'), ((781, 794), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (788, 794), True, 'import numpy as np\n'), ((1620, 1644), 'copy.deepcopy', 'copy.deepcopy', (['cat[mask]'], {}), '(cat[mask])\n', (1633, 1644), False, 'import copy\n'), ((1670, 1688), 'copy.deepcopy', 'copy.deepcopy', (['cat'], {}), '(cat)\n', (1683, 1688), False, 'import copy\n'), ((2433, 2484), 'numpy.linspace', 'np.linspace', (['(X_min * 0.95)', '(X_max * 1.05)', '(n_bins + 1)'], {}), '(X_min * 0.95, X_max * 1.05, n_bins + 1)\n', (2444, 2484), True, 'import numpy as np\n'), ((1868, 1892), 'astropy.table.Column', 'Column', ([], {'data': 'X', 'name': '"""X"""'}), "(data=X, name='X')\n", (1874, 1892), False, 'from astropy.table import Column, vstack\n'), ((2225, 2237), 'numpy.nanmin', 'np.nanmin', (['X'], {}), '(X)\n', (2234, 2237), True, 'import numpy as np\n'), ((2284, 2296), 'numpy.nanmax', 'np.nanmax', (['X'], {}), '(X)\n', (2293, 2296), True, 'import numpy as np\n'), ((3072, 3102), 'numpy.ceil', 'np.ceil', 
(['(subbin_len / n_sample)'], {}), '(subbin_len / n_sample)\n', (3079, 3102), True, 'import numpy as np\n'), ((5726, 5751), 'numpy.asarray', 'np.asarray', (['sample[col_1]'], {}), '(sample[col_1])\n', (5736, 5751), True, 'import numpy as np\n'), ((5753, 5778), 'numpy.asarray', 'np.asarray', (['sample[col_2]'], {}), '(sample[col_2])\n', (5763, 5778), True, 'import numpy as np\n'), ((5810, 5835), 'numpy.asarray', 'np.asarray', (['target[col_1]'], {}), '(target[col_1])\n', (5820, 5835), True, 'import numpy as np\n'), ((5837, 5862), 'numpy.asarray', 'np.asarray', (['target[col_2]'], {}), '(target[col_2])\n', (5847, 5862), True, 'import numpy as np\n'), ((6038, 6060), 'numpy.unique', 'np.unique', (['index_match'], {}), '(index_match)\n', (6047, 6060), True, 'import numpy as np\n'), ((2028, 2056), 'astropy.table.Column', 'Column', ([], {'data': 'Y_col', 'name': '"""Y"""'}), "(data=Y_col, name='Y')\n", (2034, 2056), False, 'from astropy.table import Column, vstack\n'), ((2100, 2134), 'astropy.table.Column', 'Column', ([], {'data': 'Y_col[mask]', 'name': '"""Y"""'}), "(data=Y_col[mask], name='Y')\n", (2106, 2134), False, 'from astropy.table import Column, vstack\n'), ((2673, 2689), 'numpy.arange', 'np.arange', (['X_len'], {}), '(X_len)\n', (2682, 2689), True, 'import numpy as np\n'), ((2599, 2615), 'numpy.arange', 'np.arange', (['X_len'], {}), '(X_len)\n', (2608, 2615), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 18:30:44 2019
@author: <NAME>
Edited on Apr 18th 2019
@author: <NAME>
"""
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Input, Dense, LeakyReLU, Dropout
from keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt
# Load data
# Load data; only the training images are needed (labels and test split unused)
(X_train, _), (_, _) = mnist.load_data()
# Preprocessing: flatten each 28x28 image to a 784-vector, scale to [0, 1]
X_train = X_train.reshape(-1, 784)
X_train = X_train.astype('float32')/255
# Set the dimensions of the noise (latent) vector fed to the generator
z_dim = 100
# Optimizer shared by generator, discriminator and the combined GAN
adam = Adam(lr=0.0002, beta_1=0.5)
# Generator: z_dim noise vector -> 784 sigmoid pixels in [0, 1]
g = Sequential()
g.add(Dense(256, input_dim = 100))  # NOTE(review): 100 duplicates z_dim -- keep in sync
g.add(LeakyReLU(0.2))
g.add(Dense(512))
g.add(LeakyReLU(0.2))
g.add(Dense(1024))
g.add(LeakyReLU(0.2))
g.add(Dense(784, activation='sigmoid'))
g.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
# Discriminator: 784 pixels -> probability that the image is real
d = Sequential()
d.add(Dense(1024, input_dim = 784))
d.add(LeakyReLU(0.2))
d.add(Dropout(0.3))
d.add(Dense(512))
d.add(LeakyReLU(0.2))
d.add(Dropout(0.3))
d.add(Dense(256))
d.add(LeakyReLU(0.2))
d.add(Dropout(0.3))
d.add(Dense(1, activation='sigmoid'))
d.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
# GAN: freeze the discriminator and train the stacked generator->discriminator
d.trainable = False
inputs = Input(shape=(z_dim, ))
hidden = g(inputs)
output = d(hidden)
gan = Model(inputs, output)
gan.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
# Per-epoch training history; NOTE(review): despite the names these record
# the accuracy metric, not the loss (see the appends in train() below)
dLoss = []
gLoss = []
# Training
def train(epochs=1, plt_frq=1, BATCH_SIZE=128):
    """Alternating GAN training loop.

    Uses the module-level ``X_train``, ``g``, ``d``, ``gan`` and the history
    lists ``dLoss``/``gLoss``.  Every ``plt_frq`` epochs a grid of generated
    samples is shown via ``generate_image()``.
    """
    batchCount = int(X_train.shape[0] / BATCH_SIZE)
    print('Epochs:', epochs)
    print('Batch size:', BATCH_SIZE)
    print('Batches per epoch:', batchCount)
    for e in (range(1, epochs+1)):
        print("Epoch:",e)
        for _ in range(batchCount):
            # Create a batch by drawing random index numbers from the training set
            image_batch = X_train[np.random.randint(0, X_train.shape[0], size=BATCH_SIZE)]
            # Create noise vectors for the generator
            noise = np.random.normal(0, 1, size=(BATCH_SIZE, z_dim))
            # Generate the images from the noise
            generated_images = g.predict(noise)
            X = np.concatenate((image_batch, generated_images))
            # Create labels: first half (real images) -> 1, second half (fakes) -> 0
            y = np.zeros(2*BATCH_SIZE)
            y[:BATCH_SIZE] = 1
            # Train discriminator on real + generated images
            d.trainable = True
            d_loss = d.train_on_batch(X, y)
            # Train generator: push the frozen discriminator to call fakes real
            noise = np.random.normal(0, 1, size=(BATCH_SIZE, z_dim))
            y2 = np.ones(BATCH_SIZE)
            d.trainable = False
            g_loss = gan.train_on_batch(noise, y2)
        # NOTE(review): train_on_batch returns [loss, accuracy] because the
        # models are compiled with metrics=['accuracy']; index 1 therefore
        # records the *accuracy* per epoch, not the loss.
        dLoss.append(d_loss[1])
        gLoss.append(g_loss[1])
        if e%plt_frq == 0:
            generate_image()
def generate_image():
    """Display a 5x5 grid of generator samples.

    The RNG seed is fixed (504) so every call samples the same latent
    vectors, making successive grids directly comparable during training.
    Uses the module-level generator ``g`` and ``z_dim``.
    """
    np.random.seed(504)
    h = w = 28
    num_gen = 25
    z = np.random.normal(size=[num_gen, z_dim])
    generated_images = g.predict(z)
    # plot of generation: tile the 25 flat samples into one 5x5 mosaic image
    n = np.sqrt(num_gen).astype(np.int32)
    I_generated = np.empty((h*n, w*n))
    for i in range(n):
        for j in range(n):
            I_generated[i*h:(i+1)*h, j*w:(j+1)*w] = generated_images[i*n+j, :].reshape(28, 28)
    plt.figure(figsize=(4, 4))
    plt.axis("off")
    plt.imshow(I_generated, cmap='gray')
    plt.show()
train(400, 20)
# Draw the loss history (one value per epoch).
# BUG FIX: the original plotted dLoss[0] / gLoss[0] -- a single scalar each,
# which renders an empty/meaningless plot -- instead of the full series.
plt.figure(figsize=(10, 8))
plt.plot(dLoss, label='Discriminitive loss')
plt.plot(gLoss, label='Generative loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')  # NOTE(review): the lists actually hold accuracies (see train)
plt.legend()
plt.show()
# serialize model to JSON
model_json = g.to_json()
with open("generator.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
g.save_weights("generator.h5")
print("Saved model to disk")
# Generate images from the fixed seed used during training previews
np.random.seed(504)
h = w = 28
num_gen = 25
z = np.random.normal(size=[num_gen, z_dim])
generated_images = g.predict(z)
# plot of generation: tile the 25 samples into a 5x5 mosaic
n = np.sqrt(num_gen).astype(np.int32)
I_generated = np.empty((h*n, w*n))
for i in range(n):
    for j in range(n):
        I_generated[i*h:(i+1)*h, j*w:(j+1)*w] = generated_images[i*n+j, :].reshape(28, 28)
plt.figure(figsize=(4, 4))
plt.axis("off")
plt.imshow(I_generated, cmap='gray')
plt.show()
|
[
"numpy.random.seed",
"numpy.empty",
"numpy.ones",
"keras.models.Model",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.random.normal",
"keras.layers.Input",
"matplotlib.pyplot.imshow",
"keras.layers.LeakyReLU",
"matplotlib.pyplot.show",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"keras.optimizers.Adam",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"keras.datasets.mnist.load_data",
"numpy.zeros",
"matplotlib.pyplot.axis",
"keras.layers.Dense",
"keras.models.Sequential",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((404, 421), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (419, 421), False, 'from keras.datasets import mnist\n'), ((581, 608), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (585, 608), False, 'from keras.optimizers import Adam\n'), ((626, 638), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (636, 638), False, 'from keras.models import Sequential, Model\n'), ((915, 927), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (925, 927), False, 'from keras.models import Sequential, Model\n'), ((1276, 1297), 'keras.layers.Input', 'Input', ([], {'shape': '(z_dim,)'}), '(shape=(z_dim,))\n', (1281, 1297), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((1343, 1364), 'keras.models.Model', 'Model', (['inputs', 'output'], {}), '(inputs, output)\n', (1348, 1364), False, 'from keras.models import Sequential, Model\n'), ((3443, 3470), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (3453, 3470), True, 'import matplotlib.pyplot as plt\n'), ((3471, 3518), 'matplotlib.pyplot.plot', 'plt.plot', (['dLoss[0]'], {'label': '"""Discriminitive loss"""'}), "(dLoss[0], label='Discriminitive loss')\n", (3479, 3518), True, 'import matplotlib.pyplot as plt\n'), ((3519, 3562), 'matplotlib.pyplot.plot', 'plt.plot', (['gLoss[0]'], {'label': '"""Generative loss"""'}), "(gLoss[0], label='Generative loss')\n", (3527, 3562), True, 'import matplotlib.pyplot as plt\n'), ((3563, 3582), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (3573, 3582), True, 'import matplotlib.pyplot as plt\n'), ((3583, 3601), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3593, 3601), True, 'import matplotlib.pyplot as plt\n'), ((3602, 3614), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3612, 3614), True, 'import matplotlib.pyplot as plt\n'), ((3615, 3625), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3623, 3625), True, 'import matplotlib.pyplot as plt\n'), ((3868, 3887), 'numpy.random.seed', 'np.random.seed', (['(504)'], {}), '(504)\n', (3882, 3887), True, 'import numpy as np\n'), ((3917, 3956), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_gen, z_dim]'}), '(size=[num_gen, z_dim])\n', (3933, 3956), True, 'import numpy as np\n'), ((4062, 4086), 'numpy.empty', 'np.empty', (['(h * n, w * n)'], {}), '((h * n, w * n))\n', (4070, 4086), True, 'import numpy as np\n'), ((4217, 4243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (4227, 4243), True, 'import matplotlib.pyplot as plt\n'), ((4244, 4259), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4252, 4259), True, 'import matplotlib.pyplot as plt\n'), ((4260, 4296), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I_generated'], {'cmap': '"""gray"""'}), "(I_generated, cmap='gray')\n", (4270, 4296), True, 'import matplotlib.pyplot as plt\n'), ((4297, 4307), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4305, 4307), True, 'import matplotlib.pyplot as plt\n'), ((645, 670), 'keras.layers.Dense', 'Dense', (['(256)'], {'input_dim': '(100)'}), '(256, input_dim=100)\n', (650, 670), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((680, 694), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (689, 694), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((702, 712), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (707, 712), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((720, 734), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (729, 734), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((742, 753), 'keras.layers.Dense', 'Dense', (['(1024)'], {}), '(1024)\n', (747, 753), False, 'from keras.layers import Input, Dense, LeakyReLU, 
Dropout\n'), ((761, 775), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (770, 775), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((783, 815), 'keras.layers.Dense', 'Dense', (['(784)'], {'activation': '"""sigmoid"""'}), "(784, activation='sigmoid')\n", (788, 815), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((934, 960), 'keras.layers.Dense', 'Dense', (['(1024)'], {'input_dim': '(784)'}), '(1024, input_dim=784)\n', (939, 960), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((970, 984), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (979, 984), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((992, 1004), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (999, 1004), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((1012, 1022), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (1017, 1022), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((1030, 1044), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1039, 1044), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((1052, 1064), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1059, 1064), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((1072, 1082), 'keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (1077, 1082), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((1090, 1104), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1099, 1104), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((1112, 1124), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1119, 1124), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((1132, 1162), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", 
(1137, 1162), False, 'from keras.layers import Input, Dense, LeakyReLU, Dropout\n'), ((2900, 2919), 'numpy.random.seed', 'np.random.seed', (['(504)'], {}), '(504)\n', (2914, 2919), True, 'import numpy as np\n'), ((2965, 3004), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[num_gen, z_dim]'}), '(size=[num_gen, z_dim])\n', (2981, 3004), True, 'import numpy as np\n'), ((3126, 3150), 'numpy.empty', 'np.empty', (['(h * n, w * n)'], {}), '((h * n, w * n))\n', (3134, 3150), True, 'import numpy as np\n'), ((3301, 3327), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (3311, 3327), True, 'import matplotlib.pyplot as plt\n'), ((3332, 3347), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3340, 3347), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3388), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I_generated'], {'cmap': '"""gray"""'}), "(I_generated, cmap='gray')\n", (3362, 3388), True, 'import matplotlib.pyplot as plt\n'), ((3393, 3403), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3401, 3403), True, 'import matplotlib.pyplot as plt\n'), ((4014, 4030), 'numpy.sqrt', 'np.sqrt', (['num_gen'], {}), '(num_gen)\n', (4021, 4030), True, 'import numpy as np\n'), ((2039, 2087), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(BATCH_SIZE, z_dim)'}), '(0, 1, size=(BATCH_SIZE, z_dim))\n', (2055, 2087), True, 'import numpy as np\n'), ((2214, 2261), 'numpy.concatenate', 'np.concatenate', (['(image_batch, generated_images)'], {}), '((image_batch, generated_images))\n', (2228, 2261), True, 'import numpy as np\n'), ((2306, 2330), 'numpy.zeros', 'np.zeros', (['(2 * BATCH_SIZE)'], {}), '(2 * BATCH_SIZE)\n', (2314, 2330), True, 'import numpy as np\n'), ((2562, 2610), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(BATCH_SIZE, z_dim)'}), '(0, 1, size=(BATCH_SIZE, z_dim))\n', (2578, 2610), True, 'import numpy as np\n'), ((2628, 2647), 'numpy.ones', 
'np.ones', (['BATCH_SIZE'], {}), '(BATCH_SIZE)\n', (2635, 2647), True, 'import numpy as np\n'), ((3074, 3090), 'numpy.sqrt', 'np.sqrt', (['num_gen'], {}), '(num_gen)\n', (3081, 3090), True, 'import numpy as np\n'), ((1909, 1964), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]'], {'size': 'BATCH_SIZE'}), '(0, X_train.shape[0], size=BATCH_SIZE)\n', (1926, 1964), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import math
import argparse
import itertools
import concurrent.futures
import pyproj
import numpy as np
import scipy.ndimage
from PIL import Image
from osgeo import gdal
# Raise Python exceptions on GDAL errors instead of printing them to stderr.
gdal.UseExceptions()
def num2deg(xtile, ytile, zoom):
    """Convert slippy-map tile coordinates to (lat, lon) in degrees.

    Inverse of deg2num; for integer tile numbers this yields the
    coordinates of the tile's north-west corner.
    """
    tiles_per_axis = 2 ** zoom
    lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / tiles_per_axis)))
    return (math.degrees(lat_rad), xtile / tiles_per_axis * 360 - 180)
def deg2num(lat, lon, zoom):
    """Convert (lat, lon) in degrees to fractional slippy-map tile coordinates."""
    tiles_per_axis = 2 ** zoom
    xtile = (lon + 180) / 360 * tiles_per_axis
    lat_rad = math.radians(lat)
    # Web-Mercator y in [-pi, pi] before normalisation to tile units
    mercator_y = math.log(math.tan(lat_rad) + 1/math.cos(lat_rad))
    ytile = (1 - mercator_y / math.pi) / 2 * tiles_per_axis
    return (xtile, ytile)
def transform_tile(imgdata, invmatrix, crs,
                   corner_x, corner_y, size, zoom, dirname, outname):
    """Warp the source raster onto one ``size``x``size`` tile and save it.

    ``imgdata`` is the raster as a bands-first array (band count is taken
    from ``imgdata.shape[0]``), ``invmatrix`` the inverse affine
    geo-transform, ``crs`` the raster's projection.  The tile is identified
    by its ``corner_x``/``corner_y`` indices at zoom level ``zoom`` and is
    written to ``dirname`` using the ``outname`` format string.
    Runs in a worker process, so the transformer is built locally.
    """
    projrev = pyproj.Transformer.from_crs("EPSG:4326", crs)
    newimgdata = np.zeros((imgdata.shape[0], size, size), dtype=float)
    # Inverse mapping: given an output pixel, return the source-image pixel.
    def transform(out_coords):
        # px -> [corner_xy + 0, corner_xy + 1] -> 4326 -> projrev -> img
        tile_x = corner_x + (out_coords[1] / size)
        tile_y = corner_y + (out_coords[0] / size)
        lat, lon = num2deg(tile_x, tile_y, zoom)
        local_xy = projrev.transform(lat, lon)
        img_xy = invmatrix.dot(np.array(local_xy + (1,)))[:2]
        return tuple(reversed(img_xy))
    for b, band in enumerate(imgdata):
        # Outside the source raster: colour bands (b < 3) fill with 255,
        # any further band (presumably alpha) fills with 0.
        newband = scipy.ndimage.geometric_transform(
            band.astype(float), transform, (size, size), float,
            mode='constant', cval=(255 if b < 3 else 0))
        newimgdata[b] = np.clip(newband, 0, 255)
    newim = Image.fromarray(np.rollaxis(newimgdata, 0, 3).astype('uint8'))
    outpath = os.path.join(dirname, outname.format(x=corner_x, y=corner_y, z=zoom))
    os.makedirs(os.path.dirname(outpath), exist_ok=True)
    newim.save(outpath)
def split_tile(imgfile, dirname, outname, zoom, size, proj=None, threads=None):
    """Cut a georeferenced image into TMS tiles at one zoom level.

    The pixel->geo affine transform is read from a sidecar ``.tfw`` world
    file when present, else from the GDAL geo-transform.  The image's
    geographic bounding box determines the covered tile range; each tile is
    then warped and saved by ``transform_tile`` in a process pool.
    """
    # img --aff--> local --proj--> 4326 -> tile xyz (corners)
    # -> 4326 --proj--> 3857 --> tile
    img = gdal.Open(imgfile)
    crs = proj or img.GetProjection() or "EPSG:3857"
    projfwd = pyproj.Transformer.from_crs(crs, "EPSG:4326")
    # World-file affine transform:
    # x' = a*x + b*y + c
    # y' = d*x + e*y + f
    try:
        tfwfile = os.path.splitext(imgfile)[0] + '.tfw'
        with open(tfwfile, 'r', encoding='utf-8') as f:
            content = tuple(map(float, f.read().strip().split()))
        imgmatrix = np.array((
            (content[0], content[2], content[4]),
            (content[1], content[3], content[5]),
            (0, 0, 1)
        ))
    except Exception:
        # Fall back to the GDAL geo-transform when the .tfw is missing/bad
        geotrans = img.GetGeoTransform()
        # (c, a, b, f, d, e)
        imgmatrix = np.array((
            (geotrans[1], geotrans[2], geotrans[0]),
            (geotrans[4], geotrans[5], geotrans[3]),
            (0, 0, 1)
        ))
    invmatrix = np.linalg.inv(imgmatrix)
    # Project the four image corners to lat/lon to find the tile range
    local_corners = imgmatrix.dot(np.array((
        (0, 0, 1),
        (img.RasterXSize, 0, 1),
        (0, img.RasterYSize, 1),
        (img.RasterXSize, img.RasterYSize, 1),
    )).T)[:2].T
    latlon_corners = np.array(tuple(
        projfwd.transform(*row) for row in local_corners))
    min_lat, min_lon = np.amin(latlon_corners, axis=0)
    max_lat, max_lon = np.amax(latlon_corners, axis=0)
    tile_x0, tile_y0 = deg2num(min_lat, min_lon, zoom)
    tile_x1, tile_y1 = deg2num(max_lat, max_lon, zoom)
    if tile_x0 > tile_x1:
        tile_x0, tile_x1 = tile_x1, tile_x0
    if tile_y0 > tile_y1:
        tile_y0, tile_y1 = tile_y1, tile_y0
    corners = tuple(itertools.product(
        range(math.floor(tile_x0), math.ceil(tile_x1)),
        range(math.floor(tile_y0), math.ceil(tile_y1))))
    totalnum = len(corners)
    imgdata = img.ReadAsArray()
    worker_num = threads or os.cpu_count()
    # One task per tile; the raster array is pickled to each worker
    with concurrent.futures.ProcessPoolExecutor(max_workers=worker_num) as exc:
        futures = []
        for corner_x, corner_y in corners:
            futures.append(exc.submit(
                transform_tile, imgdata, invmatrix, crs,
                corner_x, corner_y, size, zoom, dirname, outname))
        for k, future in enumerate(futures):
            future.result()
            print('Image %d/%d' % (k, totalnum))
def main():
    """Parse command-line arguments and split the input image per zoom level.

    Returns 0 on success, 1 on invalid arguments (after printing help).
    """
    parser = argparse.ArgumentParser(
        description="Split a big GeoTIFF image to TMS tiles.",
        epilog="-z is required")
    parser.add_argument("-z", "--zoom", help="zoom level(s), eg. 15 or 14-17")
    parser.add_argument("-n", "--name", default='{z}_{x}_{y}.png', help="image file name format, default {z}_{x}_{y}.png")
    parser.add_argument("-s", "--size", type=int, default=256,
                        help="image size in px, default 256px")
    parser.add_argument("-p", "--proj", help="set projection id")
    parser.add_argument("-t", "--threads", type=int, help="set thread number")
    parser.add_argument("inputfile", help="input GeoTIFF file")
    parser.add_argument("outputdir", help="output directory")
    args = parser.parse_args()
    # BUG FIX: argparse always sets the attribute (None when -z is omitted),
    # so the original `not hasattr(args, 'zoom')` check never fired and a
    # missing -z crashed later on `None.split('-')`.
    if args.zoom is None:
        parser.print_help()
        return 1
    zooms = args.zoom.split('-')
    try:
        if len(zooms) == 1:
            zoomrange = (int(zooms[0]),)
        elif len(zooms) == 2:
            zoomrange = range(int(zooms[0]), int(zooms[1])+1)
        else:
            raise ValueError
    except (TypeError, ValueError):
        parser.print_help()
        return 1
    for zoom in zoomrange:
        split_tile(
            args.inputfile, args.outputdir, args.name,
            zoom, args.size, args.proj, args.threads
        )
    return 0
if __name__ == '__main__':
    # Script entry point; the process exit status is main()'s return value.
    import sys
    sys.exit(main())
|
[
"numpy.amin",
"argparse.ArgumentParser",
"numpy.clip",
"math.radians",
"os.path.dirname",
"math.cos",
"numpy.rollaxis",
"math.sinh",
"math.ceil",
"osgeo.gdal.UseExceptions",
"numpy.linalg.inv",
"osgeo.gdal.Open",
"math.tan",
"numpy.zeros",
"math.floor",
"numpy.amax",
"os.cpu_count",
"numpy.array",
"os.path.splitext",
"pyproj.Transformer.from_crs"
] |
[((230, 250), 'osgeo.gdal.UseExceptions', 'gdal.UseExceptions', ([], {}), '()\n', (248, 250), False, 'from osgeo import gdal\n'), ((477, 494), 'math.radians', 'math.radians', (['lat'], {}), '(lat)\n', (489, 494), False, 'import math\n'), ((774, 819), 'pyproj.Transformer.from_crs', 'pyproj.Transformer.from_crs', (['"""EPSG:4326"""', 'crs'], {}), "('EPSG:4326', crs)\n", (801, 819), False, 'import pyproj\n'), ((837, 890), 'numpy.zeros', 'np.zeros', (['(imgdata.shape[0], size, size)'], {'dtype': 'float'}), '((imgdata.shape[0], size, size), dtype=float)\n', (845, 890), True, 'import numpy as np\n'), ((1990, 2008), 'osgeo.gdal.Open', 'gdal.Open', (['imgfile'], {}), '(imgfile)\n', (1999, 2008), False, 'from osgeo import gdal\n'), ((2076, 2121), 'pyproj.Transformer.from_crs', 'pyproj.Transformer.from_crs', (['crs', '"""EPSG:4326"""'], {}), "(crs, 'EPSG:4326')\n", (2103, 2121), False, 'import pyproj\n'), ((2821, 2845), 'numpy.linalg.inv', 'np.linalg.inv', (['imgmatrix'], {}), '(imgmatrix)\n', (2834, 2845), True, 'import numpy as np\n'), ((3158, 3189), 'numpy.amin', 'np.amin', (['latlon_corners'], {'axis': '(0)'}), '(latlon_corners, axis=0)\n', (3165, 3189), True, 'import numpy as np\n'), ((3213, 3244), 'numpy.amax', 'np.amax', (['latlon_corners'], {'axis': '(0)'}), '(latlon_corners, axis=0)\n', (3220, 3244), True, 'import numpy as np\n'), ((4207, 4315), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Split a big GeoTIFF image to TMS tiles."""', 'epilog': '"""-z is required"""'}), "(description=\n 'Split a big GeoTIFF image to TMS tiles.', epilog='-z is required')\n", (4230, 4315), False, 'import argparse\n'), ((1532, 1556), 'numpy.clip', 'np.clip', (['newband', '(0)', '(255)'], {}), '(newband, 0, 255)\n', (1539, 1556), True, 'import numpy as np\n'), ((1733, 1757), 'os.path.dirname', 'os.path.dirname', (['outpath'], {}), '(outpath)\n', (1748, 1757), False, 'import os\n'), ((3736, 3750), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (3748, 
3750), False, 'import os\n'), ((337, 377), 'math.sinh', 'math.sinh', (['(math.pi * (1 - 2 * ytile / n))'], {}), '(math.pi * (1 - 2 * ytile / n))\n', (346, 377), False, 'import math\n'), ((2383, 2484), 'numpy.array', 'np.array', (['((content[0], content[2], content[4]), (content[1], content[3], content[5]),\n (0, 0, 1))'], {}), '(((content[0], content[2], content[4]), (content[1], content[3],\n content[5]), (0, 0, 1)))\n', (2391, 2484), True, 'import numpy as np\n'), ((2655, 2763), 'numpy.array', 'np.array', (['((geotrans[1], geotrans[2], geotrans[0]), (geotrans[4], geotrans[5],\n geotrans[3]), (0, 0, 1))'], {}), '(((geotrans[1], geotrans[2], geotrans[0]), (geotrans[4], geotrans[5\n ], geotrans[3]), (0, 0, 1)))\n', (2663, 2763), True, 'import numpy as np\n'), ((1224, 1249), 'numpy.array', 'np.array', (['(local_xy + (1,))'], {}), '(local_xy + (1,))\n', (1232, 1249), True, 'import numpy as np\n'), ((1586, 1615), 'numpy.rollaxis', 'np.rollaxis', (['newimgdata', '(0)', '(3)'], {}), '(newimgdata, 0, 3)\n', (1597, 1615), True, 'import numpy as np\n'), ((2199, 2224), 'os.path.splitext', 'os.path.splitext', (['imgfile'], {}), '(imgfile)\n', (2215, 2224), False, 'import os\n'), ((3548, 3567), 'math.floor', 'math.floor', (['tile_x0'], {}), '(tile_x0)\n', (3558, 3567), False, 'import math\n'), ((3569, 3587), 'math.ceil', 'math.ceil', (['tile_x1'], {}), '(tile_x1)\n', (3578, 3587), False, 'import math\n'), ((3604, 3623), 'math.floor', 'math.floor', (['tile_y0'], {}), '(tile_y0)\n', (3614, 3623), False, 'import math\n'), ((3625, 3643), 'math.ceil', 'math.ceil', (['tile_y1'], {}), '(tile_y1)\n', (3634, 3643), False, 'import math\n'), ((2880, 2995), 'numpy.array', 'np.array', (['((0, 0, 1), (img.RasterXSize, 0, 1), (0, img.RasterYSize, 1), (img.\n RasterXSize, img.RasterYSize, 1))'], {}), '(((0, 0, 1), (img.RasterXSize, 0, 1), (0, img.RasterYSize, 1), (img\n .RasterXSize, img.RasterYSize, 1)))\n', (2888, 2995), True, 'import numpy as np\n'), ((576, 591), 'math.tan', 'math.tan', 
(['lat_r'], {}), '(lat_r)\n', (584, 591), False, 'import math\n'), ((596, 611), 'math.cos', 'math.cos', (['lat_r'], {}), '(lat_r)\n', (604, 611), False, 'import math\n')]
|
"""
Use the EMNIST datasets to test for SGD convergence vs randomization
"""
# ===============================================================================
#
# Imports
#
# ===============================================================================
from __future__ import print_function
import os
import argparse
import keras
from keras.datasets import mnist, fashion_mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.optimizers import RMSprop, adam
import scipy.io
import numpy as np
import pickle
import multiprocessing
# ===============================================================================
#
# Function definitions
#
# ===============================================================================
# Function to load emnist datasets
# Function to load emnist datasets
def load_emnist(data="byclass"):
    """Load one EMNIST split from the local matlab files.

    Returns ``(num_classes, (x_train, y_train), (x_test, y_test))`` with the
    images flattened to 784-vectors of float32 scaled to [0, 1].
    """
    mat = scipy.io.loadmat("../../data/emnist-dataset/matlab/emnist-%s.mat" % data)
    dataset = mat["dataset"]
    train_split = dataset[0][0][0][0][0]
    test_split = dataset[0][0][1][0][0]
    x_train = train_split[0].astype(np.float32) / 255
    y_train = train_split[1]
    x_test = test_split[0].astype(np.float32) / 255
    y_test = test_split[1]
    num_classes = np.unique(y_train).size
    # The matlab files store images column-major: reshape with order="A"
    # before flattening back to 784-vectors.
    n_train, n_test = x_train.shape[0], x_test.shape[0]
    x_train = x_train.reshape(n_train, 1, 28, 28, order="A").reshape(n_train, 784)
    x_test = x_test.reshape(n_test, 1, 28, 28, order="A").reshape(n_test, 784)
    if data == "letters":
        # The letters split labels classes 1-26; shift them to 0-25.
        y_train = y_train - 1
        y_test = y_test - 1
    return (num_classes, (x_train, y_train), (x_test, y_test))
# ===============================================================================
# Function to load the keras mnist dataset
def load_mnist(data="digits"):
    """Load keras MNIST (or Fashion-MNIST) as flat float32 vectors in [0, 1]."""
    loader = fashion_mnist.load_data if data == "fashion" else mnist.load_data
    (x_train, y_train), (x_test, y_test) = loader()
    x_train = x_train.reshape(60000, 784).astype("float32") / 255
    x_test = x_test.reshape(10000, 784).astype("float32") / 255
    num_classes = np.unique(y_train).size
    return (num_classes, (x_train, y_train), (x_test, y_test))
# ===============================================================================
# Function to train a feed forward neural network with dropout regularization
def train_model_ff(x_train, y_train, x_test, y_test, shuffle=False):
    """Train a 784-512-512-num_classes MLP with dropout and report test scores.

    NOTE(review): relies on the module-level globals `num_classes`,
    `batch_size` and `epochs` assigned in the __main__ block — this
    function only works when run through this script.

    :param shuffle: forwarded to model.fit; False preserves the caller's
        custom mini-batch ordering.
    :return: the keras History object from fit().
    """
    model = Sequential()
    model.add(Dense(512, activation="relu", input_shape=(784,)))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation="relu"))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation="softmax"))
    model.summary()
    model.compile(
        loss="categorical_crossentropy", optimizer=adam(), metrics=["accuracy"]
    )
    history = model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(x_test, y_test),
        shuffle=shuffle,
    )
    # Final evaluation on the held-out test split.
    score = model.evaluate(x_test, y_test, verbose=0)
    print("Test loss:", score[0])
    print("Test accuracy:", score[1])
    return history
# ===============================================================================
# Function to train a CNN with dropout regularization
def train_model_cnn(
    num_classes, x_train, y_train, x_test, y_test, shuffle=False, batch_size=128
):
    """Train a small two-conv-layer CNN with dropout on 28x28 inputs.

    NOTE(review): reads the module-level global `epochs` assigned in the
    __main__ block.

    :param shuffle: forwarded to model.fit; False preserves the caller's
        custom mini-batch ordering.
    :return: (trained model, keras History)
    """
    # Restore the image shape from the flat 784-vectors.
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    model = Sequential()
    model.add(
        Conv2D(
            filters=32,
            kernel_size=3,
            padding="same",
            activation="relu",
            input_shape=(28, 28, 1),
        )
    )
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.3))
    model.add(Conv2D(filters=64, kernel_size=3, padding="same", activation="relu"))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Dropout(0.3))
    model.add(Flatten())
    model.add(Dense(256, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation="softmax"))
    model.summary()
    model.compile(
        loss="categorical_crossentropy", optimizer=adam(), metrics=["accuracy"]
    )
    history = model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(x_test, y_test),
        shuffle=shuffle,
    )
    # Final evaluation on the held-out test split.
    score = model.evaluate(x_test, y_test, verbose=0)
    print("Test loss:", score[0])
    print("Test accuracy:", score[1])
    return (model, history)
# ===============================================================================
# Function to do custom batch ordering
def custom_batch_order(x_train, y_train, order="block", batch_size=128):
    """Reorder (x_train, y_train) for the batch-ordering study.

    order="shuffled": return the data untouched (keras shuffles per epoch).
    order="sorted":   return the data sorted by (one-hot) class label.
    order="block":    sort by class, then shuffle whole batch-sized blocks,
                      so each mini-batch sees mostly one class while batches
                      arrive in random class order.
    """
    if order == "shuffled":
        return (x_train, y_train)
    # Sort samples by their one-hot label rows.
    sort_idx = np.lexsort(y_train.T)
    x_sorted = x_train[sort_idx]
    y_sorted = y_train[sort_idx]
    if order == "sorted":
        return (x_sorted, y_sorted)
    # "block": permute batch-sized blocks of the class-sorted data.
    n_samples = y_train.shape[0]
    batches = np.arange(np.ceil(n_samples / batch_size))
    np.random.shuffle(batches)
    block_ranges = [np.arange(b * batch_size, (b + 1) * batch_size) for b in batches]
    new_idx = np.array(block_ranges, dtype=int).flatten()
    # Drop indices that run past the end of the (possibly ragged) last block.
    new_idx = new_idx[new_idx < n_samples]
    return (x_sorted[new_idx], y_sorted[new_idx])
# ===============================================================================
def get_results(dataset, save_path, batch_size=128):
    """Train the CNN on `dataset` under three batch orderings and save results.

    Trains with "shuffled", "block" and "sorted" mini-batch orderings,
    saves the last trained model as <save_path>/<dataset>-<batch_size>.h5
    and every ordering's training history to the matching .pkl file.
    Uses the module-level `epochs` global via train_model_cnn.
    """
    # Load data
    if dataset == "fashion":
        num_classes, (x_train, y_train), (x_test, y_test) = load_mnist("fashion")
    else:
        num_classes, (x_train, y_train), (x_test, y_test) = load_emnist(dataset)
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    orders = ["shuffled", "block", "sorted"]
    histories = {}
    for ordering in orders:
        # Custom order data for study
        x_train_new, y_train_new = custom_batch_order(
            x_train, y_train, ordering, batch_size
        )
        # Finally, train the model, print scores, etc.
        # shuffle=True only for the "shuffled" ordering; the other two
        # must keep their constructed batch order.
        model, history = train_model_cnn(
            num_classes,
            x_train_new,
            y_train_new,
            x_test,
            y_test,
            ordering == "shuffled",
            batch_size,
        )
        histories[ordering] = history.history
    # NOTE(review): only the model from the last ordering ("sorted") is
    # saved; the histories dict covers all three orderings.
    model_string = "%s/%s-%d" % (save_path, dataset, batch_size)
    model.save("%s.h5" % model_string)
    with open("%s.pkl" % (model_string), "wb") as fw:
        pickle.dump(histories, fw)
    return histories
# ===============================================================================
#
# Main
#
# ===============================================================================
if __name__ == "__main__":
    # Parse arguments
    parser = argparse.ArgumentParser(description="Run classification problems")
    parser.add_argument(
        "-d",
        "--datasets",
        nargs="+",
        help="""Dataset to run ["fashion", "digits", "letters", "byclass", "bymerge", "balanced", "mnist"]""",
        type=str,
        default=["fashion"],
    )
    args = parser.parse_args()
    # Deterministic results in this randomization study!
    np.random.seed(28)
    # Init params
    batch_sizes_base = [32, 64, 128, 256, 512, 1024, 2048]
    # Per-dataset batch-size sweeps (larger datasets get larger sizes).
    batch_size_dataset = {
        "fashion": [32, 64, 128, 256, 512, 1024, 2048, 4096, 5120],
        "digits": [32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 20480],
        "balanced": [32, 64, 128, 256, 512, 1024, 1536, 2048],
        "mnist": [32, 64, 128, 256, 512, 1024, 2048, 4096, 5120],
        "letters": [32, 64, 128, 256, 512, 1024, 2048, 4096],
        "byclass": batch_sizes_base + [4096],
        "bymerge": batch_sizes_base + [4096],
    }
    # `epochs` and the loop variable `batch_size` are read as globals by
    # the train_model_* functions above.
    epochs = 50
    save_path = "batch_size_study"
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    for dataset in args.datasets:
        for batch_size in batch_size_dataset[dataset]:
            res = get_results(dataset, save_path, batch_size)
|
[
"pickle.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.arange",
"numpy.unique",
"keras.optimizers.adam",
"keras.layers.Flatten",
"os.path.exists",
"keras.layers.MaxPooling2D",
"numpy.random.shuffle",
"keras.utils.to_categorical",
"numpy.ceil",
"keras.layers.Dropout",
"keras.layers.Conv2D",
"os.makedirs",
"numpy.lexsort",
"keras.datasets.mnist.load_data",
"keras.layers.Dense",
"keras.models.Sequential",
"keras.datasets.fashion_mnist.load_data"
] |
[((2788, 2800), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2798, 2800), False, 'from keras.models import Sequential\n'), ((3871, 3883), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3881, 3883), False, 'from keras.models import Sequential\n'), ((5282, 5303), 'numpy.lexsort', 'np.lexsort', (['y_train.T'], {}), '(y_train.T)\n', (5292, 5303), True, 'import numpy as np\n'), ((5781, 5807), 'numpy.random.shuffle', 'np.random.shuffle', (['batches'], {}), '(batches)\n', (5798, 5807), True, 'import numpy as np\n'), ((6463, 6511), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (6489, 6511), False, 'import keras\n'), ((6525, 6572), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (6551, 6572), False, 'import keras\n'), ((7601, 7667), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run classification problems"""'}), "(description='Run classification problems')\n", (7624, 7667), False, 'import argparse\n'), ((8005, 8023), 'numpy.random.seed', 'np.random.seed', (['(28)'], {}), '(28)\n', (8019, 8023), True, 'import numpy as np\n'), ((1166, 1184), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (1175, 1184), True, 'import numpy as np\n'), ((2142, 2167), 'keras.datasets.fashion_mnist.load_data', 'fashion_mnist.load_data', ([], {}), '()\n', (2165, 2167), False, 'from keras.datasets import mnist, fashion_mnist\n'), ((2225, 2242), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (2240, 2242), False, 'from keras.datasets import mnist, fashion_mnist\n'), ((2458, 2476), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (2467, 2476), True, 'import numpy as np\n'), ((2815, 2864), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""', 'input_shape': '(784,)'}), "(512, activation='relu', input_shape=(784,))\n", (2820, 
2864), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2880, 2892), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2887, 2892), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2908, 2937), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (2913, 2937), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2953, 2965), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2960, 2965), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2981, 3021), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (2986, 3021), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((3907, 4004), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(28, 28, 1)'}), "(filters=32, kernel_size=3, padding='same', activation='relu',\n input_shape=(28, 28, 1))\n", (3913, 4004), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((4092, 4117), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (4104, 4117), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((4133, 4145), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (4140, 4145), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((4161, 4229), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=3, padding='same', activation='relu')\n", (4167, 4229), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((4245, 4270), 
'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (4257, 4270), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((4286, 4298), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (4293, 4298), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((4314, 4323), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4321, 4323), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((4339, 4368), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (4344, 4368), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((4384, 4396), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4391, 4396), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((4412, 4452), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (4417, 4452), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((5737, 5775), 'numpy.ceil', 'np.ceil', (['(y_train.shape[0] / batch_size)'], {}), '(y_train.shape[0] / batch_size)\n', (5744, 5775), True, 'import numpy as np\n'), ((7313, 7339), 'pickle.dump', 'pickle.dump', (['histories', 'fw'], {}), '(histories, fw)\n', (7324, 7339), False, 'import pickle\n'), ((8631, 8656), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (8645, 8656), False, 'import os\n'), ((8666, 8688), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (8677, 8688), False, 'import os\n'), ((3115, 3121), 'keras.optimizers.adam', 'adam', ([], {}), '()\n', (3119, 3121), False, 'from keras.optimizers import RMSprop, adam\n'), ((4546, 4552), 'keras.optimizers.adam', 'adam', ([], {}), '()\n', (4550, 4552), False, 'from keras.optimizers import RMSprop, adam\n'), ((5841, 5888), 
'numpy.arange', 'np.arange', (['(i * batch_size)', '((i + 1) * batch_size)'], {}), '(i * batch_size, (i + 1) * batch_size)\n', (5850, 5888), True, 'import numpy as np\n')]
|
import copy
import time
import numpy as np
import open3d
import torch
import torch.nn.functional as F
# from lapsolver import solve_dense
from matplotlib import cm
from open3d import *
from open3d import *
from torch.autograd import Function
from train_open_spline_utils.src.VisUtils import tessalate_points
from train_open_spline_utils.src.curve_utils import DrawSurfs
from train_open_spline_utils.src.eval_utils import to_one_hot
from train_open_spline_utils.src.guard import guard_exp
from train_open_spline_utils.src.segment_utils import to_one_hot, matching_iou, relaxed_iou, \
relaxed_iou_fast
from train_open_spline_utils.src.utils import draw_geometries
from train_open_spline_utils.src.utils import visualize_point_cloud
# Convenience aliases for open3d's vector constructors.
Vector3dVector, Vector3iVector = utility.Vector3dVector, utility.Vector3iVector
draw_surf = DrawSurfs()
# float32 machine epsilon; used to guard divisions throughout this module.
EPS = float(np.finfo(np.float32).eps)
# Fix seeds for reproducibility.
torch.manual_seed(2)
np.random.seed(2)
# NOTE(review): `draw_surf` is constructed a second time here, duplicating
# the assignment a few lines above; one of the two can likely be removed.
draw_surf = DrawSurfs()
regular_parameters = draw_surf.regular_parameterization(30, 30)
class LeastSquares:
    """Differentiable least-squares solver (see lstsq)."""
    def __init__(self):
        pass
    def lstsq(self, A, Y, lamb=0.0):
        """
        Differentiable least square.
        Solves A x = Y via QR when A has full column rank, otherwise falls
        back to ridge-regularized normal equations with the smallest lambda
        that makes A^T A invertible (see best_lambda).
        :param A: m x n
        :param Y: n x 1
        """
        cols = A.shape[1]
        # Debug hook: drops into an interactive ipdb session if A contains
        # infs. NOTE(review): should not be left in production code.
        if np.isinf(A.data.cpu().numpy()).any():
            import ipdb;
            ipdb.set_trace()
        # Assuming A to be full column rank
        if cols == torch.matrix_rank(A):
            # Full column rank
            # NOTE(review): torch.qr is deprecated in modern torch
            # (torch.linalg.qr is the replacement).
            q, r = torch.qr(A)
            x = torch.inverse(r) @ q.transpose(1, 0) @ Y
        else:
            # rank(A) < n, do regularized least square.
            AtA = A.transpose(1, 0) @ A
            # get the smallest lambda that suits our purpose, so that error in
            # results minimized.
            with torch.no_grad():
                lamb = best_lambda(AtA)
            A_dash = AtA + lamb * torch.eye(cols, device=A.get_device())
            Y_dash = A.transpose(1, 0) @ Y
            # if it still doesn't work, just set the lamb to be very high value.
            x = self.lstsq(A_dash, Y_dash, 1)
        return x
def best_lambda(A):
    """
    Takes an under determined system and small lambda value,
    and comes up with lambda that makes the matrix A + lambda I
    invertible. Assuming A to be square matrix.
    """
    lamb = 1e-6
    cols = A.shape[0]
    # Try at most 7 rounds, multiplying lambda by 10 each time the
    # regularized matrix is still rank-deficient.
    for i in range(7):
        A_dash = A + lamb * torch.eye(cols, device=A.get_device())
        if cols == torch.matrix_rank(A_dash):
            # we achieved the required rank
            break
        else:
            # factor by which to increase the lambda. Choosing 10 for performance.
            lamb *= 10
    return lamb
def up_sample_all(points, normals, weights, cluster_ids, primitives, labels):
    """Double the point set by inserting nearest-neighbor centroids.

    For every point the centroid of its 3 closest points (itself
    included, since the self-distance is zero) is appended as a new
    point; all per-point attributes are duplicated alongside.  `weights`
    is concatenated along axis 1 (it is K x N); everything else along
    axis 0.
    """
    pairwise = np.expand_dims(points, 1) - np.expand_dims(points, 0)
    sq_dist = np.sum(pairwise ** 2, 2)
    order = np.argsort(sq_dist, 1)
    centroids = np.mean(points[order[:, 0:3]], 1)
    return (
        np.concatenate([points, centroids]),
        np.concatenate([normals, normals]),
        np.concatenate([weights, weights], 1),
        np.concatenate([primitives, primitives]),
        np.concatenate([cluster_ids, cluster_ids]),
        np.concatenate([labels, labels]),
    )
def up_sample_points(points, times=1):
    """
    Upsamples points based on nearest neighbors.
    Each pass doubles every cloud in the batch by appending, per point,
    the centroid of its 3 nearest points (self included — the self
    distance is zero, so it is always among the top-3 smallest).
    :param points: assumed B x 3 x N tensor (permuted to B x N x 3
        internally) — TODO confirm against callers. Work happens on CPU;
        the result is moved to CUDA.
    """
    points = points.data.cpu()
    batch_size = points.shape[0]
    points = points.permute(0, 2, 1)
    for t in range(times):
        Points = []
        for b in range(batch_size):
            # Full pairwise squared distances within this cloud.
            dist = torch.unsqueeze(points[b], 1) - torch.unsqueeze(points[b], 0)
            dist = torch.sum(dist ** 2, 2)
            _, indices = torch.topk(dist, k=3, dim=1, largest=False)
            neighbors = points[b][indices]
            centers = torch.mean(neighbors, 1)
            new_points = torch.cat([points[b], centers])
            Points.append(new_points)
        points = torch.stack(Points, 0)
    return points.permute(0, 2, 1).cuda()
def up_sample_points_numpy(points, times=1):
    """Upsample a point set `times` times by nearest-neighbor centroids.

    Each pass appends, for every point, the centroid of its 3 closest
    points (the point itself included — the self distance is zero), so
    the point count doubles per pass.
    :param points: N x 3 array (any feature dimension works)
    """
    for _ in range(times):
        delta = np.expand_dims(points, 1) - np.expand_dims(points, 0)
        sq_dist = np.sum(delta ** 2, 2)
        nearest = np.argsort(sq_dist, 1)[:, 0:3]
        centroids = np.mean(points[nearest], 1)
        points = np.concatenate([points, centroids])
    return points
def up_sample_points_torch(points, times=1):
    """Upsample `times` times by appending, for each point, the centroid
    of its 4 nearest neighbors (self excluded); the count doubles per pass.

    :param points: N x 3 tensor (N >= 5)
    """
    for _ in range(times):
        delta = torch.unsqueeze(points, 1) - torch.unsqueeze(points, 0)
        sq_dist = torch.sum(delta ** 2, 2)
        _, knn = torch.topk(sq_dist, 5, 1, largest=False)
        # knn[:, 0] is the point itself (zero self-distance); average the
        # remaining 4 true neighbors.
        centroids = torch.mean(points[knn[:, 1:]], 1)
        points = torch.cat([points, centroids])
    return points
def up_sample_points_torch_memory_efficient(points, times=1):
    """Upsample by nearest-neighbor centroids, computing the pairwise
    distance matrix in row chunks to bound peak memory.

    Each pass appends, for every point, the centroid of its 5 nearest
    points, doubling the point count.  NOTE(review): the 5 neighbors
    include the point itself (`indices[:, 0:]`), unlike
    up_sample_points_torch which skips index 0 — kept as-is to preserve
    existing behavior.

    :param points: N x 3 tensor
    """
    for t in range(times):
        indices = []
        chunk = min(points.shape[0], 100)
        # Bug fix: the original used floor division for the chunk count,
        # silently dropping the trailing `N % 100` points whenever
        # N > 100 and N is not a multiple of 100. Ceil-divide instead so
        # every point gets neighbors.
        num_chunks = (points.shape[0] + chunk - 1) // chunk
        for i in range(num_chunks):
            block = points[i * chunk:(i + 1) * chunk]
            diff_ = torch.sum((torch.unsqueeze(block, 1) - torch.unsqueeze(points, 0)) ** 2, 2)
            _, diff_indices = torch.topk(diff_, 5, 1, largest=False)
            indices.append(diff_indices)
        indices = torch.cat(indices, 0)
        neighbors = points[indices[:, 0:]]
        centers = torch.mean(neighbors, 1)
        points = torch.cat([points, centers])
    return points
def dist_memory_efficient(p, q):
    """Row-chunked squared pairwise distances between two point sets.

    Computes ||p_i - q_j||^2 one row of `p` at a time to bound peak
    memory; returns a (len(p), len(q)) numpy array.
    """
    rows = []
    for i in range(p.shape[0]):
        row = torch.sum((torch.unsqueeze(p[i:i + 1], 1) - torch.unsqueeze(q, 0)) ** 2, 2)
        rows.append(row.data.cpu().numpy())
    # Bug fix: the original called the nonexistent np.concantenate
    # (typo), which raised AttributeError on every invocation.
    return np.concatenate(rows, 0)
def up_sample_points_in_range(points, weights, a_min, a_max):
    """Resample (points, weights) to exactly `a_max` elements.

    With more than a_max points a random subset is kept; otherwise the
    cloud is upsampled at least once (duplicating weights alongside)
    until a_max candidates exist, then subsampled.  `a_min` is accepted
    but unused.
    """
    if points.shape[0] > a_max:
        keep = np.random.choice(np.arange(points.shape[0]), a_max, replace=False)
        return points[keep], weights[keep]
    # Always upsample at least once before subsampling.
    while True:
        points = up_sample_points_torch(points)
        weights = torch.cat([weights, weights], 0)
        if points.shape[0] >= a_max:
            break
    keep = np.random.choice(np.arange(points.shape[0]), a_max, replace=False)
    return points[keep], weights[keep]
def up_sample_points_torch_in_range(points, a_min, a_max):
    """Resample `points` to exactly `a_max` elements.

    With more than a_max points a random subset is kept; otherwise the
    cloud is upsampled at least once until a_max candidates exist, then
    subsampled.  `a_min` is accepted but unused.
    """
    count = points.shape[0]
    if count > a_max:
        keep = np.random.choice(np.arange(count), a_max, replace=False)
        return points[keep]
    # Always upsample at least once before subsampling.
    while True:
        points = up_sample_points_torch(points)
        if points.shape[0] >= a_max:
            break
    keep = np.random.choice(np.arange(points.shape[0]), a_max, replace=False)
    return points[keep]
def create_grid(input, grid_points, size_u, size_v, thres=0.02):
    """Flag which cells of a fitted size_u x size_v grid lie near the input.

    Averages each 2x2 cell of grid corners with a conv filter, measures
    each cell center's distance to the input cloud, and marks cells whose
    nearest input point is within `thres`.  Runs on CUDA.
    Returns (mask_grid, diff, filter, grid_mean_points).
    """
    grid_points = torch.from_numpy(grid_points.astype(np.float32)).cuda()
    input = torch.from_numpy(input.astype(np.float32)).cuda()
    grid_points = grid_points.reshape((size_u, size_v, 3))
    # NOTE(review): the result of this permute is discarded (no
    # assignment) — likely dead code.
    grid_points.permute(2, 0, 1)
    grid_points = torch.unsqueeze(grid_points, 0)
    # 2x2 averaging filter on the x channel; np.roll shifts the weights
    # to the y and z channels for the other two output maps.
    filter = np.array([[[0.25, 0.25], [0.25, 0.25]],
                       [[0, 0], [0, 0]],
                       [[0.0, 0.0], [0.0, 0.0]]]).astype(np.float32)
    filter = np.stack([filter, np.roll(filter, 1, 0), np.roll(filter, 2, 0)])
    filter = torch.from_numpy(filter).cuda()
    grid_mean_points = F.conv2d(grid_points.permute(0, 3, 1, 2), filter, padding=0)
    grid_mean_points = grid_mean_points.permute(0, 2, 3, 1)
    grid_mean_points = grid_mean_points.reshape(((size_u - 1) * (size_v - 1), 3))
    if True:
        # Chunked distance computation (one cell-center row at a time)
        # to bound peak memory.
        # diff = (torch.unsqueeze(grid_mean_points, 1) - torch.unsqueeze(input, 0)) ** 2
        diff = []
        for i in range(grid_mean_points.shape[0]):
            diff.append(torch.sum((torch.unsqueeze(grid_mean_points[i:i + 1], 1) - torch.unsqueeze(input, 0)) ** 2, 2))
        diff = torch.cat(diff, 0)
        diff = torch.sqrt(diff)
        indices = torch.min(diff, 1)[0] < thres
    else:
        # Dead numpy branch kept for reference.
        grid_mean_points = grid_mean_points.data.cpu().numpy()
        input = input.data.cpu().numpy()
        diff = (np.expand_dims(grid_mean_points, 1) - np.expand_dims(input, 0)) ** 2
        diff = np.sqrt(np.sum(diff, 2))
        indices = np.min(diff, 1) < thres
    mask_grid = indices.reshape(((size_u - 1), (size_v - 1)))
    return mask_grid, diff, filter, grid_mean_points
def tessalate_points_fast(points, size_u, size_v, mask=None, viz=False):
    """Triangulate a size_u x size_v grid of points into a mesh.

    Each grid cell becomes two triangles; cells where `mask` is 0 are
    skipped.  Returns an open3d TriangleMesh with vertex normals
    computed (optionally visualized).
    """
    def flat_id(row, col):
        # Row-major index of grid vertex (row, col).
        return row * size_v + col
    faces = []
    for row in range(0, size_u - 1):
        for col in range(0, size_v - 1):
            if mask is not None and mask[row, col] == 0:
                continue
            faces.append([flat_id(row, col), flat_id(row + 1, col), flat_id(row + 1, col + 1)])
            faces.append([flat_id(row, col), flat_id(row + 1, col + 1), flat_id(row, col + 1)])
    new_mesh = geometry.TriangleMesh()
    new_mesh.triangles = utility.Vector3iVector(np.array(faces))
    new_mesh.vertices = utility.Vector3dVector(np.stack(points, 0))
    new_mesh.remove_unreferenced_vertices()
    new_mesh.compute_vertex_normals()
    if viz:
        draw_geometries([new_mesh])
    return new_mesh
def weights_normalize(weights, bw):
    """Turn cluster-center dot products into per-point probabilities.

    Applies the mean-shift Gaussian kernel with bandwidth `bw`, then
    normalizes across clusters (dim 0).  Each cluster row is finally
    rescaled to [0, 1] so the probability peaks at 1 near the center,
    which helps the spline fitting network.
    """
    prob = guard_exp(weights / (bw ** 2) / 2)
    prob = prob / torch.sum(prob, 0, keepdim=True)
    if weights.shape[0] == 1:
        # Single cluster: per-row rescaling below would be degenerate.
        return prob
    shifted = prob - torch.min(prob, 1, keepdim=True)[0]
    return shifted / (torch.max(shifted, 1, keepdim=True)[0] + EPS)
def one_hot_normalization(weights):
    """Harden soft weights: each row becomes one-hot at its argmax."""
    _, num_clusters = weights.shape
    hard = to_one_hot(np.argmax(weights, 1), num_clusters)
    return hard.float()
# def SIOU(target, pred_labels):
# """
# First it computes the matching using hungarian matching
# between predicted and groun truth labels.
# Then it computes the iou score, starting from matching pairs
# coming out from hungarian matching solver. Note that
# it is assumed that the iou is only computed over matched pairs.
# That is to say, if any column in the matched pair has zero
# number of points, that pair is not considered.
# """
# labels_one_hot = to_one_hot(target)
# cluster_ids_one_hot = to_one_hot(pred_labels)
# cost = relaxed_iou(torch.unsqueeze(cluster_ids_one_hot, 0).double(), torch.unsqueeze(labels_one_hot, 0).double())
# cost_ = 1.0 - torch.as_tensor(cost)
# cost_ = cost_.data.cpu().numpy()
# matching = []
# for b in range(1):
# rids, cids = solve_dense(cost_[b])
# matching.append([rids, cids])
# s_iou = matching_iou(matching, np.expand_dims(pred_labels, 0), np.expand_dims(target, 0))
# return s_iou
# def match(target, pred_labels):
# labels_one_hot = to_one_hot(target)
# cluster_ids_one_hot = to_one_hot(pred_labels)
# # cost = relaxed_iou(torch.unsqueeze(cluster_ids_one_hot, 0).float(), torch.unsqueeze(labels_one_hot, 0).float())
# # cost_ = 1.0 - torch.as_tensor(cost)
# cost = relaxed_iou_fast(torch.unsqueeze(cluster_ids_one_hot, 0).float(), torch.unsqueeze(labels_one_hot, 0).float())
# # cost_ = 1.0 - torch.as_tensor(cost)
# cost_ = 1.0 - cost.data.cpu().numpy()
# rids, cids = solve_dense(cost_[0])
# unique_target = np.unique(target)
# unique_pred = np.unique(pred_labels)
# return rids, cids, unique_target, unique_pred
def visualize_weighted_points(points, w, normals=None, viz=False):
    """Render `points` colored by weight `w` through the seismic colormap."""
    rgb = cm.get_cmap("seismic")(w)[:, 0:3]
    return visualize_point_cloud(points, colors=rgb, normals=normals, viz=viz)
def compute_grad_V(U, S, V, grad_V):
    """Gradient of the SVD input given the gradient w.r.t. V.

    Uses the guarded K matrix from svd_grad_K (eq. 13 of the paper cited
    on CustomSVD); runs on CUDA.
    """
    N = S.shape[0]
    K = svd_grad_K(S)
    # Diagonalize S on the same device as the inputs.
    S = torch.eye(N).cuda(S.get_device()) * S.reshape((N, 1))
    inner = K.T * (V.T @ grad_V)
    # Symmetrize the inner term.
    inner = (inner + inner.T) / 2.0
    return 2 * U @ S @ inner @ V.T
def svd_grad_K(S):
    """Pairwise inverse singular-value-gap matrix used in the SVD backward.

    Guards 1/(s_i - s_j) against equal singular values by clamping small
    differences (and the diagonal) to 1e-6 before inversion; runs on CUDA.
    """
    N = S.shape[0]
    s1 = S.view((1, N))
    s2 = S.view((N, 1))
    diff = s2 - s1
    plus = s2 + s1
    # TODO Look into it
    eps = torch.ones((N, N)) * 10 ** (-6)
    eps = eps.cuda(S.get_device())
    # Clamp |s_i - s_j| away from zero while keeping its sign.
    max_diff = torch.max(torch.abs(diff), eps)
    sign_diff = torch.sign(diff)
    K_neg = sign_diff * max_diff
    # guard the matrix inversion: the diagonal would otherwise be zero
    K_neg[torch.arange(N), torch.arange(N)] = 10 ** (-6)
    K_neg = 1 / K_neg
    K_pos = 1 / plus
    ones = torch.ones((N, N)).cuda(S.get_device())
    # Zero out the diagonal of the final K.
    rm_diag = ones - torch.eye(N).cuda(S.get_device())
    K = K_neg * K_pos * rm_diag
    return K
class CustomSVD(Function):
    """
    Custom SVD to deal with the situations when the
    singular values are equal. In this case, if dealt
    normally the gradient w.r.t to the input goes to inf.
    To deal with this situation, we replace the entries of
    a K matrix from eq: 13 in https://arxiv.org/pdf/1509.07838.pdf
    to high value.
    Note: only applicable for the tall and square matrix and doesn't
    give correct gradients for fat matrix. Maybe transpose of the
    original matrix is requires to deal with this situation. Left for
    future work.
    """

    @staticmethod
    def forward(ctx, input):
        # Note: input is matrix of size m x n with m >= n.
        # Note: if above assumption is violated, the gradients
        # will be wrong.
        # Bug fix: the original wrapped torch.svd in a bare `except:`
        # that dropped into an interactive ipdb debugger and then
        # referenced the undefined U, S, V anyway — failures now just
        # propagate normally.
        U, S, V = torch.svd(input, some=True)
        ctx.save_for_backward(U, S, V)
        return U, S, V

    @staticmethod
    def backward(ctx, grad_U, grad_S, grad_V):
        # Only the gradient through V is propagated (see compute_grad_V).
        U, S, V = ctx.saved_tensors
        grad_input = compute_grad_V(U, S, V, grad_V)
        return grad_input


customsvd = CustomSVD.apply
def standardize_points(points):
    """Standardize each point cloud in a batch independently.

    Returns the stacked standardized clouds plus the per-cloud stds,
    means and rotation matrices needed to undo the transform.
    """
    clouds, stds, means, rotations = [], [], [], []
    for cloud in points:
        std_cloud, std, mean, rot = standardize_point(cloud)
        clouds.append(std_cloud)
        stds.append(std)
        means.append(mean)
        rotations.append(rot)
    return np.stack(clouds, 0), stds, means, rotations
def standardize_point(point):
    """Standardize a single N x 3 point cloud.

    Centers the cloud, rotates its smallest principal axis onto x, and
    scales each axis to unit extent.  Returns (point, std, mean, R) so
    the transform can be inverted (see reverse_all_transformation).
    """
    # Bug fix: the original used torch.mean(point, 0)[0], which is only
    # the x-coordinate mean (a scalar) — almost certainly a copy of the
    # torch.min/torch.max tuple-indexing idiom. Centering must subtract
    # the full per-axis mean vector.
    mean = torch.mean(point, 0)
    point = point - mean
    S, U = pca_numpy(point)
    # Eigenvector of the smallest eigenvalue = smallest principal axis.
    smallest_ev = U[:, np.argmin(S)]
    R = rotation_matrix_a_to_b(smallest_ev, np.array([1, 0, 0]))
    # axis aligns with x axis.
    point = R @ point.T
    point = point.T
    std = np.abs(np.max(point, 0) - np.min(point, 0))
    std = std.reshape((1, 3))
    point = point / (std + EPS)
    return point, std, mean, R
def standardize_points_torch(points, weights):
    """Standardize each point cloud in a batch (torch, weighted variant).

    Returns the stacked standardized clouds plus the per-cloud stds,
    means and rotation matrices needed to undo the transform.
    """
    clouds, stds, means, rotations = [], [], [], []
    for b in range(points.shape[0]):
        cloud, std, mean, rot = standardize_point_torch(points[b], weights)
        clouds.append(cloud)
        stds.append(std)
        means.append(mean)
        rotations.append(rot)
    return torch.stack(clouds, 0), stds, means, rotations
def standardize_point_torch(point, weights):
    """Standardize one point cloud, weighting by per-point confidence.

    Centers using the weighted mean of confident points, rotates the
    smallest PCA axis of the confident subset onto x, and scales each
    axis to unit extent.  Returns (point, std, mean, R); only `mean`
    keeps gradients — R and std are detached.  Runs on CUDA.
    """
    # TODO: not back propagation through rotation matrix and scaling yet.
    # Change this 0.8 to 0 to include all points.
    higher_indices = weights[:, 0] > 0.8
    # some heuristic: fall back to the top quarter/half of points when
    # too few pass the confidence threshold.
    if torch.sum(higher_indices) < 400:
        if weights.shape[0] >= 7500:
            _, higher_indices = torch.topk(weights[:, 0], weights.shape[0] // 4)
        else:
            _, higher_indices = torch.topk(weights[:, 0], weights.shape[0] // 2)
    weighted_points = point[higher_indices] * weights[higher_indices]
    # Note: gradients throught means, force the network to produce correct means.
    mean = torch.sum(weighted_points, 0) / (torch.sum(weights[higher_indices]) + EPS)
    point = point - mean
    # take only very confident points to compute PCA direction.
    S, U = pca_torch(point[higher_indices])
    # Eigenvector of the smallest eigenvalue; S[:, 0] holds the real parts.
    smallest_ev = U[:, torch.min(S[:, 0], 0)[1]].data.cpu().numpy()
    R = rotation_matrix_a_to_b(smallest_ev, np.array([1, 0, 0]))
    # axis aligns with x axis.
    R = R.astype(np.float32)
    R = torch.from_numpy(R).cuda(point.get_device()).detach()
    point = R @ torch.transpose(point, 1, 0)
    point = torch.transpose(point, 1, 0)
    weighted_points = point[higher_indices] * weights[higher_indices]
    # NOTE(review): bare except dropping into an ipdb debugger — should
    # not be left in production code.
    try:
        std = torch.abs(torch.max(weighted_points, 0)[0] - torch.min(weighted_points, 0)[0])
    except:
        import ipdb;
        ipdb.set_trace()
    std = std.reshape((1, 3)).detach()
    point = point / (std + EPS)
    return point, std, mean, R
def rotation_matrix_a_to_b(A, B):
    """
    Finds rotation matrix from vector A in 3d to vector B
    in 3d, i.e. B = R @ A (A and B assumed unit vectors).

    Builds the frame (u, v, w) spanning the rotation plane and
    conjugates the planar rotation by it.  Falls back to identity when
    the frame is singular (e.g. A parallel to B).
    """
    # Same value as the module-level EPS; computed locally so the
    # function is self-contained.
    eps = float(np.finfo(np.float32).eps)
    cos = np.dot(A, B)
    sin = np.linalg.norm(np.cross(B, A))
    u = A
    v = B - np.dot(A, B) * A
    v = v / (np.linalg.norm(v) + eps)
    w = np.cross(B, A)
    w = w / (np.linalg.norm(w) + eps)
    F = np.stack([u, v, w], 1)
    G = np.array([[cos, -sin, 0],
                  [sin, cos, 0],
                  [0, 0, 1]])
    try:
        R = F @ G @ np.linalg.inv(F)
    # Bug fix: the original used a bare `except:`, which swallowed every
    # exception (including KeyboardInterrupt); only a singular F is the
    # expected failure here.
    except np.linalg.LinAlgError:
        R = np.eye(3, dtype=np.float32)
    return R
def pca_numpy(X):
    """Eigen-decompose the scatter matrix X.T @ X (PCA directions)."""
    scatter = X.T @ X
    return np.linalg.eig(scatter)
def pca_torch(X):
# TODO 2Change this to do SVD, because it is stable and computationally
# less intensive.
covariance = torch.transpose(X, 1, 0) @ X
S, U = torch.eig(covariance, eigenvectors=True)
return S, U
def reverse_all_transformations(points, means, stds, Rs):
    """Undo standardization for every cloud in a batch; returns a stacked array."""
    restored = [
        reverse_all_transformation(points[i], means[i], stds[i], Rs[i])
        for i in range(len(Rs))
    ]
    return np.stack(restored, 0)
def reverse_all_transformation(point, mean, std, R):
    """Undo a single standardize_point transform: unscale, unrotate, uncenter."""
    scaled = point * std.reshape((1, 3))
    unrotated = (np.linalg.inv(R) @ scaled.T).T
    return unrotated + mean
def sample_points_from_control_points_(nu, nv, outputs, batch_size, input_size_u=20, input_size_v=20):
    """Evaluate spline surfaces from control points and basis matrices.

    nu / nv are (grid, input_size_u) and (grid, input_size_v) basis
    matrices; `outputs` holds per-batch control points.  The
    `batch_size` argument is ignored (kept for interface compatibility)
    — the batch is read off `outputs` itself.  Returns a
    (batch, grid**2, 3) tensor of surface samples.
    """
    batch = outputs.shape[0]
    grid = nu.shape[0]
    ctrl = outputs.reshape((batch, input_size_u, input_size_v, 3))
    surfaces = []
    for b in range(batch):
        channels = [
            # .clone() avoids autograd errors on views in the backward pass.
            torch.matmul(torch.matmul(nu, ctrl[b, :, :, c].clone()), torch.transpose(nv, 1, 0))
            for c in range(3)
        ]
        surfaces.append(torch.stack(channels, 2))
    return torch.stack(surfaces, 0).view(batch, grid ** 2, 3)
def project_to_plane(points, a, d):
    """Project `points` onto the plane with normal `a` at distance `d`
    from the origin (a-hat . x = d)."""
    normal = a.reshape((3, 1))
    normal = normal / torch.norm(normal, 2)
    # Drop each point's component along the normal (projects onto the
    # parallel plane through the origin) ...
    along = ((points @ normal).permute(1, 0) * normal).permute(1, 0)
    # ... then translate the plane back out to distance d.
    return points - along + normal.transpose(1, 0) * d
def project_to_point_cloud(points, surface):
    """Snap each point to its (L2) nearest neighbor among `surface` points."""
    sq_dist = np.sum((np.expand_dims(points, 1) - np.expand_dims(surface, 0)) ** 2, 2)
    return surface[np.argmin(sq_dist, 1)]
def bit_mapping_points(input, output_points, thres, size_u, size_v, mesh=None):
    """Tessellate `output_points` into a size_u x size_v grid mesh and keep
    only the triangles whose centroid lies within `thres` of some input point.

    NOTE(review): an identical definition appears again later in this
    file and (being later) shadows this one at import time.
    """
    if mesh:
        pass
    else:
        mesh = tessalate_points(output_points, size_u, size_v)
    vertices = np.array(mesh.vertices)
    triangles = np.array(mesh.triangles)
    # Triangle centroids.
    output = np.mean(vertices[triangles], 1)
    diff = (np.expand_dims(output, 1) - np.expand_dims(input, 0)) ** 2
    diff = np.sqrt(np.sum(diff, 2))
    # Keep triangles whose centroid is near any input point.
    indices = np.min(diff, 1) < thres
    mesh = copy.deepcopy(mesh)
    t = np.array(mesh.triangles)
    mesh.triangles = Vector3iVector(t[indices])
    return mesh
def bit_mapping_points_torch(input, output_points, thres, size_u, size_v, mesh=None):
    """Tessellate a fitted size_u x size_v grid, keeping only cells that
    lie within `thres` of the input points (grid-based trimming).

    `mesh` is accepted for interface compatibility but unused.
    """
    # Only the mask is needed; create_grid's other outputs are debug data.
    mask, _, _, _ = create_grid(input, output_points, size_u, size_v, thres=thres)
    # Dead `t3 = time.time()` removed — the value was never used.
    return tessalate_points_fast(output_points, size_u, size_v, mask=mask)
def bit_mapping_points(input, output_points, thres, size_u, size_v, mesh=None):
    """Tessellate `output_points` into a size_u x size_v grid mesh and keep
    only the triangles whose centroid lies within `thres` of some input point.

    NOTE(review): this is a verbatim duplicate of the bit_mapping_points
    defined earlier in this file; being later, this copy is the one bound
    at import time. One of the two should be deleted.
    """
    if mesh:
        pass
    else:
        mesh = tessalate_points(output_points, size_u, size_v)
    vertices = np.array(mesh.vertices)
    triangles = np.array(mesh.triangles)
    # Triangle centroids.
    output = np.mean(vertices[triangles], 1)
    diff = (np.expand_dims(output, 1) - np.expand_dims(input, 0)) ** 2
    diff = np.sqrt(np.sum(diff, 2))
    # Keep triangles whose centroid is near any input point.
    indices = np.min(diff, 1) < thres
    mesh = copy.deepcopy(mesh)
    t = np.array(mesh.triangles)
    mesh.triangles = Vector3iVector(t[indices])
    return mesh
def bit_map_mesh(mesh, include_indices):
    """Return a deep copy of ``mesh`` keeping only the selected triangles.

    include_indices: boolean mask or integer indices into the triangle list.
    The input mesh is left untouched.
    """
    trimmed = copy.deepcopy(mesh)
    all_triangles = np.array(trimmed.triangles)
    trimmed.triangles = Vector3iVector(all_triangles[include_indices])
    return trimmed
def display_inlier_outlier(cloud, ind):
    """Visualize a point cloud split into inliers (gray) and outliers (red).

    cloud: open3d point cloud.
    ind: indices of the inlier points within ``cloud``.
    """
    inliers = cloud.select_down_sample(ind)
    # invert=True selects everything NOT in ``ind``.
    outliers = cloud.select_down_sample(ind, invert=True)
    print("Showing outliers (red) and inliers (gray): ")
    outliers.paint_uniform_color([1, 0, 0])
    inliers.paint_uniform_color([0.8, 0.8, 0.8])
    open3d.visualization.draw_geometries([inliers, outliers])
def remove_outliers(points, viz=False):
    """Drop statistical outliers from ``points``.

    points: array of point coordinates, converted to an open3d point cloud.
    viz: when True, also visualize inliers vs. outliers.

    Returns the inlier points as a numpy array.
    """
    pcd = visualize_point_cloud(points)
    # Remove points whose mean distance to their 20 nearest neighbours
    # deviates by more than 0.5 standard deviations from the average.
    cl, ind = pcd.remove_statistical_outlier(nb_neighbors=20,
                                             std_ratio=0.50)
    if viz:
        # BUG FIX: the original referenced the undefined name
        # ``voxel_down_pcd`` here, raising NameError whenever viz=True.
        display_inlier_outlier(pcd, ind)
    return np.array(cl.points)
def visualize_bit_mapping_shape(data_, weights, recon_points, parameters=None, bit_map=True, epsilon=0.05):
    """Trim each reconstructed primitive against its input points; return meshes.

    For every segment in ``data_`` the reconstructed surface in
    ``recon_points`` is tessellated and, when ``bit_map`` is True, triangles
    far from the segment's input points are discarded ("bit mapping").
    Degenerate reconstructions (None or empty) are skipped.

    data_: per-segment tuples; element 0 holds the input points and element 2
        the primitive label ``l``.
    weights, parameters: unused here; kept for interface compatibility.
    recon_points: per-segment reconstructed surface points (torch tensor or
        numpy array).
    bit_map: trim the tessellated surface against the input points when True.
    epsilon: trimming threshold; when falsy, per-primitive defaults are used.

    Returns a list of meshes, one per non-degenerate segment.
    """
    # This step gathers trimmed primitives; a better approach would be to not
    # tessellate at all but directly find the occupied grid points (TODO).
    pred_meshes = []
    for index, g in enumerate(data_):
        if recon_points[index] is None:
            # corresponds to degenerate cases
            continue
        if isinstance(recon_points[index], np.ndarray):
            if recon_points[index].shape[0] == 0:
                continue
        points, _, l, _, _, i = g
        if not isinstance(points, np.ndarray):
            points = points.data.cpu().numpy()
        part_points = points
        if l in [11]:
            # torus: densify the input points before trimming
            part_points = up_sample_points_torch_memory_efficient(torch.from_numpy(points).cuda(), 2).data.cpu().numpy()
        if bit_map:
            e = epsilon if epsilon else 0.03
            pred_mesh = bit_mapping_points_torch(part_points, recon_points[index], e, 100, 60)
        if l in [0, 9, 6, 7]:
            # closed bspline surface
            if not isinstance(recon_points[index], np.ndarray):
                recon_points_ = recon_points[index].data.cpu().numpy()[0]
            else:
                recon_points_ = recon_points[index]
            part_points = up_sample_points_torch_memory_efficient(torch.from_numpy(points).cuda(), 2).data.cpu().numpy()
            # BUG FIX: the original wrapped this call in a bare ``except``
            # that dropped into an interactive ipdb session (hanging any
            # non-interactive run); failures now propagate to the caller.
            pred_mesh = tessalate_points_fast(recon_points_, 31, 30)
            if bit_map:
                e = epsilon if epsilon else 0.06
                pred_mesh = bit_mapping_points_torch(part_points, recon_points_, e, 31, 30)
        elif l in [2, 8]:
            # open bspline surface
            part_points = up_sample_points_torch_memory_efficient(torch.from_numpy(points).cuda(), 2).data.cpu().numpy()
            if not isinstance(recon_points[index], np.ndarray):
                recon_points_ = recon_points[index].data.cpu().numpy()[0]
            else:
                recon_points_ = recon_points[index]
            pred_mesh = tessalate_points_fast(recon_points_, 30, 30)
            if bit_map:
                e = epsilon if epsilon else 0.06
                pred_mesh = bit_mapping_points_torch(part_points, recon_points_, e, 30, 30)
        elif l == 1:
            # Fit plane
            part_points = up_sample_points_torch_memory_efficient(torch.from_numpy(points).cuda(), 3).data.cpu().numpy()
            e = epsilon if epsilon else 0.02
            pred_mesh = bit_mapping_points_torch(part_points, recon_points[index], e, 120, 120)
        elif l == 3:
            # Cone
            part_points = up_sample_points_torch_memory_efficient(torch.from_numpy(points).cuda(), 3).data.cpu().numpy()
            e = epsilon if epsilon else 0.03
            # BUG FIX: removed the bare ``except`` + ipdb trap that used to
            # swallow any error from the two lines below.
            N = recon_points[index].shape[0] // 51
            pred_mesh = bit_mapping_points_torch(part_points, recon_points[index], e, N, 51)
        elif l == 4:
            # cylinder
            part_points = up_sample_points_torch_memory_efficient(torch.from_numpy(points).cuda(), 3).data.cpu().numpy()
            e = epsilon if epsilon else 0.03
            pred_mesh = bit_mapping_points_torch(part_points, recon_points[index], e, 200, 60)
        elif l == 5:
            part_points = up_sample_points_torch_memory_efficient(torch.from_numpy(points).cuda(), 2).data.cpu().numpy()
            e = epsilon if epsilon else 0.03
            pred_mesh = bit_mapping_points_torch(part_points, recon_points[index], e, 100, 100)
        # NOTE(review): if ``bit_map`` is False and ``l`` matches none of the
        # branches above, ``pred_mesh`` is unbound here — confirm callers only
        # pass labels covered by the branches.
        pred_meshes.append(pred_mesh)
    return pred_meshes
|
[
"numpy.random.seed",
"numpy.sum",
"matplotlib.cm.get_cmap",
"numpy.argmax",
"torch.sqrt",
"ipdb.set_trace",
"torch.eye",
"torch.cat",
"numpy.argmin",
"numpy.argsort",
"open3d.visualization.draw_geometries",
"numpy.mean",
"train_open_spline_utils.src.utils.visualize_point_cloud",
"numpy.arange",
"train_open_spline_utils.src.segment_utils.to_one_hot",
"torch.arange",
"numpy.linalg.norm",
"torch.inverse",
"torch.no_grad",
"torch.ones",
"train_open_spline_utils.src.utils.draw_geometries",
"numpy.linalg.eig",
"torch.sign",
"numpy.finfo",
"numpy.max",
"torch.matrix_rank",
"numpy.concantenate",
"torch.qr",
"numpy.stack",
"torch.mean",
"copy.deepcopy",
"torch.topk",
"numpy.roll",
"torch.svd",
"torch.manual_seed",
"torch.norm",
"numpy.cross",
"numpy.min",
"numpy.linalg.inv",
"torch.max",
"torch.unsqueeze",
"numpy.dot",
"train_open_spline_utils.src.curve_utils.DrawSurfs",
"torch.sum",
"train_open_spline_utils.src.VisUtils.tessalate_points",
"numpy.concatenate",
"torch.from_numpy",
"torch.min",
"torch.stack",
"train_open_spline_utils.src.guard.guard_exp",
"numpy.expand_dims",
"time.time",
"numpy.array",
"numpy.eye",
"torch.abs",
"torch.eig",
"torch.transpose"
] |
[((829, 840), 'train_open_spline_utils.src.curve_utils.DrawSurfs', 'DrawSurfs', ([], {}), '()\n', (838, 840), False, 'from train_open_spline_utils.src.curve_utils import DrawSurfs\n'), ((879, 899), 'torch.manual_seed', 'torch.manual_seed', (['(2)'], {}), '(2)\n', (896, 899), False, 'import torch\n'), ((900, 917), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (914, 917), True, 'import numpy as np\n'), ((930, 941), 'train_open_spline_utils.src.curve_utils.DrawSurfs', 'DrawSurfs', ([], {}), '()\n', (939, 941), False, 'from train_open_spline_utils.src.curve_utils import DrawSurfs\n'), ((2908, 2928), 'numpy.sum', 'np.sum', (['(dist ** 2)', '(2)'], {}), '(dist ** 2, 2)\n', (2914, 2928), True, 'import numpy as np\n'), ((2943, 2962), 'numpy.argsort', 'np.argsort', (['dist', '(1)'], {}), '(dist, 1)\n', (2953, 2962), True, 'import numpy as np\n'), ((3017, 3038), 'numpy.mean', 'np.mean', (['neighbors', '(1)'], {}), '(neighbors, 1)\n', (3024, 3038), True, 'import numpy as np\n'), ((3057, 3090), 'numpy.concatenate', 'np.concatenate', (['[points, centers]'], {}), '([points, centers])\n', (3071, 3090), True, 'import numpy as np\n'), ((3109, 3143), 'numpy.concatenate', 'np.concatenate', (['[normals, normals]'], {}), '([normals, normals])\n', (3123, 3143), True, 'import numpy as np\n'), ((3162, 3199), 'numpy.concatenate', 'np.concatenate', (['[weights, weights]', '(1)'], {}), '([weights, weights], 1)\n', (3176, 3199), True, 'import numpy as np\n'), ((3222, 3262), 'numpy.concatenate', 'np.concatenate', (['[primitives, primitives]'], {}), '([primitives, primitives])\n', (3236, 3262), True, 'import numpy as np\n'), ((3285, 3327), 'numpy.concatenate', 'np.concatenate', (['[cluster_ids, cluster_ids]'], {}), '([cluster_ids, cluster_ids])\n', (3299, 3327), True, 'import numpy as np\n'), ((3345, 3377), 'numpy.concatenate', 'np.concatenate', (['[labels, labels]'], {}), '([labels, labels])\n', (3359, 3377), True, 'import numpy as np\n'), ((6584, 6608), 'numpy.concantenate', 
'np.concantenate', (['diff', '(0)'], {}), '(diff, 0)\n', (6599, 6608), True, 'import numpy as np\n'), ((8104, 8135), 'torch.unsqueeze', 'torch.unsqueeze', (['grid_points', '(0)'], {}), '(grid_points, 0)\n', (8119, 8135), False, 'import torch\n'), ((10945, 10977), 'train_open_spline_utils.src.guard.guard_exp', 'guard_exp', (['(weights / bw ** 2 / 2)'], {}), '(weights / bw ** 2 / 2)\n', (10954, 10977), False, 'from train_open_spline_utils.src.guard import guard_exp\n'), ((11460, 11481), 'numpy.argmax', 'np.argmax', (['weights', '(1)'], {}), '(weights, 1)\n', (11469, 11481), True, 'import numpy as np\n'), ((11496, 11518), 'train_open_spline_utils.src.segment_utils.to_one_hot', 'to_one_hot', (['weights', 'K'], {}), '(weights, K)\n', (11506, 11518), False, 'from train_open_spline_utils.src.segment_utils import to_one_hot, matching_iou, relaxed_iou, relaxed_iou_fast\n'), ((13429, 13499), 'train_open_spline_utils.src.utils.visualize_point_cloud', 'visualize_point_cloud', (['points'], {'colors': 'colors', 'normals': 'normals', 'viz': 'viz'}), '(points, colors=colors, normals=normals, viz=viz)\n', (13450, 13499), False, 'from train_open_spline_utils.src.utils import visualize_point_cloud\n'), ((14037, 14053), 'torch.sign', 'torch.sign', (['diff'], {}), '(diff)\n', (14047, 14053), False, 'import torch\n'), ((15884, 15903), 'numpy.stack', 'np.stack', (['Points', '(0)'], {}), '(Points, 0)\n', (15892, 15903), True, 'import numpy as np\n'), ((16748, 16770), 'torch.stack', 'torch.stack', (['Points', '(0)'], {}), '(Points, 0)\n', (16759, 16770), False, 'import torch\n'), ((17984, 18012), 'torch.transpose', 'torch.transpose', (['point', '(1)', '(0)'], {}), '(point, 1, 0)\n', (17999, 18012), False, 'import torch\n'), ((18491, 18503), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (18497, 18503), True, 'import numpy as np\n'), ((18630, 18644), 'numpy.cross', 'np.cross', (['B', 'A'], {}), '(B, A)\n', (18638, 18644), True, 'import numpy as np\n'), ((18691, 18713), 'numpy.stack', 
'np.stack', (['[u, v, w]', '(1)'], {}), '([u, v, w], 1)\n', (18699, 18713), True, 'import numpy as np\n'), ((18722, 18774), 'numpy.array', 'np.array', (['[[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]]'], {}), '([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])\n', (18730, 18774), True, 'import numpy as np\n'), ((18953, 18975), 'numpy.linalg.eig', 'np.linalg.eig', (['(X.T @ X)'], {}), '(X.T @ X)\n', (18966, 18975), True, 'import numpy as np\n'), ((19167, 19207), 'torch.eig', 'torch.eig', (['covariance'], {'eigenvectors': '(True)'}), '(covariance, eigenvectors=True)\n', (19176, 19207), False, 'import torch\n'), ((19441, 19464), 'numpy.stack', 'np.stack', (['new_points', '(0)'], {}), '(new_points, 0)\n', (19449, 19464), True, 'import numpy as np\n'), ((20360, 20390), 'torch.stack', 'torch.stack', (['reconst_points', '(0)'], {}), '(reconst_points, 0)\n', (20371, 20390), False, 'import torch\n'), ((21081, 21096), 'numpy.sum', 'np.sum', (['diff', '(2)'], {}), '(diff, 2)\n', (21087, 21096), True, 'import numpy as np\n'), ((21332, 21355), 'numpy.array', 'np.array', (['mesh.vertices'], {}), '(mesh.vertices)\n', (21340, 21355), True, 'import numpy as np\n'), ((21372, 21396), 'numpy.array', 'np.array', (['mesh.triangles'], {}), '(mesh.triangles)\n', (21380, 21396), True, 'import numpy as np\n'), ((21410, 21441), 'numpy.mean', 'np.mean', (['vertices[triangles]', '(1)'], {}), '(vertices[triangles], 1)\n', (21417, 21441), True, 'import numpy as np\n'), ((21598, 21617), 'copy.deepcopy', 'copy.deepcopy', (['mesh'], {}), '(mesh)\n', (21611, 21617), False, 'import copy\n'), ((21626, 21650), 'numpy.array', 'np.array', (['mesh.triangles'], {}), '(mesh.triangles)\n', (21634, 21650), True, 'import numpy as np\n'), ((21993, 22004), 'time.time', 'time.time', ([], {}), '()\n', (22002, 22004), False, 'import time\n'), ((22217, 22240), 'numpy.array', 'np.array', (['mesh.vertices'], {}), '(mesh.vertices)\n', (22225, 22240), True, 'import numpy as np\n'), ((22257, 22281), 'numpy.array', 'np.array', 
(['mesh.triangles'], {}), '(mesh.triangles)\n', (22265, 22281), True, 'import numpy as np\n'), ((22295, 22326), 'numpy.mean', 'np.mean', (['vertices[triangles]', '(1)'], {}), '(vertices[triangles], 1)\n', (22302, 22326), True, 'import numpy as np\n'), ((22483, 22502), 'copy.deepcopy', 'copy.deepcopy', (['mesh'], {}), '(mesh)\n', (22496, 22502), False, 'import copy\n'), ((22511, 22535), 'numpy.array', 'np.array', (['mesh.triangles'], {}), '(mesh.triangles)\n', (22519, 22535), True, 'import numpy as np\n'), ((22654, 22673), 'copy.deepcopy', 'copy.deepcopy', (['mesh'], {}), '(mesh)\n', (22667, 22673), False, 'import copy\n'), ((22682, 22706), 'numpy.array', 'np.array', (['mesh.triangles'], {}), '(mesh.triangles)\n', (22690, 22706), True, 'import numpy as np\n'), ((23098, 23165), 'open3d.visualization.draw_geometries', 'open3d.visualization.draw_geometries', (['[inlier_cloud, outlier_cloud]'], {}), '([inlier_cloud, outlier_cloud])\n', (23134, 23165), False, 'import open3d\n'), ((23218, 23247), 'train_open_spline_utils.src.utils.visualize_point_cloud', 'visualize_point_cloud', (['points'], {}), '(points)\n', (23239, 23247), False, 'from train_open_spline_utils.src.utils import visualize_point_cloud\n'), ((23446, 23465), 'numpy.array', 'np.array', (['cl.points'], {}), '(cl.points)\n', (23454, 23465), True, 'import numpy as np\n'), ((853, 873), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (861, 873), True, 'import numpy as np\n'), ((2843, 2868), 'numpy.expand_dims', 'np.expand_dims', (['points', '(1)'], {}), '(points, 1)\n', (2857, 2868), True, 'import numpy as np\n'), ((2871, 2896), 'numpy.expand_dims', 'np.expand_dims', (['points', '(0)'], {}), '(points, 0)\n', (2885, 2896), True, 'import numpy as np\n'), ((4160, 4182), 'torch.stack', 'torch.stack', (['Points', '(0)'], {}), '(Points, 0)\n', (4171, 4182), False, 'import torch\n'), ((4565, 4585), 'numpy.sum', 'np.sum', (['(dist ** 2)', '(2)'], {}), '(dist ** 2, 2)\n', (4571, 4585), True, 'import 
numpy as np\n'), ((4604, 4623), 'numpy.argsort', 'np.argsort', (['dist', '(1)'], {}), '(dist, 1)\n', (4614, 4623), True, 'import numpy as np\n'), ((4686, 4707), 'numpy.mean', 'np.mean', (['neighbors', '(1)'], {}), '(neighbors, 1)\n', (4693, 4707), True, 'import numpy as np\n'), ((4725, 4758), 'numpy.concatenate', 'np.concatenate', (['[points, centers]'], {}), '([points, centers])\n', (4739, 4758), True, 'import numpy as np\n'), ((5119, 5142), 'torch.sum', 'torch.sum', (['(dist ** 2)', '(2)'], {}), '(dist ** 2, 2)\n', (5128, 5142), False, 'import torch\n'), ((5164, 5201), 'torch.topk', 'torch.topk', (['dist', '(5)', '(1)'], {'largest': '(False)'}), '(dist, 5, 1, largest=False)\n', (5174, 5201), False, 'import torch\n'), ((5263, 5287), 'torch.mean', 'torch.mean', (['neighbors', '(1)'], {}), '(neighbors, 1)\n', (5273, 5287), False, 'import torch\n'), ((5305, 5333), 'torch.cat', 'torch.cat', (['[points, centers]'], {}), '([points, centers])\n', (5314, 5333), False, 'import torch\n'), ((6088, 6109), 'torch.cat', 'torch.cat', (['indices', '(0)'], {}), '(indices, 0)\n', (6097, 6109), False, 'import torch\n'), ((6287, 6311), 'torch.mean', 'torch.mean', (['neighbors', '(1)'], {}), '(neighbors, 1)\n', (6297, 6311), False, 'import torch\n'), ((6329, 6357), 'torch.cat', 'torch.cat', (['[points, centers]'], {}), '([points, centers])\n', (6338, 6357), False, 'import torch\n'), ((7163, 7175), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (7172, 7175), True, 'import numpy as np\n'), ((7713, 7725), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (7722, 7725), True, 'import numpy as np\n'), ((8956, 8974), 'torch.cat', 'torch.cat', (['diff', '(0)'], {}), '(diff, 0)\n', (8965, 8974), False, 'import torch\n'), ((8990, 9006), 'torch.sqrt', 'torch.sqrt', (['diff'], {}), '(diff)\n', (9000, 9006), False, 'import torch\n'), ((10345, 10364), 'numpy.array', 'np.array', (['triangles'], {}), '(triangles)\n', (10353, 10364), True, 'import numpy as np\n'), ((10413, 10434), 'numpy.stack', 
'np.stack', (['vertices', '(0)'], {}), '(vertices, 0)\n', (10421, 10434), True, 'import numpy as np\n'), ((10538, 10565), 'train_open_spline_utils.src.utils.draw_geometries', 'draw_geometries', (['[new_mesh]'], {}), '([new_mesh])\n', (10553, 10565), False, 'from train_open_spline_utils.src.utils import draw_geometries\n'), ((10998, 11030), 'torch.sum', 'torch.sum', (['prob', '(0)'], {'keepdim': '(True)'}), '(prob, 0, keepdim=True)\n', (11007, 11030), False, 'import torch\n'), ((13907, 13925), 'torch.ones', 'torch.ones', (['(N, N)'], {}), '((N, N))\n', (13917, 13925), False, 'import torch\n'), ((13999, 14014), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (14008, 14014), False, 'import torch\n'), ((15982, 16002), 'torch.mean', 'torch.mean', (['point', '(0)'], {}), '(point, 0)\n', (15992, 16002), False, 'import torch\n'), ((16141, 16160), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (16149, 16160), True, 'import numpy as np\n'), ((17047, 17072), 'torch.sum', 'torch.sum', (['higher_indices'], {}), '(higher_indices)\n', (17056, 17072), False, 'import torch\n'), ((17458, 17487), 'torch.sum', 'torch.sum', (['weighted_points', '(0)'], {}), '(weighted_points, 0)\n', (17467, 17487), False, 'import torch\n'), ((17781, 17800), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (17789, 17800), True, 'import numpy as np\n'), ((17943, 17971), 'torch.transpose', 'torch.transpose', (['point', '(1)', '(0)'], {}), '(point, 1, 0)\n', (17958, 17971), False, 'import torch\n'), ((18529, 18543), 'numpy.cross', 'np.cross', (['B', 'A'], {}), '(B, A)\n', (18537, 18543), True, 'import numpy as np\n'), ((19127, 19151), 'torch.transpose', 'torch.transpose', (['X', '(1)', '(0)'], {}), '(X, 1, 0)\n', (19142, 19151), False, 'import torch\n'), ((19638, 19654), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (19651, 19654), True, 'import numpy as np\n'), ((20565, 20581), 'torch.norm', 'torch.norm', (['a', '(2)'], {}), '(a, 2)\n', (20575, 20581), 
False, 'import torch\n'), ((21116, 21134), 'numpy.argmin', 'np.argmin', (['diff', '(1)'], {}), '(diff, 1)\n', (21125, 21134), True, 'import numpy as np\n'), ((21269, 21316), 'train_open_spline_utils.src.VisUtils.tessalate_points', 'tessalate_points', (['output_points', 'size_u', 'size_v'], {}), '(output_points, size_u, size_v)\n', (21285, 21316), False, 'from train_open_spline_utils.src.VisUtils import tessalate_points\n'), ((21532, 21547), 'numpy.sum', 'np.sum', (['diff', '(2)'], {}), '(diff, 2)\n', (21538, 21547), True, 'import numpy as np\n'), ((21563, 21578), 'numpy.min', 'np.min', (['diff', '(1)'], {}), '(diff, 1)\n', (21569, 21578), True, 'import numpy as np\n'), ((22154, 22201), 'train_open_spline_utils.src.VisUtils.tessalate_points', 'tessalate_points', (['output_points', 'size_u', 'size_v'], {}), '(output_points, size_u, size_v)\n', (22170, 22201), False, 'from train_open_spline_utils.src.VisUtils import tessalate_points\n'), ((22417, 22432), 'numpy.sum', 'np.sum', (['diff', '(2)'], {}), '(diff, 2)\n', (22423, 22432), True, 'import numpy as np\n'), ((22448, 22463), 'numpy.min', 'np.min', (['diff', '(1)'], {}), '(diff, 1)\n', (22454, 22463), True, 'import numpy as np\n'), ((1323, 1339), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (1337, 1339), False, 'import ipdb\n'), ((1404, 1424), 'torch.matrix_rank', 'torch.matrix_rank', (['A'], {}), '(A)\n', (1421, 1424), False, 'import torch\n'), ((1476, 1487), 'torch.qr', 'torch.qr', (['A'], {}), '(A)\n', (1484, 1487), False, 'import torch\n'), ((2462, 2487), 'torch.matrix_rank', 'torch.matrix_rank', (['A_dash'], {}), '(A_dash)\n', (2479, 2487), False, 'import torch\n'), ((3864, 3887), 'torch.sum', 'torch.sum', (['(dist ** 2)', '(2)'], {}), '(dist ** 2, 2)\n', (3873, 3887), False, 'import torch\n'), ((3913, 3956), 'torch.topk', 'torch.topk', (['dist'], {'k': '(3)', 'dim': '(1)', 'largest': '(False)'}), '(dist, k=3, dim=1, largest=False)\n', (3923, 3956), False, 'import torch\n'), ((4022, 4046), 'torch.mean', 
'torch.mean', (['neighbors', '(1)'], {}), '(neighbors, 1)\n', (4032, 4046), False, 'import torch\n'), ((4073, 4104), 'torch.cat', 'torch.cat', (['[points[b], centers]'], {}), '([points[b], centers])\n', (4082, 4104), False, 'import torch\n'), ((4496, 4521), 'numpy.expand_dims', 'np.expand_dims', (['points', '(1)'], {}), '(points, 1)\n', (4510, 4521), True, 'import numpy as np\n'), ((4524, 4549), 'numpy.expand_dims', 'np.expand_dims', (['points', '(0)'], {}), '(points, 0)\n', (4538, 4549), True, 'import numpy as np\n'), ((5048, 5074), 'torch.unsqueeze', 'torch.unsqueeze', (['points', '(1)'], {}), '(points, 1)\n', (5063, 5074), False, 'import torch\n'), ((5077, 5103), 'torch.unsqueeze', 'torch.unsqueeze', (['points', '(0)'], {}), '(points, 0)\n', (5092, 5103), False, 'import torch\n'), ((5990, 6028), 'torch.topk', 'torch.topk', (['diff_', '(5)', '(1)'], {'largest': '(False)'}), '(diff_, 5, 1, largest=False)\n', (6000, 6028), False, 'import torch\n'), ((6791, 6803), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (6800, 6803), True, 'import numpy as np\n'), ((7018, 7050), 'torch.cat', 'torch.cat', (['[weights, weights]', '(0)'], {}), '([weights, weights], 0)\n', (7027, 7050), False, 'import torch\n'), ((7434, 7446), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (7443, 7446), True, 'import numpy as np\n'), ((8150, 8238), 'numpy.array', 'np.array', (['[[[0.25, 0.25], [0.25, 0.25]], [[0, 0], [0, 0]], [[0.0, 0.0], [0.0, 0.0]]]'], {}), '([[[0.25, 0.25], [0.25, 0.25]], [[0, 0], [0, 0]], [[0.0, 0.0], [0.0,\n 0.0]]])\n', (8158, 8238), True, 'import numpy as np\n'), ((8331, 8352), 'numpy.roll', 'np.roll', (['filter', '(1)', '(0)'], {}), '(filter, 1, 0)\n', (8338, 8352), True, 'import numpy as np\n'), ((8354, 8375), 'numpy.roll', 'np.roll', (['filter', '(2)', '(0)'], {}), '(filter, 2, 0)\n', (8361, 8375), True, 'import numpy as np\n'), ((8391, 8415), 'torch.from_numpy', 'torch.from_numpy', (['filter'], {}), '(filter)\n', (8407, 8415), False, 'import torch\n'), ((9277, 
9292), 'numpy.sum', 'np.sum', (['diff', '(2)'], {}), '(diff, 2)\n', (9283, 9292), True, 'import numpy as np\n'), ((9312, 9327), 'numpy.min', 'np.min', (['diff', '(1)'], {}), '(diff, 1)\n', (9318, 9327), True, 'import numpy as np\n'), ((11269, 11301), 'torch.min', 'torch.min', (['prob', '(1)'], {'keepdim': '(True)'}), '(prob, 1, keepdim=True)\n', (11278, 11301), False, 'import torch\n'), ((13384, 13406), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""seismic"""'], {}), "('seismic')\n", (13395, 13406), False, 'from matplotlib import cm\n'), ((14132, 14147), 'torch.arange', 'torch.arange', (['N'], {}), '(N)\n', (14144, 14147), False, 'import torch\n'), ((14149, 14164), 'torch.arange', 'torch.arange', (['N'], {}), '(N)\n', (14161, 14164), False, 'import torch\n'), ((14234, 14252), 'torch.ones', 'torch.ones', (['(N, N)'], {}), '((N, N))\n', (14244, 14252), False, 'import torch\n'), ((15180, 15207), 'torch.svd', 'torch.svd', (['input'], {'some': '(True)'}), '(input, some=True)\n', (15189, 15207), False, 'import torch\n'), ((16083, 16095), 'numpy.argmin', 'np.argmin', (['S'], {}), '(S)\n', (16092, 16095), True, 'import numpy as np\n'), ((16255, 16271), 'numpy.max', 'np.max', (['point', '(0)'], {}), '(point, 0)\n', (16261, 16271), True, 'import numpy as np\n'), ((16274, 16290), 'numpy.min', 'np.min', (['point', '(0)'], {}), '(point, 0)\n', (16280, 16290), True, 'import numpy as np\n'), ((17149, 17197), 'torch.topk', 'torch.topk', (['weights[:, 0]', '(weights.shape[0] // 4)'], {}), '(weights[:, 0], weights.shape[0] // 4)\n', (17159, 17197), False, 'import torch\n'), ((17244, 17292), 'torch.topk', 'torch.topk', (['weights[:, 0]', '(weights.shape[0] // 2)'], {}), '(weights[:, 0], weights.shape[0] // 2)\n', (17254, 17292), False, 'import torch\n'), ((17491, 17525), 'torch.sum', 'torch.sum', (['weights[higher_indices]'], {}), '(weights[higher_indices])\n', (17500, 17525), False, 'import torch\n'), ((18227, 18243), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (18241, 
18243), False, 'import ipdb\n'), ((18567, 18579), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (18573, 18579), True, 'import numpy as np\n'), ((18597, 18614), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (18611, 18614), True, 'import numpy as np\n'), ((18658, 18675), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {}), '(w)\n', (18672, 18675), True, 'import numpy as np\n'), ((18840, 18856), 'numpy.linalg.inv', 'np.linalg.inv', (['F'], {}), '(F)\n', (18853, 18856), True, 'import numpy as np\n'), ((18881, 18908), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (18887, 18908), True, 'import numpy as np\n'), ((20316, 20337), 'torch.stack', 'torch.stack', (['point', '(2)'], {}), '(point, 2)\n', (20327, 20337), False, 'import torch\n'), ((21009, 21034), 'numpy.expand_dims', 'np.expand_dims', (['points', '(1)'], {}), '(points, 1)\n', (21023, 21034), True, 'import numpy as np\n'), ((21037, 21063), 'numpy.expand_dims', 'np.expand_dims', (['surface', '(0)'], {}), '(surface, 0)\n', (21051, 21063), True, 'import numpy as np\n'), ((21454, 21479), 'numpy.expand_dims', 'np.expand_dims', (['output', '(1)'], {}), '(output, 1)\n', (21468, 21479), True, 'import numpy as np\n'), ((21482, 21506), 'numpy.expand_dims', 'np.expand_dims', (['input', '(0)'], {}), '(input, 0)\n', (21496, 21506), True, 'import numpy as np\n'), ((22339, 22364), 'numpy.expand_dims', 'np.expand_dims', (['output', '(1)'], {}), '(output, 1)\n', (22353, 22364), True, 'import numpy as np\n'), ((22367, 22391), 'numpy.expand_dims', 'np.expand_dims', (['input', '(0)'], {}), '(input, 0)\n', (22381, 22391), True, 'import numpy as np\n'), ((1785, 1800), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1798, 1800), False, 'import torch\n'), ((3783, 3812), 'torch.unsqueeze', 'torch.unsqueeze', (['points[b]', '(1)'], {}), '(points[b], 1)\n', (3798, 3812), False, 'import torch\n'), ((3815, 3844), 'torch.unsqueeze', 'torch.unsqueeze', (['points[b]', '(0)'], {}), 
'(points[b], 0)\n', (3830, 3844), False, 'import torch\n'), ((9025, 9043), 'torch.min', 'torch.min', (['diff', '(1)'], {}), '(diff, 1)\n', (9034, 9043), False, 'import torch\n'), ((9185, 9220), 'numpy.expand_dims', 'np.expand_dims', (['grid_mean_points', '(1)'], {}), '(grid_mean_points, 1)\n', (9199, 9220), True, 'import numpy as np\n'), ((9223, 9247), 'numpy.expand_dims', 'np.expand_dims', (['input', '(0)'], {}), '(input, 0)\n', (9237, 9247), True, 'import numpy as np\n'), ((11324, 11356), 'torch.max', 'torch.max', (['prob', '(1)'], {'keepdim': '(True)'}), '(prob, 1, keepdim=True)\n', (11333, 11356), False, 'import torch\n'), ((13588, 13600), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (13597, 13600), False, 'import torch\n'), ((14295, 14307), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (14304, 14307), False, 'import torch\n'), ((15261, 15277), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (15275, 15277), False, 'import ipdb\n'), ((1504, 1520), 'torch.inverse', 'torch.inverse', (['r'], {}), '(r)\n', (1517, 1520), False, 'import torch\n'), ((17872, 17891), 'torch.from_numpy', 'torch.from_numpy', (['R'], {}), '(R)\n', (17888, 17891), False, 'import torch\n'), ((18117, 18146), 'torch.max', 'torch.max', (['weighted_points', '(0)'], {}), '(weighted_points, 0)\n', (18126, 18146), False, 'import torch\n'), ((18152, 18181), 'torch.min', 'torch.min', (['weighted_points', '(0)'], {}), '(weighted_points, 0)\n', (18161, 18181), False, 'import torch\n'), ((20258, 20283), 'torch.transpose', 'torch.transpose', (['nv', '(1)', '(0)'], {}), '(nv, 1, 0)\n', (20273, 20283), False, 'import torch\n'), ((25205, 25221), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (25219, 25221), False, 'import ipdb\n'), ((5875, 5920), 'torch.unsqueeze', 'torch.unsqueeze', (['points[i * N:(i + 1) * N]', '(1)'], {}), '(points[i * N:(i + 1) * N], 1)\n', (5890, 5920), False, 'import torch\n'), ((5923, 5949), 'torch.unsqueeze', 'torch.unsqueeze', (['points', '(0)'], {}), '(points, 
0)\n', (5938, 5949), False, 'import torch\n'), ((8856, 8901), 'torch.unsqueeze', 'torch.unsqueeze', (['grid_mean_points[i:i + 1]', '(1)'], {}), '(grid_mean_points[i:i + 1], 1)\n', (8871, 8901), False, 'import torch\n'), ((8904, 8929), 'torch.unsqueeze', 'torch.unsqueeze', (['input', '(0)'], {}), '(input, 0)\n', (8919, 8929), False, 'import torch\n'), ((26986, 27002), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (27000, 27002), False, 'import ipdb\n'), ((17691, 17712), 'torch.min', 'torch.min', (['S[:, 0]', '(0)'], {}), '(S[:, 0], 0)\n', (17700, 17712), False, 'import torch\n'), ((6488, 6518), 'torch.unsqueeze', 'torch.unsqueeze', (['p[i:i + 1]', '(1)'], {}), '(p[i:i + 1], 1)\n', (6503, 6518), False, 'import torch\n'), ((6521, 6542), 'torch.unsqueeze', 'torch.unsqueeze', (['q', '(0)'], {}), '(q, 0)\n', (6536, 6542), False, 'import torch\n'), ((24363, 24387), 'torch.from_numpy', 'torch.from_numpy', (['points'], {}), '(points)\n', (24379, 24387), False, 'import torch\n'), ((24995, 25019), 'torch.from_numpy', 'torch.from_numpy', (['points'], {}), '(points)\n', (25011, 25019), False, 'import torch\n'), ((25578, 25602), 'torch.from_numpy', 'torch.from_numpy', (['points'], {}), '(points)\n', (25594, 25602), False, 'import torch\n'), ((26249, 26273), 'torch.from_numpy', 'torch.from_numpy', (['points'], {}), '(points)\n', (26265, 26273), False, 'import torch\n'), ((26602, 26626), 'torch.from_numpy', 'torch.from_numpy', (['points'], {}), '(points)\n', (26618, 26626), False, 'import torch\n'), ((27114, 27138), 'torch.from_numpy', 'torch.from_numpy', (['points'], {}), '(points)\n', (27130, 27138), False, 'import torch\n'), ((27448, 27472), 'torch.from_numpy', 'torch.from_numpy', (['points'], {}), '(points)\n', (27464, 27472), False, 'import torch\n')]
|
# <NAME>
# Mayo, 2020
# <EMAIL>
# Variables aleatorias
# La variable aleatoria es una función, se caracteriza por ser determinista
#
#Datos a partir de la base llamada datos.cvs
#### **************** Algoritmo **************** ####
#******************************************************
# IMPORTANDO PAQUETES
#******************************************************
from __future__ import division
import csv
from pylab import *
import matplotlib.pyplot as plt
from sklearn import *
from sklearn.preprocessing import PolynomialFeatures
from numpy import *
import numpy as np
import math
from collections import OrderedDict
import decimal
import scipy.stats as stats
import pandas as pd
import matplotlib.mlab as mlab
from scipy.stats import norm
from scipy.stats import rayleigh
#******************************************************
# DEFINICIONES
#******************************************************
#### MEDIDAS ARITMETICAS NECESARIAS
def media_arit_lista(lista):
    """Return the arithmetic mean of the values in ``lista``.

    Values may be numbers or numeric strings; each is coerced with ``float``.
    An empty list yields 0.0 (matching the previous behaviour).
    """
    # BUG FIX: the original divided by len(lista) + 1, which biases the mean
    # low; the arithmetic mean divides by the number of samples.
    if not lista:
        return 0.0
    sumatoria = 0.0
    for valor in lista:
        sumatoria += float(valor)
    return sumatoria / len(lista)
def varianza_lista(lista):
    """Return the population variance of the values in ``lista``.

    Values may be numbers or numeric strings; each is coerced with ``float``.
    An empty list yields 0.0 (matching the previous behaviour).
    """
    # BUG FIX: the original divided by len(lista) + 1 (biasing the variance
    # low) and recomputed the mean on every loop iteration (O(n^2)); the mean
    # is now hoisted out of the loop and the divisor is the sample count.
    n = len(lista)
    if n == 0:
        return 0.0
    mu = media_arit_lista(lista)
    sum_var = 0.0
    for valor in lista:
        sum_var += (mu - float(valor)) ** 2
    return sum_var / n
def desvia_estan_lista(lista):
    """Return the standard deviation of ``lista`` (square root of its variance)."""
    return varianza_lista(lista) ** 0.5
def curtosis(lista, media, varianza):
    """Return an excess-kurtosis style statistic of ``lista``.

    Each value is shifted by ``media``, scaled by ``varianza``, raised to the
    fourth power and summed; 3 is subtracted from the total (the excess-
    kurtosis convention).

    NOTE(review): the deviations are scaled by the variance rather than the
    standard deviation, and the sum is not divided by the sample size —
    confirm this matches the intended definition of kurtosis.
    """
    # Removed the unused local ``n = len(lista) + 1`` from the original.
    k0 = 0.0
    for valor in lista:
        z = (float(valor) - float(media)) / float(varianza)
        k0 += z ** 4
    return k0 - 3
def intervalo(data):
inter = []
for k in data:
redondeado = round(float(k))
# Rango de 12 a 54
if(12<=redondeado) and (redondeado<=54):
inter.append(float(k))
return inter
# Obteniendo la informacion del archivo .csv
with open("datos.csv", "r") as csv_file:
# Leyendo cada celda y separandola con coma para poder interpretar los datos
csv_reader = csv.reader(csv_file, delimiter=',')
# Se salta la primera linea del CSV
next(csv_reader)
# Arerglo para guardar los datos aleatorios
data = []
# Recorre todas las filas dentro del archivo cvs
for filas_completas_data in csv_reader:
# Se guardan los datos en un arreglo
data.append(float(filas_completas_data[0]))
#*******************************************************************
### CALCULO DE LOS 4 PRIMEROS MOMENTOS
#*******************************************************************
# Permiten resumir el comportamiento de las variables de manera general
# Se requiere de la media aritmetica para calcular los momentos de
# Rayleigh, entonces:
mu, sigma = stats.rayleigh.fit(data)
# print(mu)
# print(sigma)
desv = stats.rayleigh (mu, sigma)
media_pandas, var_pandas, inclinacion_pandas, curtosis_pandas = desv.stats(moments='mvsk')
print("La media obtenida desde pandas es de: ", media_pandas)
print("La varianza obtenida desde pandas es de: ", var_pandas)
print("El sesgo obtenido desde pandas es de: ", inclinacion_pandas)
print("La curtosis obtenida desde pandas es de: ", curtosis_pandas)
# # Los 4 momentos en funciones creadas se describen como:
# 1. Varianza medida de la dispersión de la función al rededor del promedio
media = media_arit_lista(data)
print("\n\n\n\nLa media de la definición creada: ", media)
varianza = varianza_lista(data)
print("La varianza de la definición creada: ",varianza)
# # 2. Desviación estándar es el cuadrado de la varianza
desviacion_std = desvia_estan_lista(data)
print("El valor de la desviación estándar de la definición creada: ", desviacion_std)
# # 3. Sesgo (Inclinación) refiere a la tendencia que tiene una descripcion completa
# # de la variable aleatoria. Si es cero la pdf es simetrica, negativa tiende a la izquierda
# # y positivo tiende a la derecha
funcion = 0
sx = 0
for k in data:
funcion = (float(k)- float(media))/float(varianza)
f4 = float(funcion)*float(funcion)*float(funcion)
sx = sx + float(f4)
print("El sesgo de la definición creada: ", sx)
# # 4. Curtosis (Medidad de abultamiento)
# # Se denomina como kx, si kx < 0 es achatada es decir como si tanta cima
# # Si k>0 es prominente como si fuese una montaña, sería empinada
curtosis_resp = curtosis(data, media, varianza)
print("Curtosis de la definición creada: ", curtosis_resp)
#******************************************************
### HISTOGRAMA de DATA
#******************************************************
##Ahora con la información almacenada en el arreglo "data"
###se procede a crear el histograma
histograma = plt.hist(data,30, density=True)
plt.savefig('Histograma de data ')
# # Limpia el area de graficacion
plt.cla()
#******************************************************
### CURVA DE MEJOR AJUSTE A LOS DATOS
#******************************************************
N = len(data) +1
escala = media_pandas/np.sqrt(np.pi/2)
V_norm_hist = escala * np.sqrt( -2* np.log (np.random.uniform(0, 1, N)))
fig, ax = plt.subplots(1, 1)
num_bins = 30
_binvalues, bins, _patches = plt.hist(V_norm_hist, bins=num_bins, density=False, rwidth=1, ec='blue', label='Histograma')
x = np.linspace(bins[0], bins[-1], 120)
binwidth = (bins[-1] - bins[0]) / num_bins
escala = V_norm_hist.mean()/np.sqrt(np.pi/2)
plt.plot(x, rayleigh(loc=0, scale=escala).pdf(x)*len(V_norm_hist)*binwidth, lw=5, alpha=0.6, label='Rayleigh pdf)')
plt.legend()
plt.savefig('Curva de mejor ajuste')
# # Limpia el area de graficacion
plt.cla()
#*******************************************************************
### ENCONTRAR LA PROBABILIDAD EN EL INTERVALO [a,b]
### Y CONTRARESTARLO CON LA FRECUENCIA RELATIVA
#*******************************************************************
# Como mi carnet es B61254, de acuerdo a las instrucciones se toma como
# a = 12
# b = 54
#### Calculando el intervalo y la probabilidad de esta
# Se ordena la función de menor a mayor para acotar este intervalo y calcular su probabilidad
# Guardo los valores entre 12 y 54
numeros1254 = 0
intervalo1254 = intervalo(data)
# print(intervalo1254)
for i in range(N):
# print(i)
for j in intervalo1254:
numeros1254 = numeros1254 + j
# Ya tengo los datos que pertenecen dentro del intervalo1254
# # Para la frecuencia relativa
# # Lo que se hace es dividir estos valores entre la cantidad total de datos
# # Esta cantidad estaba almacenada en la variable N
frecuencia_relativa1254 = numeros1254/N
print("\n \nLa frecuencia relativa perteneciente al intervalo [12, 54] es de: ", frecuencia_relativa1254)
# # # Calculando la probabilidad
probabilidad = desv.cdf(54) - desv.cdf(12)
print( 'La probibilidad obtenida mediante las funciones importadas para este rango es de: ', probabilidad)
#*******************************************************************
### TRANSFORMACIONES
#*******************************************************************
## Se considera que los datos del arhicvo csv son X
## Se debe realizar Y = sqrt(X) que es la transformacion.
## Porteriormente se grafica su histrograma
# Entonces para todos los valores en la data
sqrt_x = 0
Y_trasnfor= []
for values in data:
sqrt_x = float(values)**(1/2)
Y_trasnfor.append(sqrt_x)
#******************************************************
### HISTOGRAMA de TRANSFORMACION
#******************************************************
##Ahora con la información almacenada en el arreglo "data"
###se procede a crear el histograma
histograma = plt.hist(Y_trasnfor,30)
plt.savefig('Histograma transformacion')
# # Limpia el area de graficacion
plt.cla()
#******************************************************
# ### FUNCION DE DENSIDAD de la TRANSFORMACION
# #******************************************************
media_y = media_arit_lista(Y_trasnfor)
print("La media de la transformación: ", media_y)
desviacion_y = desvia_estan_lista(Y_trasnfor)
print("La desviación de la transformación", desviacion_y)
distribucion_y = norm(loc=media_y, scale=desviacion_y)
x_transf = np.linspace(distribucion_y.ppf(0.001),distribucion_y.ppf(0.999), 120)
plt.hist(Y_trasnfor,density=True, label='Transformacion')
plt.plot(x_transf, distribucion_y.pdf(x_transf), 'r-', label='PDF teorico')
plt.legend()
plt.savefig('Funcion de densidad de la transformacion')
|
[
"numpy.random.uniform",
"scipy.stats.norm",
"csv.reader",
"scipy.stats.rayleigh.fit",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.legend",
"scipy.stats.rayleigh",
"matplotlib.pyplot.cla",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((1972, 2007), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (1982, 2007), False, 'import csv\n'), ((2664, 2688), 'scipy.stats.rayleigh.fit', 'stats.rayleigh.fit', (['data'], {}), '(data)\n', (2682, 2688), True, 'import scipy.stats as stats\n'), ((2726, 2751), 'scipy.stats.rayleigh', 'stats.rayleigh', (['mu', 'sigma'], {}), '(mu, sigma)\n', (2740, 2751), True, 'import scipy.stats as stats\n'), ((4747, 4779), 'matplotlib.pyplot.hist', 'plt.hist', (['data', '(30)'], {'density': '(True)'}), '(data, 30, density=True)\n', (4755, 4779), True, 'import matplotlib.pyplot as plt\n'), ((4780, 4814), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Histograma de data """'], {}), "('Histograma de data ')\n", (4791, 4814), True, 'import matplotlib.pyplot as plt\n'), ((4869, 4878), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4876, 4878), True, 'import matplotlib.pyplot as plt\n'), ((5196, 5214), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (5208, 5214), True, 'import matplotlib.pyplot as plt\n'), ((5260, 5356), 'matplotlib.pyplot.hist', 'plt.hist', (['V_norm_hist'], {'bins': 'num_bins', 'density': '(False)', 'rwidth': '(1)', 'ec': '"""blue"""', 'label': '"""Histograma"""'}), "(V_norm_hist, bins=num_bins, density=False, rwidth=1, ec='blue',\n label='Histograma')\n", (5268, 5356), True, 'import matplotlib.pyplot as plt\n'), ((5358, 5393), 'numpy.linspace', 'np.linspace', (['bins[0]', 'bins[-1]', '(120)'], {}), '(bins[0], bins[-1], 120)\n', (5369, 5393), True, 'import numpy as np\n'), ((5602, 5614), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5612, 5614), True, 'import matplotlib.pyplot as plt\n'), ((5616, 5652), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Curva de mejor ajuste"""'], {}), "('Curva de mejor ajuste')\n", (5627, 5652), True, 'import matplotlib.pyplot as plt\n'), ((5707, 5716), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (5714, 5716), 
True, 'import matplotlib.pyplot as plt\n'), ((7754, 7778), 'matplotlib.pyplot.hist', 'plt.hist', (['Y_trasnfor', '(30)'], {}), '(Y_trasnfor, 30)\n', (7762, 7778), True, 'import matplotlib.pyplot as plt\n'), ((7779, 7819), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Histograma transformacion"""'], {}), "('Histograma transformacion')\n", (7790, 7819), True, 'import matplotlib.pyplot as plt\n'), ((7874, 7883), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (7881, 7883), True, 'import matplotlib.pyplot as plt\n'), ((8283, 8320), 'scipy.stats.norm', 'norm', ([], {'loc': 'media_y', 'scale': 'desviacion_y'}), '(loc=media_y, scale=desviacion_y)\n', (8287, 8320), False, 'from scipy.stats import norm\n'), ((8404, 8462), 'matplotlib.pyplot.hist', 'plt.hist', (['Y_trasnfor'], {'density': '(True)', 'label': '"""Transformacion"""'}), "(Y_trasnfor, density=True, label='Transformacion')\n", (8412, 8462), True, 'import matplotlib.pyplot as plt\n'), ((8540, 8552), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8550, 8552), True, 'import matplotlib.pyplot as plt\n'), ((8554, 8609), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Funcion de densidad de la transformacion"""'], {}), "('Funcion de densidad de la transformacion')\n", (8565, 8609), True, 'import matplotlib.pyplot as plt\n'), ((5094, 5112), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (5101, 5112), True, 'import numpy as np\n'), ((5467, 5485), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (5474, 5485), True, 'import numpy as np\n'), ((5156, 5182), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (5173, 5182), True, 'import numpy as np\n'), ((5497, 5526), 'scipy.stats.rayleigh', 'rayleigh', ([], {'loc': '(0)', 'scale': 'escala'}), '(loc=0, scale=escala)\n', (5505, 5526), False, 'from scipy.stats import rayleigh\n')]
|
import matplotlib.pyplot as plt
import numpy as np
# import numpy.linalg as la
from kernels import eval_sp_dp_QBX, sommerfeld
plt.gca().set_aspect("equal")
k = 10
alpha = k # CFIE parameter
beta = 0
interval = 10
xs = 0
ys = 5
sp, dp, _, _, _, _, _, _ = eval_sp_dp_QBX(4, k)
som_sp, _ = sommerfeld(k, beta, interval, "full")
tg_size = 101
x = np.linspace(-5, 5, tg_size)
y = np.linspace(0, 10, tg_size)
X, Y = np.meshgrid(x, y)
test_targets = np.array((X.reshape(-1), Y.reshape(-1)))
exact_test = sp(test_targets[0], test_targets[1], xs, ys).reshape(tg_size, -1)
som_test = som_sp(test_targets[0], test_targets[1], xs, ys).reshape(tg_size, -1)
v = np.linspace(-0.15, 0.15, 100, endpoint=True)
# plt.contourf(X, Y, np.real(exact_test), v,
# cmap='magma', extend='both')
plt.figure(1)
plt.contourf(X, Y, np.real(exact_test), v, cmap="twilight")
plt.colorbar()
plt.figure(2)
plt.contourf(X, Y, np.real(exact_test + som_test), v, cmap="twilight")
plt.colorbar()
plt.show()
|
[
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"kernels.eval_sp_dp_QBX",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"kernels.sommerfeld",
"numpy.linspace",
"numpy.real"
] |
[((259, 279), 'kernels.eval_sp_dp_QBX', 'eval_sp_dp_QBX', (['(4)', 'k'], {}), '(4, k)\n', (273, 279), False, 'from kernels import eval_sp_dp_QBX, sommerfeld\n'), ((292, 329), 'kernels.sommerfeld', 'sommerfeld', (['k', 'beta', 'interval', '"""full"""'], {}), "(k, beta, interval, 'full')\n", (302, 329), False, 'from kernels import eval_sp_dp_QBX, sommerfeld\n'), ((349, 376), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', 'tg_size'], {}), '(-5, 5, tg_size)\n', (360, 376), True, 'import numpy as np\n'), ((381, 408), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', 'tg_size'], {}), '(0, 10, tg_size)\n', (392, 408), True, 'import numpy as np\n'), ((416, 433), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (427, 433), True, 'import numpy as np\n'), ((654, 698), 'numpy.linspace', 'np.linspace', (['(-0.15)', '(0.15)', '(100)'], {'endpoint': '(True)'}), '(-0.15, 0.15, 100, endpoint=True)\n', (665, 698), True, 'import numpy as np\n'), ((788, 801), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (798, 801), True, 'import matplotlib.pyplot as plt\n'), ((862, 876), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (874, 876), True, 'import matplotlib.pyplot as plt\n'), ((877, 890), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (887, 890), True, 'import matplotlib.pyplot as plt\n'), ((962, 976), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (974, 976), True, 'import matplotlib.pyplot as plt\n'), ((977, 987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (985, 987), True, 'import matplotlib.pyplot as plt\n'), ((821, 840), 'numpy.real', 'np.real', (['exact_test'], {}), '(exact_test)\n', (828, 840), True, 'import numpy as np\n'), ((910, 940), 'numpy.real', 'np.real', (['(exact_test + som_test)'], {}), '(exact_test + som_test)\n', (917, 940), True, 'import numpy as np\n'), ((128, 137), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (135, 137), True, 'import matplotlib.pyplot as 
plt\n')]
|
"""Authors: <NAME> and <NAME>."""
from nwb_conversion_tools.basedatainterface import BaseDataInterface
from pynwb import NWBFile
import os
import warnings
from lxml import etree as et
import numpy as np
from ..utils.neuroscope import read_lfp, write_lfp, write_spike_waveforms
class GrosmarkLFPInterface(BaseDataInterface):
"""Primary data interface for LFP aspects of the GrosmarkAD dataset."""
@classmethod
def get_input_schema(cls):
"""Return subset of json schema for informing the NWBConverter of expepcted input arguments."""
return dict(properties=dict(folder_path="string"))
def convert_data(self, nwbfile: NWBFile, metadata: dict, stub_test: bool = False):
"""Convert the LFP portion of a particular session of the GrosmarkAD dataset."""
session_path = self.input_args["folder_path"]
subject_path, session_id = os.path.split(session_path)
if "_" in session_id:
subject_id, date_text = session_id.split("_")
xml_filepath = os.path.join(session_path, "{}.xml".format(session_id))
root = et.parse(xml_filepath).getroot()
shank_channels = [
[int(channel.text) for channel in group.find("channels")]
for group in root.find("spikeDetection").find("channelGroups").findall("group")
]
all_shank_channels = np.concatenate(shank_channels)
all_shank_channels.sort()
lfp_sampling_rate = float(root.find("fieldPotentials").find("lfpSamplingRate").text)
spikes_nsamples = int(root.find("neuroscope").find("spikes").find("nSamples").text)
subject_path, session_id = os.path.split(session_path)
_, all_channels_lfp_data = read_lfp(session_path, stub=stub_test)
try:
lfp_data = all_channels_lfp_data[:, all_shank_channels]
except IndexError:
warnings.warn("Encountered indexing issue for all_shank_channels on lfp_data subsetting; using entire lfp!")
lfp_data = all_channels_lfp_data
write_lfp(
nwbfile,
lfp_data,
lfp_sampling_rate,
name="lfp",
description="lfp signal for all shank electrodes",
electrode_inds=None,
)
write_spike_waveforms(
nwbfile,
session_path,
spikes_nsamples=spikes_nsamples,
shank_channels=shank_channels,
stub_test=stub_test,
)
|
[
"lxml.etree.parse",
"warnings.warn",
"os.path.split",
"numpy.concatenate"
] |
[((881, 908), 'os.path.split', 'os.path.split', (['session_path'], {}), '(session_path)\n', (894, 908), False, 'import os\n'), ((1354, 1384), 'numpy.concatenate', 'np.concatenate', (['shank_channels'], {}), '(shank_channels)\n', (1368, 1384), True, 'import numpy as np\n'), ((1640, 1667), 'os.path.split', 'os.path.split', (['session_path'], {}), '(session_path)\n', (1653, 1667), False, 'import os\n'), ((1092, 1114), 'lxml.etree.parse', 'et.parse', (['xml_filepath'], {}), '(xml_filepath)\n', (1100, 1114), True, 'from lxml import etree as et\n'), ((1863, 1981), 'warnings.warn', 'warnings.warn', (['"""Encountered indexing issue for all_shank_channels on lfp_data subsetting; using entire lfp!"""'], {}), "(\n 'Encountered indexing issue for all_shank_channels on lfp_data subsetting; using entire lfp!'\n )\n", (1876, 1981), False, 'import warnings\n')]
|
from decimal import Decimal
import math
import numpy as np
import pyproj
WGS84_LATLON_EPSG = 4326
# There's significant overhead in pyproj when building a Transformer object.
# Without a cache a Transformer can be built many times per request, even for
# the same CRS.
_TRANSFORMER_CACHE = {}
def reproject_latlons(lats, lons, epsg=None, wkt=None):
"""Convert WGS84 latlons to another projection.
Args:
lats, lons: Lists/arrays of latitude/longitude numbers.
epsg: Integer EPSG code.
"""
if epsg is None and wkt is None:
raise ValueError("Must provide either epsg or wkt.")
if epsg and wkt:
raise ValueError("Must provide only one of epsg or wkt.")
if epsg == WGS84_LATLON_EPSG:
return lons, lats
# Validate EPSG.
if epsg is not None and (not 1024 <= epsg <= 32767):
raise ValueError("Dataset has invalid epsg projection.")
# Load transformer.
to_crs = wkt or f"EPSG:{epsg}"
if to_crs in _TRANSFORMER_CACHE:
transformer = _TRANSFORMER_CACHE[to_crs]
else:
from_crs = f"EPSG:{WGS84_LATLON_EPSG}"
transformer = pyproj.transformer.Transformer.from_crs(
from_crs, to_crs, always_xy=True
)
_TRANSFORMER_CACHE[to_crs] = transformer
# Do the transform.
x, y = transformer.transform(lons, lats)
return x, y
def base_floor(x, base=1):
"""Round number down to nearest multiple of base."""
return base * np.floor(x / base)
def decimal_base_floor(x, base=1):
"""Round decimal down to nearest multiple of base."""
if not isinstance(base, (Decimal, int)):
raise ValueError("Base must be an integer or decimal.")
integer = math.floor(x / float(base))
return base * Decimal(integer)
def safe_is_nan(x):
"""Is the value NaN (not a number).
Returns True for np.nan and nan python float. Returns False for anything
else, including None and non-numeric types.
Called safe because it won't raise a TypeError for non-numerics.
Args:
x: Object to check for NaN.
Returns:
Boolean whether the object is NaN.
"""
try:
return math.isnan(x)
except TypeError:
return False
def fill_na(a, value):
"""Replace NaN values in a with provided value.
Args:
a: Iterable, possibly containing NaN items.
value: WHat NaN values should be replaced with.
Returns:
List same length as a, with NaN values replaced.
"""
return [value if safe_is_nan(x) else x for x in a]
|
[
"math.isnan",
"pyproj.transformer.Transformer.from_crs",
"decimal.Decimal",
"numpy.floor"
] |
[((1140, 1213), 'pyproj.transformer.Transformer.from_crs', 'pyproj.transformer.Transformer.from_crs', (['from_crs', 'to_crs'], {'always_xy': '(True)'}), '(from_crs, to_crs, always_xy=True)\n', (1179, 1213), False, 'import pyproj\n'), ((1476, 1494), 'numpy.floor', 'np.floor', (['(x / base)'], {}), '(x / base)\n', (1484, 1494), True, 'import numpy as np\n'), ((1759, 1775), 'decimal.Decimal', 'Decimal', (['integer'], {}), '(integer)\n', (1766, 1775), False, 'from decimal import Decimal\n'), ((2171, 2184), 'math.isnan', 'math.isnan', (['x'], {}), '(x)\n', (2181, 2184), False, 'import math\n')]
|
# Import necessary modules
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
# TODO
# import ridge_x and ridge_y from /datasets
def display_plot(cv_scores, cv_scores_std):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(alpha_space, cv_scores)
std_error = cv_scores_std / np.sqrt(10)
ax.fill_between(
alpha_space, cv_scores + std_error, cv_scores - std_error, alpha=0.2
)
ax.set_ylabel("CV Score +/- Std Error")
ax.set_xlabel("Alpha")
ax.axhline(np.max(cv_scores), linestyle="--", color=".5")
ax.set_xlim([alpha_space[0], alpha_space[-1]])
ax.set_xscale("log")
plt.show()
# Setup the array of alphas and lists to store scores
ridge = Ridge(normalize=True)
ridge_scores = []
ridge_scores_std = []
alpha_space = np.logspace(-4, 0, 50)
for alpha in alpha_space:
ridge.alpha = alpha
# Perform 10-fold CV: ridge_cv_scores
ridge_cv_scores = cross_val_score(ridge, X, y, cv=10)
# Append the mean of ridge_cv_scores to ridge_scores
ridge_scores.append(np.mean(ridge_cv_scores))
# Append the std of ridge_cv_scores to ridge_scores_std
ridge_scores_std.append(np.std(ridge_cv_scores))
# Display the plot
display_plot(ridge_scores, ridge_scores_std)
|
[
"matplotlib.pyplot.show",
"numpy.std",
"numpy.logspace",
"sklearn.model_selection.cross_val_score",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.mean",
"sklearn.linear_model.Ridge",
"numpy.sqrt"
] |
[((799, 820), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'normalize': '(True)'}), '(normalize=True)\n', (804, 820), False, 'from sklearn.linear_model import Ridge\n'), ((876, 898), 'numpy.logspace', 'np.logspace', (['(-4)', '(0)', '(50)'], {}), '(-4, 0, 50)\n', (887, 898), True, 'import numpy as np\n'), ((277, 289), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (287, 289), True, 'import matplotlib.pyplot as plt\n'), ((723, 733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (731, 733), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1049), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['ridge', 'X', 'y'], {'cv': '(10)'}), '(ridge, X, y, cv=10)\n', (1029, 1049), False, 'from sklearn.model_selection import cross_val_score\n'), ((393, 404), 'numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (400, 404), True, 'import numpy as np\n'), ((596, 613), 'numpy.max', 'np.max', (['cv_scores'], {}), '(cv_scores)\n', (602, 613), True, 'import numpy as np\n'), ((1132, 1156), 'numpy.mean', 'np.mean', (['ridge_cv_scores'], {}), '(ridge_cv_scores)\n', (1139, 1156), True, 'import numpy as np\n'), ((1247, 1270), 'numpy.std', 'np.std', (['ridge_cv_scores'], {}), '(ridge_cv_scores)\n', (1253, 1270), True, 'import numpy as np\n')]
|
"""
@File : game
@author : yulosun
@Date : 10/11/19
@license:
"""
import actr
import time
import math
import numpy as np
import numbers
import matplotlib.pyplot as plt
import SYL_spt2
import datetime
actr.load_act_r_model(r"C:\Users\syl\Desktop\ACTR_ATO\sp_new.lisp")
response = False
t = 0
choice = None
done = False
def button_pressed(len, dir):
global choice, speed_actual, speed_target
if not (choice):
choice = dir
if not (done):
if int(speed_actual[0]) > int(speed_target[0]):
speed_actual[0] = str(int(speed_actual[0]) - int(len))
elif int(speed_actual[0]) == int(speed_target[0]):
speed_actual[0] = speed_actual[0]
else:
speed_actual[0] = str(int(speed_actual[0]) + int(len))
def button_stop_pressed(dir):
speed_actual[0] = str(0)
def number_sims(a, b):
if isinstance(b, numbers.Number) and isinstance(a, numbers.Number):
return abs(a - b) / -300
else:
return False
def compute_difference():
c = actr.buffer_read('imaginal')
n = actr.copy_chunk(c)
actr.mod_chunk(n,'difference',abs(actr.chunk_slot_value(c,'length') - actr.chunk_slot_value(c,'goal-length')))
return n
def experiment(human=False):
global response
response = False
if human == True:
while response == False:
actr.process_events()
else:
actr.install_device(window)
actr.start_hand_at_mouse()
actr.run(10, True)
actr.remove_command_monitor("output-key", "sp-key-press")
actr.remove_command("sp-key-press")
print(actr.get_time(model_time=True))
actr.run_n_events(2, real_time=False)
return response
if __name__ == '__main__':
# 绘制目标曲线
targetgroup = []
# 绘制实际运行曲线
actualgroup = []
window = actr.open_exp_window("速度曲线追踪", visible=True, width=600, height=600, x=100, y=100)
actr.install_device(window)
actr.add_text_to_exp_window(window, text="当前推荐速度:", x=10, y=60, height=40, width=95, color='black', font_size=22)
# actr.add_text_to_exp_window(window, text="当前速度差值:", x=10, y=20, height=40, width=180, color='black', font_size=22)
actr.add_text_to_exp_window(window, text="当前实际速度:", x=10, y=100, height=40, width=95, color='black', font_size=22)
actr.add_text_to_exp_window(window, text="距离车站位置:", x=10, y=140, height=40, width=95, color='black', font_size=22)
actr.add_text_to_exp_window(window, text="当前速度差值:", x=10, y=180, height=40, width=95, color='black', font_size=22)
speed_target = [""]
speed_actual = ["40"]
timegroup = []
# actr.add_image_to_exp_window(window, "background", "ref-brain.gif", x=0, y=0, width=390, height=390)
# actr.add_items_to_exp_window(window,actr.create_image_for_exp_window(window, "brain", "ref-brain.gif", x=10, y=160, width=128,
# height=128, action="click-brain-py"))
start_time = datetime.datetime.now()
for i in range(2191,2379):
t += 1
# t = time.time()
# speed_target[0] = math.log2(t+1)
speed_target[0] = SYL_spt2.train_model().target_v(i)
print("目标速度:", speed_target[0])
recomed_speed = speed_target[0] # 推荐速度
text2 = speed_actual[0] # 实际速度
text3 = str(int(recomed_speed) - int(text2)) # 速度差值
if int(text3) == 0:
text3 = "0"
elif int(text3) > 0:
text3 = "+"
else:
text3 = "-"
text4 = "3" # 距离目标车站的距离
text5 = "前方将出现斜坡!"
x1_target_speed = actr.add_text_to_exp_window(window, str(speed_target), x=200, y=60, color='black', height=50, width=100, font_size=22)
x2_actual_speed = actr.add_text_to_exp_window(window, text2, x=200, y=100, color='black', height=50, width=100, font_size=22)
x3_delta_speed = actr.add_text_to_exp_window(window, text3, x=200, y=20, color='black', height=50, width=100, font_size=22)
x4_delta_distance = actr.add_text_to_exp_window(window, text4, x=200, y=140, color='black', height=50, width=100, font_size=22)
x5 = actr.add_text_to_exp_window(window, str(int(recomed_speed) - int(text2)), x=200, y=180, color='black', height=50, width=100, font_size=22)
x6 = actr.add_text_to_exp_window(window, text5, x=10, y=240, color='red', height=50, width=100, font_size=22)
# actr.start_hand_at_mouse()
actr.add_command("sp-button-pressed-up-keep-down", button_pressed,"sp press button(up\keep\down) task")
actr.add_command("sp-number-sims", number_sims, "Similarity hook function for building sticks task.")
actr.add_command("sp-button-stop-pressed", button_stop_pressed, "sp task output-key monitor")
actr.add_command("sp-compute-difference", compute_difference,"Imaginal action function to compute the difference between sticks.")
actr.monitor_command("output-key", "sp-key-press")
experiment(human=False)
actr.reset()
actr.remove_items_from_exp_window(window, x1_target_speed)
actr.remove_items_from_exp_window(window, x2_actual_speed)
actr.remove_items_from_exp_window(window, x3_delta_speed)
actr.remove_items_from_exp_window(window, x4_delta_distance)
actr.remove_items_from_exp_window(window, x5)
targetgroup.append(speed_target[0])
actualgroup.append(speed_actual[0])
actr.add_button_to_exp_window(window, text="7", x=500, y=60, action=["sp-button-pressed", 0.2, "up"], height=20, width=100, color='yellow')
actr.add_button_to_exp_window(window, text="6", x=500, y=80, action=["sp-button-pressed", 0.2, "up"],height=20, width=100, color='yellow')
actr.add_button_to_exp_window(window, text="5", x=500, y=100, action=["sp-button-pressed", 0.2, "up"],height=20, width=100, color='yellow')
actr.add_button_to_exp_window(window, text="4", x=500, y=120, action=["sp-button-pressed", 0.2, "up"],height=20, width=100, color='yellow')
actr.add_button_to_exp_window(window, text="3", x=500, y=140, action=["sp-button-pressed", 0.2, "up"],height=20, width=100, color='yellow')
actr.add_button_to_exp_window(window, text="2", x=500, y=160, action=["sp-button-pressed", 0.2, "up"],height=20, width=100, color='yellow')
actr.add_button_to_exp_window(window, text="1", x=500, y=180, action=["sp-button-pressed", 0.2, "up"],height=20, width=100, color='yellow')
actr.add_button_to_exp_window(window, text="up", x=500, y=200, action=["sp-button-pressed-up-keep-down", 2, "up"], height=20, width=100,color='yellow')
actr.add_button_to_exp_window(window, text="keep", x=500, y=220, action=["sp-button-pressed-up-keep-down", 0, "keep"], height=20, width=100, color='gray')
actr.add_button_to_exp_window(window, text="down", x=500, y=240, action=["sp-button-pressed-up-keep-down", 2, "down"], height=20, width=100,color='green')
actr.add_button_to_exp_window(window, text="-2", x=500, y=260, action=["sp-button-pressed", 0.2, "down"],height=20, width=100, color='green')
actr.add_button_to_exp_window(window, text="-3", x=500, y=280, action=["sp-button-pressed", 0.2, "down"],height=20, width=100, color='green')
actr.add_button_to_exp_window(window, text="-4", x=500, y=300, action=["sp-button-pressed", 0.2, "down"],height=20, width=100, color='green')
actr.add_button_to_exp_window(window, text="-5", x=500, y=320, action=["sp-button-pressed", 0.2, "down"],height=20, width=100, color='green')
actr.add_button_to_exp_window(window, text="-6", x=500, y=340, action=["sp-button-pressed", 0.2, "down"],height=20, width=100, color='green')
actr.add_button_to_exp_window(window, text="-7", x=500, y=360, action=["sp-button-pressed", 0.2, "down"],height=20, width=100, color='green')
actr.add_button_to_exp_window(window, text="EB", x=500, y=380, action=["sp-button-stop-pressed",0.2, "stop"],height=20, width=100,color='red')
# actr.start_hand_at_mouse()
timegroup.append(t)
actr.print_visicon()
x = np.arange(0, 350)
l1 = plt.plot(timegroup, targetgroup, 'r--',label='targetspeed')
l2 = plt.plot(timegroup, actualgroup, 'g--', label='actualspeed')
plt.plot(timegroup, targetgroup, 'ro-',timegroup, actualgroup, 'g+-')
plt.title('Speed tracking')
plt.xlabel('time')
plt.ylabel('speed value')
plt.legend()
end_time = datetime.datetime.now()
interval = (end_time - start_time).seconds
print("程序共执行了:",interval)
my_y_ticks = np.arange(0, 100, 0.3)
plt.yticks(my_y_ticks)
plt.show()
|
[
"matplotlib.pyplot.title",
"actr.run",
"actr.process_events",
"actr.copy_chunk",
"actr.add_text_to_exp_window",
"actr.monitor_command",
"actr.chunk_slot_value",
"numpy.arange",
"actr.remove_items_from_exp_window",
"actr.add_command",
"actr.buffer_read",
"actr.load_act_r_model",
"matplotlib.pyplot.yticks",
"actr.install_device",
"actr.remove_command",
"actr.remove_command_monitor",
"datetime.datetime.now",
"actr.start_hand_at_mouse",
"matplotlib.pyplot.show",
"actr.open_exp_window",
"matplotlib.pyplot.legend",
"actr.print_visicon",
"actr.run_n_events",
"actr.get_time",
"matplotlib.pyplot.ylabel",
"actr.add_button_to_exp_window",
"actr.reset",
"matplotlib.pyplot.plot",
"SYL_spt2.train_model",
"matplotlib.pyplot.xlabel"
] |
[((224, 295), 'actr.load_act_r_model', 'actr.load_act_r_model', (['"""C:\\\\Users\\\\syl\\\\Desktop\\\\ACTR_ATO\\\\sp_new.lisp"""'], {}), "('C:\\\\Users\\\\syl\\\\Desktop\\\\ACTR_ATO\\\\sp_new.lisp')\n", (245, 295), False, 'import actr\n'), ((1076, 1104), 'actr.buffer_read', 'actr.buffer_read', (['"""imaginal"""'], {}), "('imaginal')\n", (1092, 1104), False, 'import actr\n'), ((1114, 1132), 'actr.copy_chunk', 'actr.copy_chunk', (['c'], {}), '(c)\n', (1129, 1132), False, 'import actr\n'), ((1547, 1604), 'actr.remove_command_monitor', 'actr.remove_command_monitor', (['"""output-key"""', '"""sp-key-press"""'], {}), "('output-key', 'sp-key-press')\n", (1574, 1604), False, 'import actr\n'), ((1610, 1645), 'actr.remove_command', 'actr.remove_command', (['"""sp-key-press"""'], {}), "('sp-key-press')\n", (1629, 1645), False, 'import actr\n'), ((1694, 1731), 'actr.run_n_events', 'actr.run_n_events', (['(2)'], {'real_time': '(False)'}), '(2, real_time=False)\n', (1711, 1731), False, 'import actr\n'), ((1873, 1958), 'actr.open_exp_window', 'actr.open_exp_window', (['"""速度曲线追踪"""'], {'visible': '(True)', 'width': '(600)', 'height': '(600)', 'x': '(100)', 'y': '(100)'}), "('速度曲线追踪', visible=True, width=600, height=600, x=100,\n y=100)\n", (1893, 1958), False, 'import actr\n'), ((1960, 1987), 'actr.install_device', 'actr.install_device', (['window'], {}), '(window)\n', (1979, 1987), False, 'import actr\n'), ((1993, 2110), 'actr.add_text_to_exp_window', 'actr.add_text_to_exp_window', (['window'], {'text': '"""当前推荐速度:"""', 'x': '(10)', 'y': '(60)', 'height': '(40)', 'width': '(95)', 'color': '"""black"""', 'font_size': '(22)'}), "(window, text='当前推荐速度:', x=10, y=60, height=40,\n width=95, color='black', font_size=22)\n", (2020, 2110), False, 'import actr\n'), ((2234, 2352), 'actr.add_text_to_exp_window', 'actr.add_text_to_exp_window', (['window'], {'text': '"""当前实际速度:"""', 'x': '(10)', 'y': '(100)', 'height': '(40)', 'width': '(95)', 'color': '"""black"""', 'font_size': '(22)'}), 
"(window, text='当前实际速度:', x=10, y=100, height=40,\n width=95, color='black', font_size=22)\n", (2261, 2352), False, 'import actr\n'), ((2354, 2472), 'actr.add_text_to_exp_window', 'actr.add_text_to_exp_window', (['window'], {'text': '"""距离车站位置:"""', 'x': '(10)', 'y': '(140)', 'height': '(40)', 'width': '(95)', 'color': '"""black"""', 'font_size': '(22)'}), "(window, text='距离车站位置:', x=10, y=140, height=40,\n width=95, color='black', font_size=22)\n", (2381, 2472), False, 'import actr\n'), ((2474, 2592), 'actr.add_text_to_exp_window', 'actr.add_text_to_exp_window', (['window'], {'text': '"""当前速度差值:"""', 'x': '(10)', 'y': '(180)', 'height': '(40)', 'width': '(95)', 'color': '"""black"""', 'font_size': '(22)'}), "(window, text='当前速度差值:', x=10, y=180, height=40,\n width=95, color='black', font_size=22)\n", (2501, 2592), False, 'import actr\n'), ((3038, 3061), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3059, 3061), False, 'import datetime\n'), ((8213, 8230), 'numpy.arange', 'np.arange', (['(0)', '(350)'], {}), '(0, 350)\n', (8222, 8230), True, 'import numpy as np\n'), ((8241, 8301), 'matplotlib.pyplot.plot', 'plt.plot', (['timegroup', 'targetgroup', '"""r--"""'], {'label': '"""targetspeed"""'}), "(timegroup, targetgroup, 'r--', label='targetspeed')\n", (8249, 8301), True, 'import matplotlib.pyplot as plt\n'), ((8311, 8371), 'matplotlib.pyplot.plot', 'plt.plot', (['timegroup', 'actualgroup', '"""g--"""'], {'label': '"""actualspeed"""'}), "(timegroup, actualgroup, 'g--', label='actualspeed')\n", (8319, 8371), True, 'import matplotlib.pyplot as plt\n'), ((8377, 8447), 'matplotlib.pyplot.plot', 'plt.plot', (['timegroup', 'targetgroup', '"""ro-"""', 'timegroup', 'actualgroup', '"""g+-"""'], {}), "(timegroup, targetgroup, 'ro-', timegroup, actualgroup, 'g+-')\n", (8385, 8447), True, 'import matplotlib.pyplot as plt\n'), ((8452, 8491), 'matplotlib.pyplot.title', 'plt.title', (['"""Speed \u200b\u200btracking"""'], {}), "('Speed 
\\u200b\\u200btracking')\n", (8461, 8491), True, 'import matplotlib.pyplot as plt\n'), ((8487, 8505), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (8497, 8505), True, 'import matplotlib.pyplot as plt\n'), ((8511, 8536), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""speed value"""'], {}), "('speed value')\n", (8521, 8536), True, 'import matplotlib.pyplot as plt\n'), ((8542, 8554), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8552, 8554), True, 'import matplotlib.pyplot as plt\n'), ((8573, 8596), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8594, 8596), False, 'import datetime\n'), ((8698, 8720), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(0.3)'], {}), '(0, 100, 0.3)\n', (8707, 8720), True, 'import numpy as np\n'), ((8730, 8752), 'matplotlib.pyplot.yticks', 'plt.yticks', (['my_y_ticks'], {}), '(my_y_ticks)\n', (8740, 8752), True, 'import matplotlib.pyplot as plt\n'), ((8760, 8770), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8768, 8770), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1477), 'actr.install_device', 'actr.install_device', (['window'], {}), '(window)\n', (1469, 1477), False, 'import actr\n'), ((1487, 1513), 'actr.start_hand_at_mouse', 'actr.start_hand_at_mouse', ([], {}), '()\n', (1511, 1513), False, 'import actr\n'), ((1523, 1541), 'actr.run', 'actr.run', (['(10)', '(True)'], {}), '(10, True)\n', (1531, 1541), False, 'import actr\n'), ((1657, 1687), 'actr.get_time', 'actr.get_time', ([], {'model_time': '(True)'}), '(model_time=True)\n', (1670, 1687), False, 'import actr\n'), ((3824, 3935), 'actr.add_text_to_exp_window', 'actr.add_text_to_exp_window', (['window', 'text2'], {'x': '(200)', 'y': '(100)', 'color': '"""black"""', 'height': '(50)', 'width': '(100)', 'font_size': '(22)'}), "(window, text2, x=200, y=100, color='black',\n height=50, width=100, font_size=22)\n", (3851, 3935), False, 'import actr\n'), ((3958, 4068), 'actr.add_text_to_exp_window', 
'actr.add_text_to_exp_window', (['window', 'text3'], {'x': '(200)', 'y': '(20)', 'color': '"""black"""', 'height': '(50)', 'width': '(100)', 'font_size': '(22)'}), "(window, text3, x=200, y=20, color='black',\n height=50, width=100, font_size=22)\n", (3985, 4068), False, 'import actr\n'), ((4094, 4205), 'actr.add_text_to_exp_window', 'actr.add_text_to_exp_window', (['window', 'text4'], {'x': '(200)', 'y': '(140)', 'color': '"""black"""', 'height': '(50)', 'width': '(100)', 'font_size': '(22)'}), "(window, text4, x=200, y=140, color='black',\n height=50, width=100, font_size=22)\n", (4121, 4205), False, 'import actr\n'), ((4369, 4478), 'actr.add_text_to_exp_window', 'actr.add_text_to_exp_window', (['window', 'text5'], {'x': '(10)', 'y': '(240)', 'color': '"""red"""', 'height': '(50)', 'width': '(100)', 'font_size': '(22)'}), "(window, text5, x=10, y=240, color='red', height\n =50, width=100, font_size=22)\n", (4396, 4478), False, 'import actr\n'), ((4521, 4631), 'actr.add_command', 'actr.add_command', (['"""sp-button-pressed-up-keep-down"""', 'button_pressed', '"""sp press button(up\\\\keep\\\\down) task"""'], {}), "('sp-button-pressed-up-keep-down', button_pressed,\n 'sp press button(up\\\\keep\\\\down) task')\n", (4537, 4631), False, 'import actr\n'), ((4634, 4739), 'actr.add_command', 'actr.add_command', (['"""sp-number-sims"""', 'number_sims', '"""Similarity hook function for building sticks task."""'], {}), "('sp-number-sims', number_sims,\n 'Similarity hook function for building sticks task.')\n", (4650, 4739), False, 'import actr\n'), ((4745, 4842), 'actr.add_command', 'actr.add_command', (['"""sp-button-stop-pressed"""', 'button_stop_pressed', '"""sp task output-key monitor"""'], {}), "('sp-button-stop-pressed', button_stop_pressed,\n 'sp task output-key monitor')\n", (4761, 4842), False, 'import actr\n'), ((4848, 4983), 'actr.add_command', 'actr.add_command', (['"""sp-compute-difference"""', 'compute_difference', '"""Imaginal action function to compute the 
difference between sticks."""'], {}), "('sp-compute-difference', compute_difference,\n 'Imaginal action function to compute the difference between sticks.')\n", (4864, 4983), False, 'import actr\n'), ((4990, 5040), 'actr.monitor_command', 'actr.monitor_command', (['"""output-key"""', '"""sp-key-press"""'], {}), "('output-key', 'sp-key-press')\n", (5010, 5040), False, 'import actr\n'), ((5083, 5095), 'actr.reset', 'actr.reset', ([], {}), '()\n', (5093, 5095), False, 'import actr\n'), ((5105, 5163), 'actr.remove_items_from_exp_window', 'actr.remove_items_from_exp_window', (['window', 'x1_target_speed'], {}), '(window, x1_target_speed)\n', (5138, 5163), False, 'import actr\n'), ((5173, 5231), 'actr.remove_items_from_exp_window', 'actr.remove_items_from_exp_window', (['window', 'x2_actual_speed'], {}), '(window, x2_actual_speed)\n', (5206, 5231), False, 'import actr\n'), ((5241, 5298), 'actr.remove_items_from_exp_window', 'actr.remove_items_from_exp_window', (['window', 'x3_delta_speed'], {}), '(window, x3_delta_speed)\n', (5274, 5298), False, 'import actr\n'), ((5308, 5368), 'actr.remove_items_from_exp_window', 'actr.remove_items_from_exp_window', (['window', 'x4_delta_distance'], {}), '(window, x4_delta_distance)\n', (5341, 5368), False, 'import actr\n'), ((5378, 5423), 'actr.remove_items_from_exp_window', 'actr.remove_items_from_exp_window', (['window', 'x5'], {}), '(window, x5)\n', (5411, 5423), False, 'import actr\n'), ((5525, 5669), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""7"""', 'x': '(500)', 'y': '(60)', 'action': "['sp-button-pressed', 0.2, 'up']", 'height': '(20)', 'width': '(100)', 'color': '"""yellow"""'}), "(window, text='7', x=500, y=60, action=[\n 'sp-button-pressed', 0.2, 'up'], height=20, width=100, color='yellow')\n", (5554, 5669), False, 'import actr\n'), ((5674, 5818), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""6"""', 'x': '(500)', 'y': '(80)', 
'action': "['sp-button-pressed', 0.2, 'up']", 'height': '(20)', 'width': '(100)', 'color': '"""yellow"""'}), "(window, text='6', x=500, y=80, action=[\n 'sp-button-pressed', 0.2, 'up'], height=20, width=100, color='yellow')\n", (5703, 5818), False, 'import actr\n'), ((5822, 5967), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""5"""', 'x': '(500)', 'y': '(100)', 'action': "['sp-button-pressed', 0.2, 'up']", 'height': '(20)', 'width': '(100)', 'color': '"""yellow"""'}), "(window, text='5', x=500, y=100, action=[\n 'sp-button-pressed', 0.2, 'up'], height=20, width=100, color='yellow')\n", (5851, 5967), False, 'import actr\n'), ((5971, 6116), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""4"""', 'x': '(500)', 'y': '(120)', 'action': "['sp-button-pressed', 0.2, 'up']", 'height': '(20)', 'width': '(100)', 'color': '"""yellow"""'}), "(window, text='4', x=500, y=120, action=[\n 'sp-button-pressed', 0.2, 'up'], height=20, width=100, color='yellow')\n", (6000, 6116), False, 'import actr\n'), ((6120, 6265), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""3"""', 'x': '(500)', 'y': '(140)', 'action': "['sp-button-pressed', 0.2, 'up']", 'height': '(20)', 'width': '(100)', 'color': '"""yellow"""'}), "(window, text='3', x=500, y=140, action=[\n 'sp-button-pressed', 0.2, 'up'], height=20, width=100, color='yellow')\n", (6149, 6265), False, 'import actr\n'), ((6269, 6414), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""2"""', 'x': '(500)', 'y': '(160)', 'action': "['sp-button-pressed', 0.2, 'up']", 'height': '(20)', 'width': '(100)', 'color': '"""yellow"""'}), "(window, text='2', x=500, y=160, action=[\n 'sp-button-pressed', 0.2, 'up'], height=20, width=100, color='yellow')\n", (6298, 6414), False, 'import actr\n'), ((6418, 6563), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', 
(['window'], {'text': '"""1"""', 'x': '(500)', 'y': '(180)', 'action': "['sp-button-pressed', 0.2, 'up']", 'height': '(20)', 'width': '(100)', 'color': '"""yellow"""'}), "(window, text='1', x=500, y=180, action=[\n 'sp-button-pressed', 0.2, 'up'], height=20, width=100, color='yellow')\n", (6447, 6563), False, 'import actr\n'), ((6567, 6729), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""up"""', 'x': '(500)', 'y': '(200)', 'action': "['sp-button-pressed-up-keep-down', 2, 'up']", 'height': '(20)', 'width': '(100)', 'color': '"""yellow"""'}), "(window, text='up', x=500, y=200, action=[\n 'sp-button-pressed-up-keep-down', 2, 'up'], height=20, width=100, color\n ='yellow')\n", (6596, 6729), False, 'import actr\n'), ((6728, 6891), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""keep"""', 'x': '(500)', 'y': '(220)', 'action': "['sp-button-pressed-up-keep-down', 0, 'keep']", 'height': '(20)', 'width': '(100)', 'color': '"""gray"""'}), "(window, text='keep', x=500, y=220, action=[\n 'sp-button-pressed-up-keep-down', 0, 'keep'], height=20, width=100,\n color='gray')\n", (6757, 6891), False, 'import actr\n'), ((6892, 7056), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""down"""', 'x': '(500)', 'y': '(240)', 'action': "['sp-button-pressed-up-keep-down', 2, 'down']", 'height': '(20)', 'width': '(100)', 'color': '"""green"""'}), "(window, text='down', x=500, y=240, action=[\n 'sp-button-pressed-up-keep-down', 2, 'down'], height=20, width=100,\n color='green')\n", (6921, 7056), False, 'import actr\n'), ((7056, 7203), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""-2"""', 'x': '(500)', 'y': '(260)', 'action': "['sp-button-pressed', 0.2, 'down']", 'height': '(20)', 'width': '(100)', 'color': '"""green"""'}), "(window, text='-2', x=500, y=260, action=[\n 'sp-button-pressed', 0.2, 'down'], height=20, 
width=100, color='green')\n", (7085, 7203), False, 'import actr\n'), ((7207, 7354), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""-3"""', 'x': '(500)', 'y': '(280)', 'action': "['sp-button-pressed', 0.2, 'down']", 'height': '(20)', 'width': '(100)', 'color': '"""green"""'}), "(window, text='-3', x=500, y=280, action=[\n 'sp-button-pressed', 0.2, 'down'], height=20, width=100, color='green')\n", (7236, 7354), False, 'import actr\n'), ((7358, 7505), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""-4"""', 'x': '(500)', 'y': '(300)', 'action': "['sp-button-pressed', 0.2, 'down']", 'height': '(20)', 'width': '(100)', 'color': '"""green"""'}), "(window, text='-4', x=500, y=300, action=[\n 'sp-button-pressed', 0.2, 'down'], height=20, width=100, color='green')\n", (7387, 7505), False, 'import actr\n'), ((7509, 7656), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""-5"""', 'x': '(500)', 'y': '(320)', 'action': "['sp-button-pressed', 0.2, 'down']", 'height': '(20)', 'width': '(100)', 'color': '"""green"""'}), "(window, text='-5', x=500, y=320, action=[\n 'sp-button-pressed', 0.2, 'down'], height=20, width=100, color='green')\n", (7538, 7656), False, 'import actr\n'), ((7660, 7807), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""-6"""', 'x': '(500)', 'y': '(340)', 'action': "['sp-button-pressed', 0.2, 'down']", 'height': '(20)', 'width': '(100)', 'color': '"""green"""'}), "(window, text='-6', x=500, y=340, action=[\n 'sp-button-pressed', 0.2, 'down'], height=20, width=100, color='green')\n", (7689, 7807), False, 'import actr\n'), ((7811, 7958), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""-7"""', 'x': '(500)', 'y': '(360)', 'action': "['sp-button-pressed', 0.2, 'down']", 'height': '(20)', 'width': '(100)', 'color': '"""green"""'}), "(window, text='-7', 
x=500, y=360, action=[\n 'sp-button-pressed', 0.2, 'down'], height=20, width=100, color='green')\n", (7840, 7958), False, 'import actr\n'), ((7962, 8112), 'actr.add_button_to_exp_window', 'actr.add_button_to_exp_window', (['window'], {'text': '"""EB"""', 'x': '(500)', 'y': '(380)', 'action': "['sp-button-stop-pressed', 0.2, 'stop']", 'height': '(20)', 'width': '(100)', 'color': '"""red"""'}), "(window, text='EB', x=500, y=380, action=[\n 'sp-button-stop-pressed', 0.2, 'stop'], height=20, width=100, color='red')\n", (7991, 8112), False, 'import actr\n'), ((8181, 8201), 'actr.print_visicon', 'actr.print_visicon', ([], {}), '()\n', (8199, 8201), False, 'import actr\n'), ((1408, 1429), 'actr.process_events', 'actr.process_events', ([], {}), '()\n', (1427, 1429), False, 'import actr\n'), ((1172, 1206), 'actr.chunk_slot_value', 'actr.chunk_slot_value', (['c', '"""length"""'], {}), "(c, 'length')\n", (1193, 1206), False, 'import actr\n'), ((1208, 1247), 'actr.chunk_slot_value', 'actr.chunk_slot_value', (['c', '"""goal-length"""'], {}), "(c, 'goal-length')\n", (1229, 1247), False, 'import actr\n'), ((3210, 3232), 'SYL_spt2.train_model', 'SYL_spt2.train_model', ([], {}), '()\n', (3230, 3232), False, 'import SYL_spt2\n')]
|
import numpy as np
def radius_of_curvature(pixels_x, pixels_y, mx, my):
    """Return the radius of curvature, in meters, of a parabola fitted
    through the given lane-line pixels.

    Args:
        pixels_x: x pixel coordinates of the lane line (or ``None``).
        pixels_y: y pixel coordinates of the lane line (or ``None``).
        mx: meters per pixel along x.
        my: meters per pixel along y.

    Returns:
        The curvature radius evaluated at the largest y (image bottom),
        or 0 when either coordinate array is missing.
    """
    if pixels_x is None or pixels_y is None:
        return 0

    # Evaluate at the point closest to the vehicle (largest y), in meters.
    eval_y = my * np.max(pixels_y)

    # Fit x = a*y^2 + b*y + c in world (meter) coordinates.
    coeffs = np.polyfit(pixels_y * my, pixels_x * mx, 2)
    a, b = coeffs[0], coeffs[1]

    # Standard radius-of-curvature formula for a parabola:
    # R = (1 + (2a*y + b)^2)^(3/2) / |2a|
    return (1 + (2 * a * eval_y + b) ** 2) ** 1.5 / np.absolute(2 * a)
# Calculate curvature in meters, based on a fitted parabola.
def measure_curvature_real(left_fit, right_fit, ploty, xm, ym):
    """Compute the real-world (meter) curvature of both lane lines.

    Delegates to ``radius_of_curvature`` for each side.

    Returns:
        Tuple ``(left_curvature, right_curvature)`` in meters.
    """
    return (
        radius_of_curvature(left_fit, ploty, xm, ym),
        radius_of_curvature(right_fit, ploty, xm, ym),
    )
def calulate_position(left_fit, right_fit, xm):
    """Return the lateral offset of the lane center from the camera
    center, in meters.

    Assumes a 1280 px wide image, i.e. the camera center is at x = 640.
    ``left_fit``/``right_fit`` are arrays of x pixel positions; only the
    last entry (image bottom) of each is used.

    Returns 0 when either lane fit is missing.
    """
    if left_fit is None or right_fit is None:
        return 0

    # Midpoint between the two lane lines at the bottom of the image.
    lane_center_px = left_fit[-1] + (right_fit[-1] - left_fit[-1]) / 2

    # Signed pixel offset from the image center, converted to meters.
    return (lane_center_px - 640) * xm
def calulate_lane_size(left_fit, right_fit, xm):
    """Return lane-width statistics in meters.

    Args:
        left_fit: array of left-lane x pixel positions (or ``None``).
        right_fit: array of right-lane x pixel positions (or ``None``).
        xm: meters per pixel along x.

    Returns:
        Tuple ``(width_at_bottom, min_width, max_width)``.
        Returns ``(0, 0, 0)`` when either fit is missing.
    """
    if left_fit is None or right_fit is None:
        # Bug fix: this branch previously returned FOUR zeros while the
        # normal path returns THREE values, so 3-way unpacking at call
        # sites would raise on missing fits.
        return 0, 0, 0

    # Element-wise width between the lane lines, converted to meters.
    lane_width_m = (right_fit - left_fit) * xm

    lane_width = lane_width_m[-1]  # width at the image bottom
    lane_width_min = np.min(lane_width_m)
    lane_width_max = np.max(lane_width_m)
    return lane_width, lane_width_min, lane_width_max
|
[
"numpy.absolute",
"numpy.min",
"numpy.max",
"numpy.polyfit"
] |
[((181, 224), 'numpy.polyfit', 'np.polyfit', (['(pixels_y * my)', '(pixels_x * mx)', '(2)'], {}), '(pixels_y * my, pixels_x * mx, 2)\n', (191, 224), True, 'import numpy as np\n'), ((1325, 1345), 'numpy.min', 'np.min', (['lane_width_m'], {}), '(lane_width_m)\n', (1331, 1345), True, 'import numpy as np\n'), ((1367, 1387), 'numpy.max', 'np.max', (['lane_width_m'], {}), '(lane_width_m)\n', (1373, 1387), True, 'import numpy as np\n'), ((149, 165), 'numpy.max', 'np.max', (['pixels_y'], {}), '(pixels_y)\n', (155, 165), True, 'import numpy as np\n'), ((294, 317), 'numpy.absolute', 'np.absolute', (['(2 * fit[0])'], {}), '(2 * fit[0])\n', (305, 317), True, 'import numpy as np\n')]
|
__all__ = ['SegmentationEM']
import attr
import numpy as np
from .. import annotations
from ..annotations import Annotation, manage_docstring
from ..base import BaseImageSegmentationAggregator
@attr.s
@manage_docstring
class SegmentationEM(BaseImageSegmentationAggregator):
    """
    The EM algorithm for the image segmentation task.
    For each task, EM algorithm is performed to classify the image pixels.

    <NAME>-<NAME>. 2018.
    Quality Evaluation Methods for Crowdsourced Image Segmentation
    http://ilpubs.stanford.edu:8090/1161/1/main.pdf
    """

    # Number of EM iterations to run per task.
    n_iter: int = attr.ib(default=10)

    # Fitted attribute (set by `fit`):
    # segmentations_

    @staticmethod
    @manage_docstring
    def _e_step(
        segmentations: annotations.SEGMENTATIONS,
        errors: annotations.SEGMENTATION_ERRORS,
        priors: annotations.IMAGE_PIXEL_PROBAS,
    ) -> annotations.IMAGE_PIXEL_PROBAS:
        """
        Perform E-step of algorithm.
        Given performers' segmentations and error vector and priors
        for each pixel calculates posteriori probabilities.
        """
        # Per-performer, per-pixel likelihood of the observed mark given the
        # pixel is truly foreground: where a performer marked a pixel, weight
        # by their correctness probability `errors`; where they did not,
        # weight by (1 - errors).
        weighted_seg = np.multiply(errors, segmentations.T.astype(float)).T +\
                       np.multiply((1 - errors), (1 - segmentations).T.astype(float)).T

        with np.errstate(divide='ignore'):
            # Work in log-space to avoid underflow; log(0) -> -inf is
            # expected here and resolved by nan_to_num below.
            pos_log_prob = np.log(priors) + np.log(weighted_seg).sum(axis=0)
            neg_log_prob = np.log(1 - priors) + np.log(1 - weighted_seg).sum(axis=0)

        with np.errstate(invalid='ignore'):
            # division by the denominator in the Bayes formula
            priors = np.nan_to_num(np.exp(pos_log_prob) / (np.exp(pos_log_prob) + np.exp(neg_log_prob)), nan=0)

        return priors

    @staticmethod
    @manage_docstring
    def _m_step(
        segmentations: annotations.SEGMENTATIONS,
        priors: annotations.IMAGE_PIXEL_PROBAS,
        segmentation_region_size: int,
        segmentations_sizes: np.ndarray
    ) -> annotations.SEGMENTATION_ERRORS:
        """
        Perform M-step of algorithm.
        Given a priori probabilities for each pixel and the segmentation of the performers,
        it estimates performer's errors probabilities vector.
        """
        # Expected symmetric difference |A| + |B| - 2*E[|A ∩ B|] between each
        # performer's mask and the current soft ground truth, normalized by
        # the size of the union region.
        mean_errors_expectation = (segmentations_sizes + priors.sum() -
                                   2 * (segmentations * priors).sum(axis=(1, 2))) / segmentation_region_size

        # return probability of worker marking pixel correctly
        return 1 - mean_errors_expectation

    @manage_docstring
    def _aggregate_one(self, segmentations: annotations.SEGMENTATIONS) -> annotations.SEGMENTATION:
        """
        Performs an expectation maximization algorithm for a single image.
        """
        # Initial priors: pixel-wise fraction of performers who marked it.
        priors = sum(segmentations) / len(segmentations)
        segmentations = np.stack(segmentations.values)

        # Number of pixels marked by at least one performer (union region).
        segmentation_region_size = segmentations.any(axis=0).sum()

        if segmentation_region_size == 0:
            # Nobody marked anything: return an all-background mask.
            return np.zeros_like(segmentations[0])

        segmentations_sizes = segmentations.sum(axis=(1, 2))
        # initialize with errors assuming that ground truth segmentation is majority vote
        errors = self._m_step(segmentations, np.round(priors), segmentation_region_size, segmentations_sizes)

        # Alternate E- and M-steps for a fixed number of iterations.
        for _ in range(self.n_iter):
            priors = self._e_step(segmentations, errors, priors)
            errors = self._m_step(segmentations, priors, segmentation_region_size, segmentations_sizes)

        # Threshold the posterior probabilities into a boolean mask.
        return priors > 0.5

    @manage_docstring
    def fit(self, data: annotations.SEGMENTATION_DATA) -> Annotation(type='SegmentationEM', title='self'):
        """Run EM aggregation for every task and store the results in
        ``self.segmentations_``."""
        # Keep only the columns the aggregation needs.
        data = data[['task', 'performer', 'segmentation']]

        self.segmentations_ = data.groupby('task').segmentation.apply(
            lambda segmentations: self._aggregate_one(segmentations)  # using lambda for python 3.7 compatibility
        )
        return self

    @manage_docstring
    def fit_predict(self, data: annotations.SEGMENTATION_DATA) -> annotations.TASKS_SEGMENTATIONS:
        """Fit the aggregator and return the per-task segmentations."""
        return self.fit(data).segmentations_
|
[
"numpy.stack",
"numpy.zeros_like",
"numpy.log",
"attr.ib",
"numpy.errstate",
"numpy.exp",
"numpy.round"
] |
[((586, 605), 'attr.ib', 'attr.ib', ([], {'default': '(10)'}), '(default=10)\n', (593, 605), False, 'import attr\n'), ((2776, 2806), 'numpy.stack', 'np.stack', (['segmentations.values'], {}), '(segmentations.values)\n', (2784, 2806), True, 'import numpy as np\n'), ((1245, 1273), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (1256, 1273), True, 'import numpy as np\n'), ((2935, 2966), 'numpy.zeros_like', 'np.zeros_like', (['segmentations[0]'], {}), '(segmentations[0])\n', (2948, 2966), True, 'import numpy as np\n'), ((3164, 3180), 'numpy.round', 'np.round', (['priors'], {}), '(priors)\n', (3172, 3180), True, 'import numpy as np\n'), ((1302, 1316), 'numpy.log', 'np.log', (['priors'], {}), '(priors)\n', (1308, 1316), True, 'import numpy as np\n'), ((1379, 1397), 'numpy.log', 'np.log', (['(1 - priors)'], {}), '(1 - priors)\n', (1385, 1397), True, 'import numpy as np\n'), ((1455, 1484), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (1466, 1484), True, 'import numpy as np\n'), ((1319, 1339), 'numpy.log', 'np.log', (['weighted_seg'], {}), '(weighted_seg)\n', (1325, 1339), True, 'import numpy as np\n'), ((1400, 1424), 'numpy.log', 'np.log', (['(1 - weighted_seg)'], {}), '(1 - weighted_seg)\n', (1406, 1424), True, 'import numpy as np\n'), ((1592, 1612), 'numpy.exp', 'np.exp', (['pos_log_prob'], {}), '(pos_log_prob)\n', (1598, 1612), True, 'import numpy as np\n'), ((1616, 1636), 'numpy.exp', 'np.exp', (['pos_log_prob'], {}), '(pos_log_prob)\n', (1622, 1636), True, 'import numpy as np\n'), ((1639, 1659), 'numpy.exp', 'np.exp', (['neg_log_prob'], {}), '(neg_log_prob)\n', (1645, 1659), True, 'import numpy as np\n')]
|
from unittest import TestCase
import numpy as np
from matplotlib import pyplot as plt
from src.bandit_algorithms.thompson_sampling_learner import ThompsonSamplingLearner
from src.tests.bandit_algorithms.environments.bandit_test_environment import BanditTestEnvironment
from src.tests.bandit_algorithms.greedy_learner import GreedyLearner
class TestThompsonSamplingVsGreedyExperiment(TestCase):

    def test_perform_experiment(self):
        """Run Thompson Sampling and a greedy learner side by side over many
        repeated experiments and plot their mean cumulative regret."""
        # ################ Setup experiment. ################ #
        # Environment: Bernoulli arms with these success probabilities.
        probabilities = np.array([0.15, 0.1, 0.1, 0.35])
        n_arms = len(probabilities)
        optimum = np.max(probabilities)  # Expected reward of the best arm.

        # Horizon and number of repetitions.
        time_horizon = 300
        n_experiments = 1000

        ts_rewards_per_experiment = []
        gr_rewards_per_experiment = []

        # ################ Run experiment. ################ #
        for _ in range(n_experiments):
            environment = BanditTestEnvironment(n_arms=n_arms, probabilities=probabilities)
            ts_learner = ThompsonSamplingLearner(n_arms=n_arms)
            gr_learner = GreedyLearner(n_arms=n_arms)

            for _ in range(time_horizon):
                # One round per learner: TS first, then greedy (same call
                # order as the original experiment).
                for learner in (ts_learner, gr_learner):
                    arm = learner.pull_arm()
                    learner.update(arm, environment.round(arm))

            ts_rewards_per_experiment.append(ts_learner.collected_rewards)
            gr_rewards_per_experiment.append(gr_learner.collected_rewards)

        # ################ Preprocess result. ################ #
        # Mean instantaneous regret at each round, averaged over experiments.
        ts_mean_regrets = np.mean(optimum - ts_rewards_per_experiment, axis=0)
        gr_mean_regrets = np.mean(optimum - gr_rewards_per_experiment, axis=0)

        # ################ Plot result. ################ #
        plt.figure(0)
        plt.xlabel("t")
        plt.ylabel("Regret")
        plt.plot(np.cumsum(ts_mean_regrets), 'r')
        plt.plot(np.cumsum(gr_mean_regrets), 'g')
        plt.legend(["TS", "Greedy"])
        plt.show()

        # Expected picture: the greedy learner's regret grows linearly,
        # while TS's instantaneous regret shrinks as rounds accumulate.
|
[
"src.tests.bandit_algorithms.greedy_learner.GreedyLearner",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"src.tests.bandit_algorithms.environments.bandit_test_environment.BanditTestEnvironment",
"numpy.cumsum",
"numpy.max",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.figure",
"src.bandit_algorithms.thompson_sampling_learner.ThompsonSamplingLearner",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((538, 570), 'numpy.array', 'np.array', (['[0.15, 0.1, 0.1, 0.35]'], {}), '([0.15, 0.1, 0.1, 0.35])\n', (546, 570), True, 'import numpy as np\n'), ((609, 618), 'numpy.max', 'np.max', (['p'], {}), '(p)\n', (615, 618), True, 'import numpy as np\n'), ((2082, 2130), 'numpy.mean', 'np.mean', (['(opt - ts_rewards_per_experiment)'], {'axis': '(0)'}), '(opt - ts_rewards_per_experiment, axis=0)\n', (2089, 2130), True, 'import numpy as np\n'), ((2157, 2205), 'numpy.mean', 'np.mean', (['(opt - gr_rewards_per_experiment)'], {'axis': '(0)'}), '(opt - gr_rewards_per_experiment, axis=0)\n', (2164, 2205), True, 'import numpy as np\n'), ((2275, 2288), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (2285, 2288), True, 'from matplotlib import pyplot as plt\n'), ((2297, 2312), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (2307, 2312), True, 'from matplotlib import pyplot as plt\n'), ((2321, 2341), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Regret"""'], {}), "('Regret')\n", (2331, 2341), True, 'from matplotlib import pyplot as plt\n'), ((2450, 2478), 'matplotlib.pyplot.legend', 'plt.legend', (["['TS', 'Greedy']"], {}), "(['TS', 'Greedy'])\n", (2460, 2478), True, 'from matplotlib import pyplot as plt\n'), ((2487, 2497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2495, 2497), True, 'from matplotlib import pyplot as plt\n'), ((1013, 1066), 'src.tests.bandit_algorithms.environments.bandit_test_environment.BanditTestEnvironment', 'BanditTestEnvironment', ([], {'n_arms': 'n_arms', 'probabilities': 'p'}), '(n_arms=n_arms, probabilities=p)\n', (1034, 1066), False, 'from src.tests.bandit_algorithms.environments.bandit_test_environment import BanditTestEnvironment\n'), ((1092, 1130), 'src.bandit_algorithms.thompson_sampling_learner.ThompsonSamplingLearner', 'ThompsonSamplingLearner', ([], {'n_arms': 'n_arms'}), '(n_arms=n_arms)\n', (1115, 1130), False, 'from src.bandit_algorithms.thompson_sampling_learner import 
ThompsonSamplingLearner\n'), ((1156, 1184), 'src.tests.bandit_algorithms.greedy_learner.GreedyLearner', 'GreedyLearner', ([], {'n_arms': 'n_arms'}), '(n_arms=n_arms)\n', (1169, 1184), False, 'from src.tests.bandit_algorithms.greedy_learner import GreedyLearner\n'), ((2359, 2385), 'numpy.cumsum', 'np.cumsum', (['ts_mean_regrets'], {}), '(ts_mean_regrets)\n', (2368, 2385), True, 'import numpy as np\n'), ((2409, 2435), 'numpy.cumsum', 'np.cumsum', (['gr_mean_regrets'], {}), '(gr_mean_regrets)\n', (2418, 2435), True, 'import numpy as np\n')]
|
'''****************************************************************************
* GANs.py: GAN Models
******************************************************************************
* v0.1 - 01.03.2019
*
* Copyright (c) 2019 <NAME> (<EMAIL>)
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
****************************************************************************'''
# https://github.com/eriklindernoren/Keras-GAN/blob/master/acgan/acgan.py
# https://github.com/keras-team/keras/blob/master/examples/mnist_acgan.py
import os
import numpy as np
import tensorflow as tf
from matplotlib.pyplot import imsave
from tensorflow.keras import Input, Model, Sequential
from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D
from tqdm import tqdm
from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D
from misc.misc import Hexnet_print, normalize_array, print_newline
from misc.visualization import visualize_hexarray
class ACGAN():
    def __init__(self, input_shape, classes, latent_dim=100, mode='baseline'):
        """Configure the ACGAN.

        Args:
            input_shape: image shape; the third entry is read as the channel
                count, i.e. channels-last layout.
            classes: number of class labels for the auxiliary classifier.
            latent_dim: dimensionality of the generator's noise input.
            mode: which convolution variant the networks use — 'baseline'
                (Keras Conv2D layers), 'S-ACGAN' (SConv2D layers) or
                'H-ACGAN' (HConv2D layers).
        """
        self.input_shape = input_shape
        self.channels = self.input_shape[2]
        self.classes = classes
        self.latent_dim = latent_dim
        self.mode = mode
def build_generator(self):
model = Sequential()
# https://github.com/eriklindernoren/Keras-GAN/blob/master/acgan/acgan.py
# https://github.com/keras-team/keras/blob/master/examples/mnist_acgan.py
model.add(Dense(units = 128 * 8 * 8, activation = 'relu', input_dim = self.latent_dim))
model.add(Reshape(target_shape = (8, 8, 128)))
if self.mode == 'baseline':
# model.add(UpSampling2D())
# model.add(Conv2D(filters=128, kernel_size=3, padding='same'))
model.add(Conv2DTranspose(filters=128, kernel_size=3, strides=2, padding='same'))
elif self.mode == 'S-ACGAN':
# model.add(SSampling2D())
# model.add(SConv2D(filters=128, kernel_size=3, padding='SAME'))
model.add(SConv2DTranspose(filters=128, kernel_size=3, strides=2, padding='SAME'))
elif self.mode == 'H-ACGAN':
# model.add(HSampling2D())
# model.add(HConv2D(filters=128, kernel_size=3, padding='SAME'))
model.add(HConv2DTranspose(filters=128, kernel_size=3, strides=2, padding='SAME'))
model.add(Activation('relu'))
model.add(BatchNormalization())
if self.mode == 'baseline':
# model.add(UpSampling2D())
# model.add(Conv2D(filters=64, kernel_size=3, padding='same'))
model.add(Conv2DTranspose(filters=64, kernel_size=3, strides=2, padding='same'))
elif self.mode == 'S-ACGAN':
# model.add(SSampling2D())
# model.add(SConv2D(filters=64, kernel_size=3, padding='SAME'))
model.add(SConv2DTranspose(filters=64, kernel_size=3, strides=2, padding='SAME'))
elif self.mode == 'H-ACGAN':
# model.add(HSampling2D())
# model.add(HConv2D(filters=64, kernel_size=3, padding='SAME'))
model.add(HConv2DTranspose(filters=64, kernel_size=3, strides=2, padding='SAME'))
model.add(Activation('relu'))
model.add(BatchNormalization())
if self.mode == 'baseline':
model.add(Conv2D(filters=self.channels, kernel_size=3, padding='same'))
elif self.mode == 'S-ACGAN':
model.add(SConv2D(filters=self.channels, kernel_size=3, padding='SAME'))
elif self.mode == 'H-ACGAN':
model.add(HConv2D(filters=self.channels, kernel_size=3, padding='SAME'))
model.add(Activation('tanh'))
noise = Input(shape=(self.latent_dim,))
label = Input(shape=(1,), dtype='int32')
label_embedding = Flatten()(Embedding(self.classes, self.latent_dim)(label))
model_input = multiply([noise, label_embedding])
img = model(model_input)
return (Model([noise, label], img), model)
def build_discriminator(self):
    """Build the ACGAN discriminator.

    The trunk is four conv blocks (32/64/128/256 filters, LeakyReLU + Dropout
    after each) followed by Flatten; two dense heads predict real/fake
    validity (sigmoid) and the auxiliary class label (softmax).

    Returns:
        tuple: ``(Model, Sequential)`` - the two-headed discriminator model
        mapping an image to ``[validity, label]``, and the shared
        convolutional trunk (kept separately for ``summary()``).

    Raises:
        ValueError: if ``self.mode`` is not one of ``'baseline'``,
            ``'S-ACGAN'`` or ``'H-ACGAN'`` (previously an unknown mode
            silently produced a discriminator with no conv layers at all).
    """
    def _conv(**kwargs):
        # Pick the convolution implementation for self.mode. Note the
        # padding spelling differs: Keras uses 'same', the custom
        # square/hexagonal layers use 'SAME'.
        if self.mode == 'baseline':
            return Conv2D(padding='same', **kwargs)
        if self.mode == 'S-ACGAN':
            return SConv2D(padding='SAME', **kwargs)
        if self.mode == 'H-ACGAN':
            return HConv2D(padding='SAME', **kwargs)
        raise ValueError('unknown mode: {!r}'.format(self.mode))

    model = Sequential()
    # https://github.com/keras-team/keras/blob/master/examples/mnist_acgan.py
    model.add(_conv(filters=32, kernel_size=3, strides=2, input_shape=self.input_shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(rate=0.3))
    model.add(_conv(filters=64, kernel_size=3))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(rate=0.3))
    model.add(_conv(filters=128, kernel_size=3, strides=2))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(rate=0.3))
    model.add(_conv(filters=256, kernel_size=3))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(rate=0.3))
    model.add(Flatten())

    img = Input(shape=self.input_shape)
    features = model(img)
    # Two heads: real/fake validity and auxiliary class prediction.
    validity = Dense(units=1, activation='sigmoid')(features)
    label = Dense(units=self.classes, activation='softmax')(features)
    return (Model(img, [validity, label]), model)
def compile(self):
    """Assemble and compile the discriminator, generator and combined GAN."""
    loss_functions = ['binary_crossentropy', 'sparse_categorical_crossentropy']
    adam = tf.optimizers.Adam(learning_rate=0.0002, beta_1=0.5)
    # The discriminator is trained on its own with validity + class losses.
    self.discriminator, self.discriminator_for_summary = self.build_discriminator()
    self.discriminator.compile(loss=loss_functions, optimizer=adam, metrics=['accuracy'])
    self.generator, self.generator_for_summary = self.build_generator()
    # The generator maps (noise, target label) to a synthetic image.
    noise_in = Input(shape=(self.latent_dim,))
    label_in = Input(shape=(1,))
    generated = self.generator([noise_in, label_in])
    # Freeze the discriminator inside the stacked model so that training
    # the combined model updates only the generator's weights.
    self.discriminator.trainable = False
    validity_out, class_out = self.discriminator(generated)
    # Combined model: generator followed by the (frozen) discriminator.
    self.combined = Model([noise_in, label_in], [validity_out, class_out])
    self.combined.compile(loss=loss_functions, optimizer=adam)
def sample_images(self, epoch, visualize_hexagonal, output_dir, run_title, images_to_sample_per_class):
    """Generate sample images for every class and write them as PNG files.

    Writes ``images_to_sample_per_class * self.classes`` images into
    ``output_dir/run_title``, creating the directory if needed.
    """
    rows = images_to_sample_per_class
    cols = self.classes
    latent_vectors = np.random.normal(0, 1, (rows * cols, self.latent_dim))
    # Label sequence 0..classes-1, repeated once per requested sample row.
    labels = np.tile(np.arange(cols), rows)
    generated = normalize_array(self.generator.predict([latent_vectors, labels]))
    samples_dir = os.path.join(output_dir, run_title)
    os.makedirs(samples_dir, exist_ok=True)
    for counter, (image, label) in enumerate(zip(generated, labels)):
        target = os.path.join(samples_dir, f'epoch{epoch}_label{label}_image{counter}.png')
        if visualize_hexagonal:
            visualize_hexarray(image, target)
        else:
            imsave(target, image)
def fit(
    self,
    train_data,
    train_labels,
    batch_size = 100,
    epochs = 100,
    visualize_hexagonal = False,
    output_dir = None,
    run_title = None,
    images_to_sample_per_class = 10,
    disable_training = False):
    """Train (or, with ``disable_training``, only score) the ACGAN.

    Each step performs one discriminator update on a real batch plus a
    generated batch, then one generator update through the combined model.

    Args:
        train_data: training images, indexed along axis 0 per sample
            (assumed compatible with ``self.input_shape`` - TODO confirm).
        train_labels: integer class labels; reshaped here to ``(-1, 1)``.
        batch_size: number of real (and of generated) images per step.
        epochs: number of passes over the data.
        visualize_hexagonal: forwarded to ``sample_images()``.
        output_dir: if not None, sample images are written there each epoch.
        run_title: subdirectory name for the sampled images.
        images_to_sample_per_class: rows of samples written per epoch.
        disable_training: if True, only ``test_on_batch`` is called - no
            weight updates (this is how ``evaluate()`` reuses this loop).
    """
    # Configure inputs
    train_labels = np.reshape(train_labels, newshape = (-1, 1))
    # Adversarial ground truths
    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    # Integer division: any final partial batch is dropped.
    batches = int(train_data.shape[0] / batch_size)
    for epoch in range(1, epochs + 1):
        for batch in tqdm(range(1, batches + 1)):
            ################################################################
            # Train the discriminator
            ################################################################
            # Select a random batch of images
            idx = np.random.randint(0, train_data.shape[0], batch_size)
            imgs = train_data[idx]
            # Sample noise as generator input
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
            # The labels of the digits that the generator tries to create an image representation of
            sampled_labels = np.random.randint(0, self.classes, (batch_size, 1))
            # Generate a half batch of new images
            gen_imgs = self.generator.predict([noise, sampled_labels])
            # Image labels
            img_labels = train_labels[idx]
            if not disable_training:
                d_loss_real = self.discriminator.train_on_batch(imgs, [valid, img_labels])
                d_loss_fake = self.discriminator.train_on_batch(gen_imgs, [fake, sampled_labels])
            else:
                d_loss_real = self.discriminator.test_on_batch(imgs, [valid, img_labels])
                d_loss_fake = self.discriminator.test_on_batch(gen_imgs, [fake, sampled_labels])
            # Average real/fake discriminator losses for reporting.
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
            ################################################################
            # Train the generator
            ################################################################
            if not disable_training:
                g_loss = self.combined.train_on_batch([noise, sampled_labels], [valid, sampled_labels])
            else:
                g_loss = self.combined.test_on_batch([noise, sampled_labels], [valid, sampled_labels])
        # NOTE(review): loss reporting and image sampling are placed at epoch
        # granularity here; indentation was ambiguous in the flattened source
        # (printing per batch would also clash with the tqdm progress bar) -
        # confirm against upstream.
        Hexnet_print(f'(epoch={epoch:{len(str(epochs))}}/{epochs}) [D loss={d_loss[0]:11.8f}, acc={100*d_loss[3]:6.2f}%, op_acc={100*d_loss[4]:6.2f}%] [G loss={g_loss[0]:11.8f}]')
        if output_dir is not None:
            self.sample_images(epoch, visualize_hexagonal, output_dir, run_title, images_to_sample_per_class)
def evaluate(
    self,
    train_data,
    train_labels,
    batch_size = 100,
    epochs = 10,
    visualize_hexagonal = False,
    output_dir = None,
    run_title = None,
    images_to_sample_per_class = 10,
    disable_training = True):
    """Evaluate the ACGAN by running the fit() loop with training disabled."""
    if run_title is not None:
        run_title = f'{run_title}_evaluation'
    # Delegate to fit(): with disable_training the discriminator and the
    # combined model are only scored via test_on_batch, never updated.
    self.fit(
        train_data,
        train_labels,
        batch_size = batch_size,
        epochs = epochs,
        visualize_hexagonal = visualize_hexagonal,
        output_dir = output_dir,
        run_title = run_title,
        images_to_sample_per_class = images_to_sample_per_class,
        disable_training = disable_training)
def summary(self):
    """Print Keras model summaries for the generator and the discriminator.

    Uses the plain Sequential trunks kept by compile() (the *_for_summary
    attributes) rather than the wrapped functional models.
    """
    Hexnet_print('Generator')
    self.generator_for_summary.summary()
    print_newline()
    Hexnet_print('Discriminator')
    self.discriminator_for_summary.summary()
def model_GAN_ACGAN_standalone(input_shape, classes):
    """Return a baseline ACGAN (standard square-pixel convolutions) for the given input shape and class count."""
    return ACGAN(input_shape, classes, mode='baseline')
def model_GAN_SACGAN_standalone(input_shape, classes):
    """Return an ACGAN built from the custom square-lattice (SConv2D) layers."""
    return ACGAN(input_shape, classes, mode='S-ACGAN')
def model_GAN_HACGAN_standalone(input_shape, classes):
    """Return an ACGAN built from the custom hexagonal-lattice (HConv2D) layers."""
    return ACGAN(input_shape, classes, mode='H-ACGAN')
|
[
"tensorflow.keras.layers.multiply",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"numpy.ones",
"misc.misc.print_newline",
"tensorflow.keras.layers.LeakyReLU",
"numpy.random.randint",
"numpy.random.normal",
"tensorflow.keras.Sequential",
"os.path.join",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.Input",
"layers.layers.HConv2DTranspose",
"tensorflow.keras.layers.Activation",
"numpy.reshape",
"layers.layers.SConv2DTranspose",
"numpy.add",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dropout",
"layers.layers.HConv2D",
"layers.layers.SConv2D",
"tensorflow.optimizers.Adam",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Conv2DTranspose",
"misc.misc.Hexnet_print",
"tensorflow.keras.layers.Conv2D",
"os.makedirs",
"misc.misc.normalize_array",
"numpy.zeros"
] |
[((2504, 2516), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (2514, 2516), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((4905, 4936), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (4910, 4936), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((4963, 4995), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(1,)', 'dtype': '"""int32"""'}), "(shape=(1,), dtype='int32')\n", (4968, 4995), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((5107, 5141), 'tensorflow.keras.layers.multiply', 'multiply', (['[noise, label_embedding]'], {}), '([noise, label_embedding])\n', (5115, 5141), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((5292, 5304), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (5302, 5304), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((7241, 7270), 'tensorflow.keras.Input', 'Input', ([], {'shape': 'self.input_shape'}), '(shape=self.input_shape)\n', (7246, 7270), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((7623, 7675), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', ([], {'learning_rate': '(0.0002)', 'beta_1': '(0.5)'}), '(learning_rate=0.0002, beta_1=0.5)\n', (7641, 7675), True, 'import tensorflow as tf\n'), ((8072, 8103), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), '(shape=(self.latent_dim,))\n', (8077, 8103), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((8120, 8137), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (8125, 8137), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((8546, 8590), 'tensorflow.keras.Model', 'Model', (['[noise, label]', '[valid, target_label]'], {}), '([noise, label], [valid, target_label])\n', 
(8551, 8590), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((8856, 8904), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(r * c, self.latent_dim)'], {}), '(0, 1, (r * c, self.latent_dim))\n', (8872, 8904), True, 'import numpy as np\n'), ((9084, 9109), 'misc.misc.normalize_array', 'normalize_array', (['gen_imgs'], {}), '(gen_imgs)\n', (9099, 9109), False, 'from misc.misc import Hexnet_print, normalize_array, print_newline\n'), ((9140, 9175), 'os.path.join', 'os.path.join', (['output_dir', 'run_title'], {}), '(output_dir, run_title)\n', (9152, 9175), False, 'import os\n'), ((9184, 9230), 'os.makedirs', 'os.makedirs', (['output_dir_samples'], {'exist_ok': '(True)'}), '(output_dir_samples, exist_ok=True)\n', (9195, 9230), False, 'import os\n'), ((10056, 10098), 'numpy.reshape', 'np.reshape', (['train_labels'], {'newshape': '(-1, 1)'}), '(train_labels, newshape=(-1, 1))\n', (10066, 10098), True, 'import numpy as np\n'), ((10154, 10178), 'numpy.ones', 'np.ones', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (10161, 10178), True, 'import numpy as np\n'), ((10195, 10220), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (10203, 10220), True, 'import numpy as np\n'), ((13377, 13402), 'misc.misc.Hexnet_print', 'Hexnet_print', (['"""Generator"""'], {}), "('Generator')\n", (13389, 13402), False, 'from misc.misc import Hexnet_print, normalize_array, print_newline\n'), ((13457, 13472), 'misc.misc.print_newline', 'print_newline', ([], {}), '()\n', (13470, 13472), False, 'from misc.misc import Hexnet_print, normalize_array, print_newline\n'), ((13481, 13510), 'misc.misc.Hexnet_print', 'Hexnet_print', (['"""Discriminator"""'], {}), "('Discriminator')\n", (13493, 13510), False, 'from misc.misc import Hexnet_print, normalize_array, print_newline\n'), ((2702, 2772), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(128 * 8 * 8)', 'activation': '"""relu"""', 'input_dim': 'self.latent_dim'}), "(units=128 * 8 * 
8, activation='relu', input_dim=self.latent_dim)\n", (2707, 2772), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((2798, 2831), 'tensorflow.keras.layers.Reshape', 'Reshape', ([], {'target_shape': '(8, 8, 128)'}), '(target_shape=(8, 8, 128))\n', (2805, 2831), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((3597, 3615), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3607, 3615), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((3635, 3655), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3653, 3655), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((4413, 4431), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4423, 4431), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((4451, 4471), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4469, 4471), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((4857, 4875), 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (4867, 4875), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, 
Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((5022, 5031), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5029, 5031), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((5204, 5230), 'tensorflow.keras.Model', 'Model', (['[noise, label]', 'img'], {}), '([noise, label], img)\n', (5209, 5230), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((5863, 5883), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (5872, 5883), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((5903, 5920), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.3)'}), '(rate=0.3)\n', (5910, 5920), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((6273, 6293), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (6282, 6293), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((6313, 6330), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.3)'}), '(rate=0.3)\n', (6320, 6330), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((6719, 6739), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (6728, 6739), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, 
Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((6759, 6776), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.3)'}), '(rate=0.3)\n', (6766, 6776), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((7132, 7152), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7141, 7152), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((7172, 7189), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.3)'}), '(rate=0.3)\n', (7179, 7189), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((7209, 7218), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7216, 7218), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((7320, 7356), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'activation': '"""sigmoid"""'}), "(units=1, activation='sigmoid')\n", (7325, 7356), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((7386, 7433), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'self.classes', 'activation': '"""softmax"""'}), "(units=self.classes, activation='softmax')\n", (7391, 7433), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, 
UpSampling2D\n'), ((7461, 7490), 'tensorflow.keras.Model', 'Model', (['img', '[validity, label]'], {}), '(img, [validity, label])\n', (7466, 7490), False, 'from tensorflow.keras import Input, Model, Sequential\n'), ((3010, 3080), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=128, kernel_size=3, strides=2, padding='same')\n", (3025, 3080), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((3831, 3900), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=64, kernel_size=3, strides=2, padding='same')\n", (3846, 3900), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((4532, 4592), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'self.channels', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=self.channels, kernel_size=3, padding='same')\n", (4538, 4592), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((5032, 5072), 'tensorflow.keras.layers.Embedding', 'Embedding', (['self.classes', 'self.latent_dim'], {}), '(self.classes, self.latent_dim)\n', (5041, 5072), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((5448, 5543), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""', 'input_shape': 
'self.input_shape'}), "(filters=32, kernel_size=3, strides=2, padding='same', input_shape=\n self.input_shape)\n", (5454, 5543), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((5981, 6030), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=64, kernel_size=3, padding='same')\n", (5987, 6030), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((6391, 6452), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(filters=128, kernel_size=3, strides=2, padding='same')\n", (6397, 6452), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((6837, 6887), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=256, kernel_size=3, padding='same')\n", (6843, 6887), False, 'from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, Conv2DTranspose, Dense, Dropout, Embedding, Flatten, LeakyReLU, multiply, Reshape, UpSampling2D\n'), ((10656, 10709), 'numpy.random.randint', 'np.random.randint', (['(0)', 'train_data.shape[0]', 'batch_size'], {}), '(0, train_data.shape[0], batch_size)\n', (10673, 10709), True, 'import numpy as np\n'), ((10824, 10877), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, self.latent_dim)'], {}), '(0, 1, (batch_size, self.latent_dim))\n', (10840, 10877), True, 'import numpy as np\n'), ((11017, 11068), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.classes', '(batch_size, 
1)'], {}), '(0, self.classes, (batch_size, 1))\n', (11034, 11068), True, 'import numpy as np\n'), ((3257, 3328), 'layers.layers.SConv2DTranspose', 'SConv2DTranspose', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""'}), "(filters=128, kernel_size=3, strides=2, padding='SAME')\n", (3273, 3328), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((4076, 4146), 'layers.layers.SConv2DTranspose', 'SConv2DTranspose', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""'}), "(filters=64, kernel_size=3, strides=2, padding='SAME')\n", (4092, 4146), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((4653, 4714), 'layers.layers.SConv2D', 'SConv2D', ([], {'filters': 'self.channels', 'kernel_size': '(3)', 'padding': '"""SAME"""'}), "(filters=self.channels, kernel_size=3, padding='SAME')\n", (4660, 4714), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((5599, 5695), 'layers.layers.SConv2D', 'SConv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""', 'input_shape': 'self.input_shape'}), "(filters=32, kernel_size=3, strides=2, padding='SAME', input_shape=\n self.input_shape)\n", (5606, 5695), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((6091, 6141), 'layers.layers.SConv2D', 'SConv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""SAME"""'}), "(filters=64, kernel_size=3, padding='SAME')\n", (6098, 6141), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((6513, 6575), 'layers.layers.SConv2D', 'SConv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""'}), "(filters=128, 
kernel_size=3, strides=2, padding='SAME')\n", (6520, 6575), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((6948, 6999), 'layers.layers.SConv2D', 'SConv2D', ([], {'filters': '(256)', 'kernel_size': '(3)', 'padding': '"""SAME"""'}), "(filters=256, kernel_size=3, padding='SAME')\n", (6955, 6999), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((9466, 9514), 'os.path.join', 'os.path.join', (['output_dir_samples', 'image_filename'], {}), '(output_dir_samples, image_filename)\n', (9478, 9514), False, 'import os\n'), ((9583, 9631), 'os.path.join', 'os.path.join', (['output_dir_samples', 'image_filename'], {}), '(output_dir_samples, image_filename)\n', (9595, 9631), False, 'import os\n'), ((11766, 11798), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (11772, 11798), True, 'import numpy as np\n'), ((3505, 3576), 'layers.layers.HConv2DTranspose', 'HConv2DTranspose', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""'}), "(filters=128, kernel_size=3, strides=2, padding='SAME')\n", (3521, 3576), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((4322, 4392), 'layers.layers.HConv2DTranspose', 'HConv2DTranspose', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""'}), "(filters=64, kernel_size=3, strides=2, padding='SAME')\n", (4338, 4392), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((4775, 4836), 'layers.layers.HConv2D', 'HConv2D', ([], {'filters': 'self.channels', 'kernel_size': '(3)', 'padding': '"""SAME"""'}), "(filters=self.channels, kernel_size=3, padding='SAME')\n", (4782, 4836), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, 
SConv2DTranspose, SSampling2D\n'), ((5751, 5847), 'layers.layers.HConv2D', 'HConv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""', 'input_shape': 'self.input_shape'}), "(filters=32, kernel_size=3, strides=2, padding='SAME', input_shape=\n self.input_shape)\n", (5758, 5847), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((6202, 6252), 'layers.layers.HConv2D', 'HConv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""SAME"""'}), "(filters=64, kernel_size=3, padding='SAME')\n", (6209, 6252), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((6636, 6698), 'layers.layers.HConv2D', 'HConv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""SAME"""'}), "(filters=128, kernel_size=3, strides=2, padding='SAME')\n", (6643, 6698), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n'), ((7060, 7111), 'layers.layers.HConv2D', 'HConv2D', ([], {'filters': '(256)', 'kernel_size': '(3)', 'padding': '"""SAME"""'}), "(filters=256, kernel_size=3, padding='SAME')\n", (7067, 7111), False, 'from layers.layers import HConv2D, HConv2DTranspose, HSampling2D, SConv2D, SConv2DTranspose, SSampling2D\n')]
|
import os
import numpy as np
import torch
from sklearn.datasets import load_svmlight_file
from tensorflow import gfile
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.transforms import Compose
from allrank.utils.ltr_logging import get_logger
logger = get_logger()

# Sentinel values used to right-pad variable-length query listings up to a
# fixed length (see FixLength._pad); downstream code can recognise padded
# slots in the targets / index arrays by these markers.
PADDED_Y_VALUE = -1
PADDED_INDEX_VALUE = -1
class ToTensor(object):
    """Convert the ndarrays of a ``(features, targets, indices)`` sample to torch Tensors."""

    def __call__(self, sample):
        # Features and targets become float32, document indices become long.
        features, targets, indices = sample
        features_tensor = torch.from_numpy(features).type(torch.float32)
        targets_tensor = torch.from_numpy(targets).type(torch.float32)
        indices_tensor = torch.from_numpy(indices).type(torch.long)
        return features_tensor, targets_tensor, indices_tensor
class FixLength(object):
    """Fix all listings to have equal length by either padding or sampling.

    For a given listing, if its length is less than dim_given, it is padded to
    that length (x's are padded with zero vectors, y's with PADDED_Y_VALUE and
    the index array with PADDED_INDEX_VALUE - the previous docstring wrongly
    claimed zero padding for y's). If its length is greater than or equal to
    dim_given, a random subset of dim_given items is taken instead, redrawing
    so that at least one relevant item survives when the listing has any.

    Args:
        dim_given (int): target number of items per listing.
    """
    def __init__(self, dim_given):
        # Raise instead of assert: asserts are stripped under "python -O".
        if not isinstance(dim_given, int):
            raise TypeError("dim_given must be an int, got {}".format(type(dim_given).__name__))
        self.dim_given = dim_given

    def __call__(self, sample):
        """Return ``(x, y, indices)`` of length ``self.dim_given`` for one listing."""
        sample_size = len(sample[1])
        if sample_size < self.dim_given:
            # Expected dimension larger than the listing: pad.
            return self._pad(sample, sample_size)
        # Otherwise subsample down to the expected dimension.
        return self._sample(sample, sample_size)

    def _sample(self, sample, sample_size):
        """Randomly subsample the listing down to ``self.dim_given`` items."""
        indices = np.random.choice(sample_size, self.dim_given, replace=False)
        fixed_len_y = sample[1][indices]
        if fixed_len_y.sum() == 0:
            if sample[1].sum() == 1:
                # Exactly one relevant item overall: drop one sampled index and
                # force the relevant item in. (It cannot already be in
                # `indices`, since the sampled y's sum to zero.)
                indices = np.concatenate([np.random.choice(indices, self.dim_given - 1, replace=False), [np.argmax(sample[1])]])
                fixed_len_y = sample[1][indices]
            elif sample[1].sum() > 0:
                # Several relevant items exist: redraw iteratively until one is
                # included (loop rather than the previous unbounded recursion).
                while fixed_len_y.sum() == 0:
                    indices = np.random.choice(sample_size, self.dim_given, replace=False)
                    fixed_len_y = sample[1][indices]
        fixed_len_x = sample[0][indices]
        return fixed_len_x, fixed_len_y, indices

    def _pad(self, sample, sample_size):
        """Pad the listing with zero vectors / sentinel values up to ``self.dim_given``."""
        pad_width = self.dim_given - sample_size
        fixed_len_x = np.pad(sample[0], ((0, pad_width), (0, 0)), "constant")
        fixed_len_y = np.pad(sample[1], (0, pad_width), "constant", constant_values=PADDED_Y_VALUE)
        indices = np.pad(np.arange(0, sample_size), (0, pad_width), "constant", constant_values=PADDED_INDEX_VALUE)
        return fixed_len_x, fixed_len_y, indices
class LibSVMDataset(Dataset):
    """LibSVM Learning to Rank dataset.

    Holds one (features, targets) pair per query, split out of the flat
    svmlight arrays according to the query-id grouping.
    """
    def __init__(self, X, y, query_ids, transform=None):
        """
        Args:
            X (scipy sparse matrix): Features of dataset.
            y (numpy array): Target of dataset.
            query_ids (numpy array): Ids determining group membership.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        dense_X = X.toarray()
        # Cumulative counts of each query id give the split boundaries;
        # the trailing (empty) split produced by np.split is dropped.
        _, counts = np.unique(query_ids, return_counts=True)
        split_points = np.cumsum(counts)
        self.X_by_qid = np.split(dense_X, split_points)[:-1]
        self.y_by_qid = np.split(y, split_points)[:-1]
        self.longest_query_length = max(len(query_x) for query_x in self.X_by_qid)
        logger.info("loaded dataset with {} queries".format(len(self.X_by_qid)))
        logger.info("longest query had {} documents".format(self.longest_query_length))
        self.transform = transform

    @classmethod
    def from_svm_file(cls, svm_file_path, transform=None):
        """
        Args:
            svm_file_path (string): Path to the svm file with data.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        features, relevances, qids = load_svmlight_file(svm_file_path, query_id=True)
        logger.info("loaded dataset from {} and got x shape {}, y shape {} and query_ids shape {}".format(
            svm_file_path, features.shape, relevances.shape, qids.shape))
        return cls(features, relevances, qids, transform)

    def __len__(self):
        # One item per query.
        return len(self.X_by_qid)

    def __getitem__(self, idx):
        sample = self.X_by_qid[idx], self.y_by_qid[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample

    @property
    def shape(self):
        """[num_queries, longest_query_length, num_features] of this dataset."""
        num_queries = len(self)
        num_documents = self.longest_query_length
        num_features = self[0][0].shape[-1]
        return [num_queries, num_documents, num_features]
def load_libsvm_role(input_path: str, role: str) -> LibSVMDataset:
    """Load the dataset split named *role* from ``<input_path>/<role>.txt``."""
    svm_path = os.path.join(input_path, "{}.txt".format(role))
    logger.info("will load {} data from {}".format(role, svm_path))
    with gfile.Open(svm_path, "rb") as input_stream:
        dataset = LibSVMDataset.from_svm_file(input_stream)
    logger.info("{} DS shape: {}".format(role, dataset.shape))
    return dataset
def fix_length_to_longest_listing(ds: LibSVMDataset) -> Compose:
    """Build a transform padding/sampling every listing of *ds* to its longest listing, then tensorising it."""
    target_length = int(ds.longest_query_length)
    logger.info("Will pad to the longest listing: {}".format(ds.longest_query_length))
    return transforms.Compose([FixLength(target_length), ToTensor()])
def load_libsvm_dataset(input_path: str, listing_length: int, validation_ds_role: str):
    """Load the train split plus the given validation split as LibSVMDatasets.

    Train listings are fixed to *listing_length*; validation listings are
    padded to the validation set's own longest listing.
    """
    train_dataset = load_libsvm_role(input_path, "train")
    train_dataset.transform = transforms.Compose([FixLength(listing_length), ToTensor()])
    validation_dataset = load_libsvm_role(input_path, validation_ds_role)
    validation_dataset.transform = fix_length_to_longest_listing(validation_dataset)
    return train_dataset, validation_dataset
def create_data_loaders(train_ds, val_ds, num_workers, batch_size):
gpu_count = torch.cuda.device_count()
total_batch_size = max(1, gpu_count) * batch_size
logger.info("total batch size is {}".format(total_batch_size))
train_dl = DataLoader(train_ds, batch_size=total_batch_size, num_workers=num_workers, shuffle=True)
val_dl = DataLoader(val_ds, batch_size=total_batch_size * 2, num_workers=num_workers, shuffle=False)
return train_dl, val_dl
|
[
"numpy.pad",
"torch.utils.data.DataLoader",
"numpy.argmax",
"allrank.utils.ltr_logging.get_logger",
"torch.cuda.device_count",
"numpy.split",
"numpy.arange",
"sklearn.datasets.load_svmlight_file",
"numpy.random.choice",
"tensorflow.gfile.Open",
"numpy.unique",
"torch.from_numpy"
] |
[((307, 319), 'allrank.utils.ltr_logging.get_logger', 'get_logger', ([], {}), '()\n', (317, 319), False, 'from allrank.utils.ltr_logging import get_logger\n'), ((5819, 5844), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (5842, 5844), False, 'import torch\n'), ((5982, 6074), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds'], {'batch_size': 'total_batch_size', 'num_workers': 'num_workers', 'shuffle': '(True)'}), '(train_ds, batch_size=total_batch_size, num_workers=num_workers,\n shuffle=True)\n', (5992, 6074), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((6084, 6179), 'torch.utils.data.DataLoader', 'DataLoader', (['val_ds'], {'batch_size': '(total_batch_size * 2)', 'num_workers': 'num_workers', 'shuffle': '(False)'}), '(val_ds, batch_size=total_batch_size * 2, num_workers=num_workers,\n shuffle=False)\n', (6094, 6179), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((1770, 1830), 'numpy.random.choice', 'np.random.choice', (['sample_size', 'self.dim_given'], {'replace': '(False)'}), '(sample_size, self.dim_given, replace=False)\n', (1786, 1830), True, 'import numpy as np\n'), ((2371, 2445), 'numpy.pad', 'np.pad', (['sample[0]', '((0, self.dim_given - sample_size), (0, 0))', '"""constant"""'], {}), "(sample[0], ((0, self.dim_given - sample_size), (0, 0)), 'constant')\n", (2377, 2445), True, 'import numpy as np\n'), ((2468, 2568), 'numpy.pad', 'np.pad', (['sample[1]', '(0, self.dim_given - sample_size)', '"""constant"""'], {'constant_values': 'PADDED_Y_VALUE'}), "(sample[1], (0, self.dim_given - sample_size), 'constant',\n constant_values=PADDED_Y_VALUE)\n", (2474, 2568), True, 'import numpy as np\n'), ((3994, 4042), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['svm_file_path'], {'query_id': '(True)'}), '(svm_file_path, query_id=True)\n', (4012, 4042), False, 'from sklearn.datasets import load_svmlight_file\n'), ((4946, 4968), 'tensorflow.gfile.Open', 'gfile.Open', (['path', 
'"""rb"""'], {}), "(path, 'rb')\n", (4956, 4968), False, 'from tensorflow import gfile\n'), ((2590, 2615), 'numpy.arange', 'np.arange', (['(0)', 'sample_size'], {}), '(0, sample_size)\n', (2599, 2615), True, 'import numpy as np\n'), ((3325, 3344), 'numpy.split', 'np.split', (['X', 'groups'], {}), '(X, groups)\n', (3333, 3344), True, 'import numpy as np\n'), ((3374, 3393), 'numpy.split', 'np.split', (['y', 'groups'], {}), '(y, groups)\n', (3382, 3393), True, 'import numpy as np\n'), ((3255, 3295), 'numpy.unique', 'np.unique', (['query_ids'], {'return_counts': '(True)'}), '(query_ids, return_counts=True)\n', (3264, 3295), True, 'import numpy as np\n'), ((518, 537), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (534, 537), False, 'import torch\n'), ((559, 578), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (575, 578), False, 'import torch\n'), ((600, 625), 'torch.from_numpy', 'torch.from_numpy', (['indices'], {}), '(indices)\n', (616, 625), False, 'import torch\n'), ((1986, 2046), 'numpy.random.choice', 'np.random.choice', (['indices', '(self.dim_given - 1)'], {'replace': '(False)'}), '(indices, self.dim_given - 1, replace=False)\n', (2002, 2046), True, 'import numpy as np\n'), ((2049, 2069), 'numpy.argmax', 'np.argmax', (['sample[1]'], {}), '(sample[1])\n', (2058, 2069), True, 'import numpy as np\n')]
|
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential, load_model
from tensorflow.keras.layers import Dense, Input, Dropout, Flatten
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import CSVLogger
import pprint, os
import numpy as np
from PIL import Image
N_CATEGORIES=2
BATCH_SIZE=32
TRAIN_DIR="data/training"
VALIDATION_DIR='data/validation'
MODE_FILE_PREFIX='data/models/vgg16_fine_imageclassify_twins'
LEARNING_RATE=0.0001
MOMENTUM=0.9
def model_definition_and_training():
# -----------------------------------------------------------------------------------
# Model definition
# -----------------------------------------------------------------------------------
base_model = tf.keras.applications.vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224,224,3))
# 新たなFC層を追加
top_model= Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(N_CATEGORIES,activation='softmax'))
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
# VGG16の14層までの重みを固定
for layer in model.layers[:15]:
layer.trainable=False
#
model.compile(optimizer=SGD(lr=LEARNING_RATE,momentum=MOMENTUM), loss='categorical_crossentropy', metrics=['accuracy'])
# -----------------------------------------------------------------------------------
# Training data preprocessing
# -----------------------------------------------------------------------------------
train_datagen=ImageDataGenerator(rescale=1.0/255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
train_generator=train_datagen.flow_from_directory(TRAIN_DIR, target_size=(224,224), batch_size=BATCH_SIZE, class_mode='categorical', shuffle=True)
validation_datagen=ImageDataGenerator(rescale=1.0/255)
validation_generator=validation_datagen.flow_from_directory(VALIDATION_DIR, target_size=(224,224), batch_size=BATCH_SIZE, class_mode='categorical', shuffle=True)
hist=model.fit_generator(train_generator, epochs=200, verbose=1, validation_data=validation_generator, callbacks=[CSVLogger(MODE_FILE_PREFIX+'.csv')])
#save weights
model.save(MODE_FILE_PREFIX+'.h5')
return model
#
def main():
# 既存のモデルが存在する場合はロードし、存在しない場合は新たに学習する。
if os.path.exists(MODE_FILE_PREFIX+'.h5'):
model = load_model(MODE_FILE_PREFIX+'.h5')
else:
model = model_definition_and_training()
#
model.summary()
img_pil = tf.keras.preprocessing.image.load_img(
"data/test/2020-05-04 11.06.20.jpg", target_size=(224,224)
)
img_pil.show()
img = tf.keras.applications.vgg16.preprocess_input(
tf.keras.preprocessing.image.img_to_array(img_pil)[tf.newaxis]
)
label = ['Fuuka', 'Honoka']
predict = model.predict(img)
score = np.max(predict)
pred_label = label[np.argmax(predict[0])]
print('name:',pred_label)
print('score:',score)
#
if __name__ == "__main__":
main()
#
|
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.optimizers.SGD",
"os.path.exists",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.applications.vgg16.VGG16",
"numpy.max",
"tensorflow.keras.callbacks.CSVLogger",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
] |
[((814, 917), 'tensorflow.keras.applications.vgg16.VGG16', 'tf.keras.applications.vgg16.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(224, 224, 3)'}), "(weights='imagenet', include_top=False,\n input_shape=(224, 224, 3))\n", (847, 917), True, 'import tensorflow as tf\n'), ((940, 952), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (950, 952), False, 'from tensorflow.keras.models import Model, Sequential, load_model\n'), ((1669, 1765), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)'}), '(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True)\n', (1687, 1765), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((1932, 1969), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (1950, 1969), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2441, 2481), 'os.path.exists', 'os.path.exists', (["(MODE_FILE_PREFIX + '.h5')"], {}), "(MODE_FILE_PREFIX + '.h5')\n", (2455, 2481), False, 'import pprint, os\n'), ((2615, 2717), 'tensorflow.keras.preprocessing.image.load_img', 'tf.keras.preprocessing.image.load_img', (['"""data/test/2020-05-04 11.06.20.jpg"""'], {'target_size': '(224, 224)'}), "('data/test/2020-05-04 11.06.20.jpg',\n target_size=(224, 224))\n", (2652, 2717), True, 'import tensorflow as tf\n'), ((2936, 2951), 'numpy.max', 'np.max', (['predict'], {}), '(predict)\n', (2942, 2951), True, 'import numpy as np\n'), ((969, 1017), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'input_shape': 'base_model.output_shape[1:]'}), '(input_shape=base_model.output_shape[1:])\n', (976, 1017), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Flatten\n'), ((1035, 1064), 
'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1040, 1064), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Flatten\n'), ((1082, 1094), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1089, 1094), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Flatten\n'), ((1112, 1153), 'tensorflow.keras.layers.Dense', 'Dense', (['N_CATEGORIES'], {'activation': '"""softmax"""'}), "(N_CATEGORIES, activation='softmax')\n", (1117, 1153), False, 'from tensorflow.keras.layers import Dense, Input, Dropout, Flatten\n'), ((2493, 2529), 'tensorflow.keras.models.load_model', 'load_model', (["(MODE_FILE_PREFIX + '.h5')"], {}), "(MODE_FILE_PREFIX + '.h5')\n", (2503, 2529), False, 'from tensorflow.keras.models import Model, Sequential, load_model\n'), ((2973, 2994), 'numpy.argmax', 'np.argmax', (['predict[0]'], {}), '(predict[0])\n', (2982, 2994), True, 'import numpy as np\n'), ((1348, 1388), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': 'LEARNING_RATE', 'momentum': 'MOMENTUM'}), '(lr=LEARNING_RATE, momentum=MOMENTUM)\n', (1351, 1388), False, 'from tensorflow.keras.optimizers import SGD\n'), ((2797, 2847), 'tensorflow.keras.preprocessing.image.img_to_array', 'tf.keras.preprocessing.image.img_to_array', (['img_pil'], {}), '(img_pil)\n', (2838, 2847), True, 'import tensorflow as tf\n'), ((2274, 2310), 'tensorflow.keras.callbacks.CSVLogger', 'CSVLogger', (["(MODE_FILE_PREFIX + '.csv')"], {}), "(MODE_FILE_PREFIX + '.csv')\n", (2283, 2310), False, 'from tensorflow.keras.callbacks import CSVLogger\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from random import *
#com epoca
def f(x, p1,p2):
return -(p1*x)/p2
xx = np.arange(-1, 1, 0.1)
yy = np.arange(-1, 1, 0.1)
#x1 = [0.3, -0.6, -0.1, 0.1]
#x2 = [0.7,0.3,-0.8,-0.45]
#classe = [1,0,0,1]
x1 = [0.2, 0.4,-0.2,-0.4]
x2 = [0.2,0.4,-0.2,-0.4]
classe = [1,1,0,0]
peso = []
# defnir pesos aleatorios
for i in range(2):
n = uniform(-1,1) # gerar aleatorio float
peso.append(n)
n = 0.1 # taxa de aprendizado
e = 0 # erro
out = 0 # saida do neuronio
#peso = [0.8,-0.5]
epoca = 1
max_epoca = 100
print(peso) #peso inicial
while (max_epoca != epoca):
i = 0
while (i != (len(classe))-1):
# calcular a saida do neuronio
out = x1[i]*peso[0]+x2[i]*peso[1]
# funcao de ativacao
if out >= 0:
out = 1
else:
out = 0
#verificar se classificou de forma correta
if out != classe[i]:
e = (classe[i] - out) # calcular o erro e ajustar os pesos
peso[0] = peso[0] + n*e*x1[i]
peso[1] = peso[1] + n*e*x2[i]
print(peso)
plt.plot(xx, f(xx, peso[0], peso[1]), color='grey')
i+=1
epoca +=1
# exibir o grafico
for i in range(len(classe)):
if classe[i] == 1:
plt.plot(x1[i], x2[i], 'bo')
else:
plt.plot(x1[i], x2[i], 'ro')
plt.plot(xx, f(xx, peso[0], peso[1]), color='m')
plt.show()
|
[
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] |
[((125, 146), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.1)'], {}), '(-1, 1, 0.1)\n', (134, 146), True, 'import numpy as np\n'), ((153, 174), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.1)'], {}), '(-1, 1, 0.1)\n', (162, 174), True, 'import numpy as np\n'), ((1260, 1270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1268, 1270), True, 'import matplotlib.pyplot as plt\n'), ((1140, 1168), 'matplotlib.pyplot.plot', 'plt.plot', (['x1[i]', 'x2[i]', '"""bo"""'], {}), "(x1[i], x2[i], 'bo')\n", (1148, 1168), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1206), 'matplotlib.pyplot.plot', 'plt.plot', (['x1[i]', 'x2[i]', '"""ro"""'], {}), "(x1[i], x2[i], 'ro')\n", (1186, 1206), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 10 20:13:50 2020
@author: mm
#descricao: este programa calcula a fórmula de Báskara
"""
import numpy as np
#equação ax2+bx+c=0
a=1
b=5
c=4
delta = (b**2 - 4*a*c)
raiz_delta = np.sqrt(delta)
x_linha = (b.__neg__() + raiz_delta)/(2*a)
x_duaslinhas = (b.__neg__() - raiz_delta)/(2*a)
print("As raízes dessa equação são:" + str(x_linha) + " e " +str(x_duaslinhas))
|
[
"numpy.sqrt"
] |
[((249, 263), 'numpy.sqrt', 'np.sqrt', (['delta'], {}), '(delta)\n', (256, 263), True, 'import numpy as np\n')]
|
import numpy
from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator
from psyneulink.core.components.functions.distributionfunctions import DriftDiffusionAnalytical
from psyneulink.core.components.functions.transferfunctions import Linear, Logistic
from psyneulink.core.components.mechanisms.processing.integratormechanism import IntegratorMechanism
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.core.components.process import Process
from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection
from psyneulink.core.components.system import System
from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never
from psyneulink.core.scheduling.scheduler import Scheduler
from psyneulink.core.scheduling.time import TimeScale
from psyneulink.library.components.mechanisms.processing.integrator.ddm import DDM
from psyneulink.library.components.mechanisms.modulatory.control.evc.evccontrolmechanism import EVCControlMechanism
class TestInit:
def test_create_scheduler_from_system_StroopDemo(self):
Color_Input = TransferMechanism(name='Color Input', function=Linear(slope=0.2995))
Word_Input = TransferMechanism(name='Word Input', function=Linear(slope=0.2995))
# Processing Mechanisms (Control)
Color_Hidden = TransferMechanism(
name='Colors Hidden',
function=Logistic(gain=(1.0, ControlProjection)),
)
Word_Hidden = TransferMechanism(
name='Words Hidden',
function=Logistic(gain=(1.0, ControlProjection)),
)
Output = TransferMechanism(
name='Output',
function=Logistic(gain=(1.0, ControlProjection)),
)
# Decision Mechanisms
Decision = DDM(
function=DriftDiffusionAnalytical(
drift_rate=(1.0),
threshold=(0.1654),
noise=(0.5),
starting_point=(0),
t0=0.25,
),
name='Decision',
)
# Outcome Mechanisms:
Reward = TransferMechanism(name='Reward')
# Processes:
ColorNamingProcess = Process(
default_variable=[0],
pathway=[Color_Input, Color_Hidden, Output, Decision],
name='Color Naming Process',
)
WordReadingProcess = Process(
default_variable=[0],
pathway=[Word_Input, Word_Hidden, Output, Decision],
name='Word Reading Process',
)
RewardProcess = Process(
default_variable=[0],
pathway=[Reward],
name='RewardProcess',
)
# System:
mySystem = System(
processes=[ColorNamingProcess, WordReadingProcess, RewardProcess],
controller=EVCControlMechanism,
enable_controller=True,
# monitor_for_control=[Reward, (PROBABILITY_UPPER_THRESHOLD, 1, -1)],
name='EVC Gratton System',
)
sched = Scheduler(system=mySystem)
integrator_ColorInputPrediction = mySystem.execution_list[7]
integrator_RewardPrediction = mySystem.execution_list[8]
integrator_WordInputPrediction = mySystem.execution_list[9]
objective_EVC_mech = mySystem.execution_list[10]
expected_consideration_queue = [
{Color_Input, Word_Input, Reward, integrator_ColorInputPrediction, integrator_WordInputPrediction, integrator_RewardPrediction},
{Color_Hidden, Word_Hidden},
{Output},
{Decision},
{objective_EVC_mech},
]
assert sched.consideration_queue == expected_consideration_queue
class TestLinear:
def test_one_run_twice(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5,
)
)
p = Process(
default_variable=[0],
pathway=[A],
name='p'
)
s = System(
processes=[p],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(A, 2)}
stim_list = {A: [[1]]}
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mech = A
expected_output = [
numpy.array([1.]),
]
for i in range(len(expected_output)):
numpy.testing.assert_allclose(expected_output[i], terminal_mech.get_output_values(s)[i])
def test_two_AAB(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
p = Process(
default_variable=[0],
pathway=[A, B],
name='p'
)
s = System(
processes=[p],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(B, 1)}
stim_list = {A: [[1]]}
sched = Scheduler(system=s)
sched.add_condition(B, EveryNCalls(A, 2))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mech = B
expected_output = [
numpy.array([2.]),
]
for i in range(len(expected_output)):
numpy.testing.assert_allclose(expected_output[i], terminal_mech.get_output_values(s)[i])
def test_two_ABB(self):
A = TransferMechanism(
name='A',
default_variable=[0],
function=Linear(slope=2.0),
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
p = Process(
default_variable=[0],
pathway=[A, B],
name='p'
)
s = System(
processes=[p],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(B, 2)}
stim_list = {A: [[1]]}
sched = Scheduler(system=s)
sched.add_condition(A, Any(AtPass(0), AfterNCalls(B, 2)))
sched.add_condition(B, Any(JustRan(A), JustRan(B)))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mech = B
expected_output = [
numpy.array([2.]),
]
for i in range(len(expected_output)):
numpy.testing.assert_allclose(expected_output[i], terminal_mech.get_output_values(s)[i])
class TestBranching:
def test_three_ABAC(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
p = Process(
default_variable=[0],
pathway=[A, B],
name='p'
)
q = Process(
default_variable=[0],
pathway=[A, C],
name='q'
)
s = System(
processes=[p, q],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(C, 1)}
stim_list = {A: [[1]]}
sched = Scheduler(system=s)
sched.add_condition(B, Any(AtNCalls(A, 1), EveryNCalls(A, 2)))
sched.add_condition(C, EveryNCalls(A, 2))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [B, C]
expected_output = [
[
numpy.array([1.]),
],
[
numpy.array([2.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(s)[i])
def test_three_ABAC_convenience(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
p = Process(
default_variable=[0],
pathway=[A, B],
name='p'
)
q = Process(
default_variable=[0],
pathway=[A, C],
name='q'
)
s = System(
processes=[p, q],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(C, 1)}
stim_list = {A: [[1]]}
s.scheduler.add_condition(B, Any(AtNCalls(A, 1), EveryNCalls(A, 2)))
s.scheduler.add_condition(C, EveryNCalls(A, 2))
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [B, C]
expected_output = [
[
numpy.array([1.]),
],
[
numpy.array([2.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(s)[i])
def test_three_ABACx2(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
p = Process(
default_variable=[0],
pathway=[A, B],
name='p'
)
q = Process(
default_variable=[0],
pathway=[A, C],
name='q'
)
s = System(
processes=[p, q],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(C, 2)}
stim_list = {A: [[1]]}
sched = Scheduler(system=s)
sched.add_condition(B, Any(AtNCalls(A, 1), EveryNCalls(A, 2)))
sched.add_condition(C, EveryNCalls(A, 2))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [B, C]
expected_output = [
[
numpy.array([3.]),
],
[
numpy.array([4.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(s)[i])
def test_three_2_ABC(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
p = Process(
default_variable=[0],
pathway=[A, C],
name='p'
)
q = Process(
default_variable=[0],
pathway=[B, C],
name='q'
)
s = System(
processes=[p, q],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(C, 1)}
stim_list = {A: [[1]], B: [[2]]}
sched = Scheduler(system=s)
sched.add_condition(C, All(EveryNCalls(A, 1), EveryNCalls(B, 1)))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [C]
expected_output = [
[
numpy.array([5.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(s)[i])
def test_three_2_ABCx2(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = TransferMechanism(
name='C',
default_variable=[0],
function=Linear(slope=2.0),
)
p = Process(
default_variable=[0],
pathway=[A, C],
name='p'
)
q = Process(
default_variable=[0],
pathway=[B, C],
name='q'
)
s = System(
processes=[p, q],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(C, 2)}
stim_list = {A: [[1]], B: [[2]]}
sched = Scheduler(system=s)
sched.add_condition(C, All(EveryNCalls(A, 1), EveryNCalls(B, 1)))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [C]
expected_output = [
[
numpy.array([10.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(s)[i])
def test_three_integrators(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
p = Process(
default_variable=[0],
pathway=[A, C],
name='p'
)
q = Process(
default_variable=[0],
pathway=[B, C],
name='q'
)
s = System(
processes=[p, q],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(C, 2)}
stim_list = {A: [[1]], B: [[1]]}
sched = Scheduler(system=s)
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, Any(EveryNCalls(A, 1), EveryNCalls(B, 1)))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
mechs = [A, B, C]
expected_output = [
[
numpy.array([2.]),
],
[
numpy.array([1.]),
],
[
numpy.array([4.]),
],
]
for m in range(len(mechs)):
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], mechs[m].get_output_values(s)[i])
def test_four_ABBCD(self):
A = TransferMechanism(
name='A',
default_variable=[0],
function=Linear(slope=2.0),
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
D = TransferMechanism(
name='D',
default_variable=[0],
function=Linear(slope=1.0),
)
p = Process(
default_variable=[0],
pathway=[A, B, D],
name='p'
)
q = Process(
default_variable=[0],
pathway=[A, C, D],
name='q'
)
s = System(
processes=[p, q],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(D, 1)}
stim_list = {A: [[1]]}
sched = Scheduler(system=s)
sched.add_condition(B, EveryNCalls(A, 1))
sched.add_condition(C, EveryNCalls(A, 2))
sched.add_condition(D, Any(EveryNCalls(B, 3), EveryNCalls(C, 3)))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [D]
expected_output = [
[
numpy.array([4.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(s)[i])
def test_four_integrators_mixed(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
D = IntegratorMechanism(
name='D',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
p = Process(
default_variable=[0],
pathway=[A, C],
name='p'
)
p1 = Process(
default_variable=[0],
pathway=[A, D],
name='p1'
)
q = Process(
default_variable=[0],
pathway=[B, C],
name='q'
)
q1 = Process(
default_variable=[0],
pathway=[B, D],
name='q1'
)
s = System(
processes=[p, p1, q, q1],
name='s'
)
term_conds = {TimeScale.TRIAL: All(AfterNCalls(C, 1), AfterNCalls(D, 1))}
stim_list = {A: [[1]], B: [[1]]}
sched = Scheduler(system=s)
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(A, 1))
sched.add_condition(D, EveryNCalls(B, 1))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
mechs = [A, B, C, D]
expected_output = [
[
numpy.array([2.]),
],
[
numpy.array([1.]),
],
[
numpy.array([4.]),
],
[
numpy.array([3.]),
],
]
for m in range(len(mechs)):
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], mechs[m].get_output_values(s)[i])
def test_five_ABABCDE(self):
A = TransferMechanism(
name='A',
default_variable=[0],
function=Linear(slope=2.0),
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
D = TransferMechanism(
name='D',
default_variable=[0],
function=Linear(slope=1.0),
)
E = TransferMechanism(
name='E',
default_variable=[0],
function=Linear(slope=2.0),
)
p = Process(
default_variable=[0],
pathway=[A, C, D],
name='p'
)
q = Process(
default_variable=[0],
pathway=[B, C, E],
name='q'
)
s = System(
processes=[p, q],
name='s'
)
term_conds = {TimeScale.TRIAL: AfterNCalls(E, 1)}
stim_list = {A: [[1]], B: [[2]]}
sched = Scheduler(system=s)
sched.add_condition(C, Any(EveryNCalls(A, 1), EveryNCalls(B, 1)))
sched.add_condition(D, EveryNCalls(C, 1))
sched.add_condition(E, EveryNCalls(C, 1))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
terminal_mechs = [D, E]
expected_output = [
[
numpy.array([3.]),
],
[
numpy.array([6.]),
],
]
for m in range(len(terminal_mechs)):
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], terminal_mechs[m].get_output_values(s)[i])
#
# A B
# |\/|
# C D
# |\/|
# E F
#
def test_six_integrators_threelayer_mixed(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
B = IntegratorMechanism(
name='B',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
C = IntegratorMechanism(
name='C',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
D = IntegratorMechanism(
name='D',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
E = IntegratorMechanism(
name='E',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
F = IntegratorMechanism(
name='F',
default_variable=[0],
function=SimpleIntegrator(
rate=1
)
)
p = [
Process(
default_variable=[0],
pathway=[A, C, E],
name='p'
),
Process(
default_variable=[0],
pathway=[A, C, F],
name='p1'
),
Process(
default_variable=[0],
pathway=[A, D, E],
name='p2'
),
Process(
default_variable=[0],
pathway=[A, D, F],
name='p3'
),
Process(
default_variable=[0],
pathway=[B, C, E],
name='q'
),
Process(
default_variable=[0],
pathway=[B, C, F],
name='q1'
),
Process(
default_variable=[0],
pathway=[B, D, E],
name='q2'
),
Process(
default_variable=[0],
pathway=[B, D, F],
name='q3'
)
]
s = System(
processes=p,
name='s'
)
term_conds = {TimeScale.TRIAL: All(AfterNCalls(E, 1), AfterNCalls(F, 1))}
stim_list = {A: [[1]], B: [[1]]}
sched = Scheduler(system=s)
sched.add_condition(B, EveryNCalls(A, 2))
sched.add_condition(C, EveryNCalls(A, 1))
sched.add_condition(D, EveryNCalls(B, 1))
sched.add_condition(E, EveryNCalls(C, 1))
sched.add_condition(F, EveryNCalls(D, 2))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
# Intermediate time steps
#
# 0 1 2 3
#
# A 1 2 3 4
# B 1 2
# C 1 4 8 14
# D 3 9
# E 1 8 19 42
# F 23
#
expected_output = {
A: [
numpy.array([4.]),
],
B: [
numpy.array([2.]),
],
C: [
numpy.array([14.]),
],
D: [
numpy.array([9.]),
],
E: [
numpy.array([42.]),
],
F: [
numpy.array([23.]),
],
}
for m in expected_output:
for i in range(len(expected_output[m])):
numpy.testing.assert_allclose(expected_output[m][i], m.get_output_values(s)[i])
class TestTermination:
def test_termination_conditions_reset(self):
A = IntegratorMechanism(
name='A',
default_variable=[0],
function=SimpleIntegrator(
rate=.5
)
)
B = TransferMechanism(
name='B',
default_variable=[0],
function=Linear(slope=2.0),
)
p = Process(
default_variable=[0],
pathway=[A, B],
name='p'
)
s = System(
processes=[p],
name='s',
reinitialize_mechanisms_when=Never()
)
term_conds = {TimeScale.TRIAL: AfterNCalls(B, 2)}
stim_list = {A: [[1]]}
sched = Scheduler(system=s)
sched.add_condition(B, EveryNCalls(A, 2))
s.scheduler = sched
s.run(
inputs=stim_list,
termination_processing=term_conds
)
# A should run four times
terminal_mech = B
expected_output = [
numpy.array([4.]),
]
for i in range(len(expected_output)):
numpy.testing.assert_allclose(expected_output[i], terminal_mech.get_output_values(s)[i])
s.run(
inputs=stim_list,
)
# A should run an additional two times
terminal_mech = B
expected_output = [
numpy.array([6.]),
]
for i in range(len(expected_output)):
numpy.testing.assert_allclose(expected_output[i], terminal_mech.get_output_values(s)[i])
|
[
"psyneulink.core.scheduling.condition.EveryNCalls",
"psyneulink.core.scheduling.condition.Never",
"psyneulink.core.scheduling.condition.AfterNCalls",
"psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism",
"psyneulink.core.components.process.Process",
"psyneulink.core.components.system.System",
"psyneulink.core.scheduling.condition.AtNCalls",
"psyneulink.core.scheduling.scheduler.Scheduler",
"numpy.array",
"psyneulink.core.scheduling.condition.AtPass",
"psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator",
"psyneulink.core.scheduling.condition.JustRan",
"psyneulink.core.components.functions.distributionfunctions.DriftDiffusionAnalytical",
"psyneulink.core.components.functions.transferfunctions.Linear",
"psyneulink.core.components.functions.transferfunctions.Logistic"
] |
[((2227, 2259), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""Reward"""'}), "(name='Reward')\n", (2244, 2259), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((2311, 2428), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Color_Input, Color_Hidden, Output, Decision]', 'name': '"""Color Naming Process"""'}), "(default_variable=[0], pathway=[Color_Input, Color_Hidden, Output,\n Decision], name='Color Naming Process')\n", (2318, 2428), False, 'from psyneulink.core.components.process import Process\n'), ((2502, 2617), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Word_Input, Word_Hidden, Output, Decision]', 'name': '"""Word Reading Process"""'}), "(default_variable=[0], pathway=[Word_Input, Word_Hidden, Output,\n Decision], name='Word Reading Process')\n", (2509, 2617), False, 'from psyneulink.core.components.process import Process\n'), ((2686, 2755), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[Reward]', 'name': '"""RewardProcess"""'}), "(default_variable=[0], pathway=[Reward], name='RewardProcess')\n", (2693, 2755), False, 'from psyneulink.core.components.process import Process\n'), ((2841, 3006), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[ColorNamingProcess, WordReadingProcess, RewardProcess]', 'controller': 'EVCControlMechanism', 'enable_controller': '(True)', 'name': '"""EVC Gratton System"""'}), "(processes=[ColorNamingProcess, WordReadingProcess, RewardProcess],\n controller=EVCControlMechanism, enable_controller=True, name=\n 'EVC Gratton System')\n", (2847, 3006), False, 'from psyneulink.core.components.system import System\n'), ((3156, 3182), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 
'mySystem'}), '(system=mySystem)\n', (3165, 3182), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((4076, 4128), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A], name='p')\n", (4083, 4128), False, 'from psyneulink.core.components.process import Process\n'), ((4188, 4219), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p]', 'name': '"""s"""'}), "(processes=[p], name='s')\n", (4194, 4219), False, 'from psyneulink.core.components.system import System\n'), ((5046, 5101), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, B]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, B], name='p')\n", (5053, 5101), False, 'from psyneulink.core.components.process import Process\n'), ((5161, 5192), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p]', 'name': '"""s"""'}), "(processes=[p], name='s')\n", (5167, 5192), False, 'from psyneulink.core.components.system import System\n'), ((5334, 5353), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (5343, 5353), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((6134, 6189), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, B]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, B], name='p')\n", (6141, 6189), False, 'from psyneulink.core.components.process import Process\n'), ((6249, 6280), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p]', 'name': '"""s"""'}), "(processes=[p], name='s')\n", (6255, 6280), False, 'from psyneulink.core.components.system import System\n'), ((6422, 6441), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (6431, 6441), False, 'from 
psyneulink.core.scheduling.scheduler import Scheduler\n'), ((7461, 7516), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, B]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, B], name='p')\n", (7468, 7516), False, 'from psyneulink.core.components.process import Process\n'), ((7576, 7631), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[A, C], name='q')\n", (7583, 7631), False, 'from psyneulink.core.components.process import Process\n'), ((7691, 7725), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p, q]', 'name': '"""s"""'}), "(processes=[p, q], name='s')\n", (7697, 7725), False, 'from psyneulink.core.components.system import System\n'), ((7867, 7886), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (7876, 7886), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((9056, 9111), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, B]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, B], name='p')\n", (9063, 9111), False, 'from psyneulink.core.components.process import Process\n'), ((9171, 9226), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[A, C], name='q')\n", (9178, 9226), False, 'from psyneulink.core.components.process import Process\n'), ((9286, 9320), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p, q]', 'name': '"""s"""'}), "(processes=[p, q], name='s')\n", (9292, 9320), False, 'from psyneulink.core.components.system import System\n'), ((10589, 10644), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, B]', 'name': 
'"""p"""'}), "(default_variable=[0], pathway=[A, B], name='p')\n", (10596, 10644), False, 'from psyneulink.core.components.process import Process\n'), ((10704, 10759), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[A, C], name='q')\n", (10711, 10759), False, 'from psyneulink.core.components.process import Process\n'), ((10819, 10853), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p, q]', 'name': '"""s"""'}), "(processes=[p, q], name='s')\n", (10825, 10853), False, 'from psyneulink.core.components.system import System\n'), ((10995, 11014), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (11004, 11014), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((12212, 12267), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, C], name='p')\n", (12219, 12267), False, 'from psyneulink.core.components.process import Process\n'), ((12327, 12382), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, C]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[B, C], name='q')\n", (12334, 12382), False, 'from psyneulink.core.components.process import Process\n'), ((12442, 12476), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p, q]', 'name': '"""s"""'}), "(processes=[p, q], name='s')\n", (12448, 12476), False, 'from psyneulink.core.components.system import System\n'), ((12628, 12647), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (12637, 12647), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((13733, 13788), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': 
'[0]', 'pathway': '[A, C]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, C], name='p')\n", (13740, 13788), False, 'from psyneulink.core.components.process import Process\n'), ((13848, 13903), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, C]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[B, C], name='q')\n", (13855, 13903), False, 'from psyneulink.core.components.process import Process\n'), ((13963, 13997), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p, q]', 'name': '"""s"""'}), "(processes=[p, q], name='s')\n", (13969, 13997), False, 'from psyneulink.core.components.system import System\n'), ((14149, 14168), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (14158, 14168), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((15296, 15351), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, C], name='p')\n", (15303, 15351), False, 'from psyneulink.core.components.process import Process\n'), ((15411, 15466), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, C]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[B, C], name='q')\n", (15418, 15466), False, 'from psyneulink.core.components.process import Process\n'), ((15526, 15560), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p, q]', 'name': '"""s"""'}), "(processes=[p, q], name='s')\n", (15532, 15560), False, 'from psyneulink.core.components.system import System\n'), ((15712, 15731), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (15721, 15731), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((17110, 17168), 'psyneulink.core.components.process.Process', 
'Process', ([], {'default_variable': '[0]', 'pathway': '[A, B, D]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, B, D], name='p')\n", (17117, 17168), False, 'from psyneulink.core.components.process import Process\n'), ((17228, 17286), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C, D]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[A, C, D], name='q')\n", (17235, 17286), False, 'from psyneulink.core.components.process import Process\n'), ((17346, 17380), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p, q]', 'name': '"""s"""'}), "(processes=[p, q], name='s')\n", (17352, 17380), False, 'from psyneulink.core.components.system import System\n'), ((17522, 17541), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (17531, 17541), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((18949, 19004), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, C], name='p')\n", (18956, 19004), False, 'from psyneulink.core.components.process import Process\n'), ((19065, 19121), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, D]', 'name': '"""p1"""'}), "(default_variable=[0], pathway=[A, D], name='p1')\n", (19072, 19121), False, 'from psyneulink.core.components.process import Process\n'), ((19181, 19236), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, C]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[B, C], name='q')\n", (19188, 19236), False, 'from psyneulink.core.components.process import Process\n'), ((19297, 19353), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, D]', 'name': '"""q1"""'}), 
"(default_variable=[0], pathway=[B, D], name='q1')\n", (19304, 19353), False, 'from psyneulink.core.components.process import Process\n'), ((19413, 19455), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p, p1, q, q1]', 'name': '"""s"""'}), "(processes=[p, p1, q, q1], name='s')\n", (19419, 19455), False, 'from psyneulink.core.components.system import System\n'), ((19631, 19650), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (19640, 19650), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((21223, 21281), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C, D]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, C, D], name='p')\n", (21230, 21281), False, 'from psyneulink.core.components.process import Process\n'), ((21341, 21399), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, C, E]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[B, C, E], name='q')\n", (21348, 21399), False, 'from psyneulink.core.components.process import Process\n'), ((21459, 21493), 'psyneulink.core.components.system.System', 'System', ([], {'processes': '[p, q]', 'name': '"""s"""'}), "(processes=[p, q], name='s')\n", (21465, 21493), False, 'from psyneulink.core.components.system import System\n'), ((21645, 21664), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (21654, 21664), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((24680, 24709), 'psyneulink.core.components.system.System', 'System', ([], {'processes': 'p', 'name': '"""s"""'}), "(processes=p, name='s')\n", (24686, 24709), False, 'from psyneulink.core.components.system import System\n'), ((24885, 24904), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (24894, 24904), False, 
'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((26578, 26633), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, B]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, B], name='p')\n", (26585, 26633), False, 'from psyneulink.core.components.process import Process\n'), ((26915, 26934), 'psyneulink.core.scheduling.scheduler.Scheduler', 'Scheduler', ([], {'system': 's'}), '(system=s)\n', (26924, 26934), False, 'from psyneulink.core.scheduling.scheduler import Scheduler\n'), ((4294, 4311), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['A', '(2)'], {}), '(A, 2)\n', (4305, 4311), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((4513, 4531), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (4524, 4531), False, 'import numpy\n'), ((5267, 5284), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['B', '(1)'], {}), '(B, 1)\n', (5278, 5284), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((5385, 5402), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (5396, 5402), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((5601, 5619), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (5612, 5619), False, 'import numpy\n'), ((6355, 6372), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['B', '(2)'], {}), '(B, 2)\n', (6366, 6372), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((6765, 6783), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (6776, 6783), False, 'import numpy\n'), ((7800, 7817), 
'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['C', '(1)'], {}), '(C, 1)\n', (7811, 7817), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((7989, 8006), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (8000, 8006), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((9395, 9412), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['C', '(1)'], {}), '(C, 1)\n', (9406, 9412), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((9560, 9577), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (9571, 9577), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((10928, 10945), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['C', '(2)'], {}), '(C, 2)\n', (10939, 10945), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((11117, 11134), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (11128, 11134), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((12551, 12568), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['C', '(1)'], {}), '(C, 1)\n', (12562, 12568), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((14072, 14089), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['C', '(2)'], {}), '(C, 2)\n', (14083, 14089), False, 'from psyneulink.core.scheduling.condition import 
AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((15635, 15652), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['C', '(2)'], {}), '(C, 2)\n', (15646, 15652), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((15763, 15780), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (15774, 15780), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((17455, 17472), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['D', '(1)'], {}), '(D, 1)\n', (17466, 17472), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((17573, 17590), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(1)'], {}), '(A, 1)\n', (17584, 17590), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((17623, 17640), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (17634, 17640), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((19682, 19699), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (19693, 19699), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((19732, 19749), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(1)'], {}), '(A, 1)\n', (19743, 19749), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((19782, 19799), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', 
(['B', '(1)'], {}), '(B, 1)\n', (19793, 19799), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((21568, 21585), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['E', '(1)'], {}), '(E, 1)\n', (21579, 21585), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((21770, 21787), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['C', '(1)'], {}), '(C, 1)\n', (21781, 21787), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((21820, 21837), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['C', '(1)'], {}), '(C, 1)\n', (21831, 21837), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((23592, 23650), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C, E]', 'name': '"""p"""'}), "(default_variable=[0], pathway=[A, C, E], name='p')\n", (23599, 23650), False, 'from psyneulink.core.components.process import Process\n'), ((23726, 23785), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, C, F]', 'name': '"""p1"""'}), "(default_variable=[0], pathway=[A, C, F], name='p1')\n", (23733, 23785), False, 'from psyneulink.core.components.process import Process\n'), ((23861, 23920), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, D, E]', 'name': '"""p2"""'}), "(default_variable=[0], pathway=[A, D, E], name='p2')\n", (23868, 23920), False, 'from psyneulink.core.components.process import Process\n'), ((23996, 24055), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[A, D, F]', 'name': 
'"""p3"""'}), "(default_variable=[0], pathway=[A, D, F], name='p3')\n", (24003, 24055), False, 'from psyneulink.core.components.process import Process\n'), ((24131, 24189), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, C, E]', 'name': '"""q"""'}), "(default_variable=[0], pathway=[B, C, E], name='q')\n", (24138, 24189), False, 'from psyneulink.core.components.process import Process\n'), ((24265, 24324), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, C, F]', 'name': '"""q1"""'}), "(default_variable=[0], pathway=[B, C, F], name='q1')\n", (24272, 24324), False, 'from psyneulink.core.components.process import Process\n'), ((24400, 24459), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, D, E]', 'name': '"""q2"""'}), "(default_variable=[0], pathway=[B, D, E], name='q2')\n", (24407, 24459), False, 'from psyneulink.core.components.process import Process\n'), ((24535, 24594), 'psyneulink.core.components.process.Process', 'Process', ([], {'default_variable': '[0]', 'pathway': '[B, D, F]', 'name': '"""q3"""'}), "(default_variable=[0], pathway=[B, D, F], name='q3')\n", (24542, 24594), False, 'from psyneulink.core.components.process import Process\n'), ((24936, 24953), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (24947, 24953), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((24986, 25003), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(1)'], {}), '(A, 1)\n', (24997, 25003), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((25036, 25053), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['B', '(1)'], {}), '(B, 1)\n', (25047, 25053), False, 
'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((25086, 25103), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['C', '(1)'], {}), '(C, 1)\n', (25097, 25103), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((25136, 25153), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['D', '(2)'], {}), '(D, 2)\n', (25147, 25153), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((26848, 26865), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['B', '(2)'], {}), '(B, 2)\n', (26859, 26865), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((26966, 26983), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (26977, 26983), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((27216, 27234), 'numpy.array', 'numpy.array', (['[4.0]'], {}), '([4.0])\n', (27227, 27234), False, 'import numpy\n'), ((27563, 27581), 'numpy.array', 'numpy.array', (['[6.0]'], {}), '([6.0])\n', (27574, 27581), False, 'import numpy\n'), ((1281, 1301), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(0.2995)'}), '(slope=0.2995)\n', (1287, 1301), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((1370, 1390), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(0.2995)'}), '(slope=0.2995)\n', (1376, 1390), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((1532, 1571), 'psyneulink.core.components.functions.transferfunctions.Logistic', 
'Logistic', ([], {'gain': '(1.0, ControlProjection)'}), '(gain=(1.0, ControlProjection))\n', (1540, 1571), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((1678, 1717), 'psyneulink.core.components.functions.transferfunctions.Logistic', 'Logistic', ([], {'gain': '(1.0, ControlProjection)'}), '(gain=(1.0, ControlProjection))\n', (1686, 1717), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((1813, 1852), 'psyneulink.core.components.functions.transferfunctions.Logistic', 'Logistic', ([], {'gain': '(1.0, ControlProjection)'}), '(gain=(1.0, ControlProjection))\n', (1821, 1852), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((1940, 2040), 'psyneulink.core.components.functions.distributionfunctions.DriftDiffusionAnalytical', 'DriftDiffusionAnalytical', ([], {'drift_rate': '(1.0)', 'threshold': '(0.1654)', 'noise': '(0.5)', 'starting_point': '(0)', 't0': '(0.25)'}), '(drift_rate=1.0, threshold=0.1654, noise=0.5,\n starting_point=0, t0=0.25)\n', (1964, 2040), False, 'from psyneulink.core.components.functions.distributionfunctions import DriftDiffusionAnalytical\n'), ((3996, 4022), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (4012, 4022), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((4829, 4855), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (4845, 4855), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((5004, 5021), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (5010, 5021), False, 'from 
psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((5915, 5932), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (5921, 5932), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((6055, 6081), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (6071, 6081), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((6477, 6486), 'psyneulink.core.scheduling.condition.AtPass', 'AtPass', (['(0)'], {}), '(0)\n', (6483, 6486), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((6488, 6505), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['B', '(2)'], {}), '(B, 2)\n', (6499, 6505), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((6543, 6553), 'psyneulink.core.scheduling.condition.JustRan', 'JustRan', (['A'], {}), '(A)\n', (6550, 6553), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((6555, 6565), 'psyneulink.core.scheduling.condition.JustRan', 'JustRan', (['B'], {}), '(B)\n', (6562, 6565), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((7107, 7133), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (7123, 7133), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((7282, 7299), 'psyneulink.core.components.functions.transferfunctions.Linear', 
'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (7288, 7299), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((7419, 7436), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (7425, 7436), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((7922, 7936), 'psyneulink.core.scheduling.condition.AtNCalls', 'AtNCalls', (['A', '(1)'], {}), '(A, 1)\n', (7930, 7936), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((7938, 7955), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (7949, 7955), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((8229, 8247), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (8240, 8247), False, 'import numpy\n'), ((8293, 8311), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (8304, 8311), False, 'import numpy\n'), ((8702, 8728), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (8718, 8728), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((8877, 8894), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (8883, 8894), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((9014, 9031), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (9020, 9031), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((9487, 9501), 
'psyneulink.core.scheduling.condition.AtNCalls', 'AtNCalls', (['A', '(1)'], {}), '(A, 1)\n', (9495, 9501), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((9503, 9520), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (9514, 9520), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((9772, 9790), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (9783, 9790), False, 'import numpy\n'), ((9836, 9854), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (9847, 9854), False, 'import numpy\n'), ((10235, 10261), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (10251, 10261), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((10410, 10427), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (10416, 10427), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((10547, 10564), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (10553, 10564), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((11050, 11064), 'psyneulink.core.scheduling.condition.AtNCalls', 'AtNCalls', (['A', '(1)'], {}), '(A, 1)\n', (11058, 11064), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((11066, 11083), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(2)'], {}), '(A, 2)\n', (11077, 11083), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, 
AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((11357, 11375), 'numpy.array', 'numpy.array', (['[3.0]'], {}), '([3.0])\n', (11368, 11375), False, 'import numpy\n'), ((11421, 11439), 'numpy.array', 'numpy.array', (['[4.0]'], {}), '([4.0])\n', (11432, 11439), False, 'import numpy\n'), ((11819, 11845), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (11835, 11845), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((11996, 12020), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (12012, 12020), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((12170, 12187), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (12176, 12187), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((12683, 12700), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(1)'], {}), '(A, 1)\n', (12694, 12700), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((12702, 12719), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['B', '(1)'], {}), '(B, 1)\n', (12713, 12719), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((12940, 12958), 'numpy.array', 'numpy.array', (['[5.0]'], {}), '([5.0])\n', (12951, 12958), False, 'import numpy\n'), ((13340, 13366), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (13356, 13366), False, 'from 
psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((13517, 13541), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (13533, 13541), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((13691, 13708), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (13697, 13708), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((14204, 14221), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(1)'], {}), '(A, 1)\n', (14215, 14221), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((14223, 14240), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['B', '(1)'], {}), '(B, 1)\n', (14234, 14240), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((14461, 14480), 'numpy.array', 'numpy.array', (['[10.0]'], {}), '([10.0])\n', (14472, 14480), False, 'import numpy\n'), ((14866, 14890), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (14882, 14890), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((15042, 15066), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (15058, 15066), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((15218, 15242), 
'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (15234, 15242), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((15817, 15834), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(1)'], {}), '(A, 1)\n', (15828, 15834), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((15836, 15853), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['B', '(1)'], {}), '(B, 1)\n', (15847, 15853), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((16071, 16089), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (16082, 16089), False, 'import numpy\n'), ((16135, 16153), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (16146, 16153), False, 'import numpy\n'), ((16199, 16217), 'numpy.array', 'numpy.array', (['[4.0]'], {}), '([4.0])\n', (16210, 16217), False, 'import numpy\n'), ((16576, 16593), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (16582, 16593), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((16716, 16742), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (16732, 16742), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((16893, 16919), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (16909, 16919), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions 
import SimpleIntegrator\n'), ((17068, 17085), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(1.0)'}), '(slope=1.0)\n', (17074, 17085), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((17677, 17694), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['B', '(3)'], {}), '(B, 3)\n', (17688, 17694), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((17696, 17713), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['C', '(3)'], {}), '(C, 3)\n', (17707, 17713), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((17934, 17952), 'numpy.array', 'numpy.array', (['[4.0]'], {}), '([4.0])\n', (17945, 17952), False, 'import numpy\n'), ((18343, 18367), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (18359, 18367), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((18519, 18543), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (18535, 18543), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((18695, 18719), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (18711, 18719), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((18871, 18895), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', 
(18887, 18895), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((19534, 19551), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['C', '(1)'], {}), '(C, 1)\n', (19545, 19551), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((19553, 19570), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['D', '(1)'], {}), '(D, 1)\n', (19564, 19570), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((20019, 20037), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (20030, 20037), False, 'import numpy\n'), ((20083, 20101), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (20094, 20101), False, 'import numpy\n'), ((20147, 20165), 'numpy.array', 'numpy.array', (['[4.0]'], {}), '([4.0])\n', (20158, 20165), False, 'import numpy\n'), ((20211, 20229), 'numpy.array', 'numpy.array', (['[3.0]'], {}), '([3.0])\n', (20222, 20229), False, 'import numpy\n'), ((20590, 20607), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (20596, 20607), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((20728, 20745), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (20734, 20745), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((20868, 20894), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (20884, 20894), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((21043, 21060), 
'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(1.0)'}), '(slope=1.0)\n', (21049, 21060), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((21181, 21198), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (21187, 21198), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((21700, 21717), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['A', '(1)'], {}), '(A, 1)\n', (21711, 21717), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((21719, 21736), 'psyneulink.core.scheduling.condition.EveryNCalls', 'EveryNCalls', (['B', '(1)'], {}), '(B, 1)\n', (21730, 21736), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((22060, 22078), 'numpy.array', 'numpy.array', (['[3.0]'], {}), '([3.0])\n', (22071, 22078), False, 'import numpy\n'), ((22124, 22142), 'numpy.array', 'numpy.array', (['[6.0]'], {}), '([6.0])\n', (22135, 22142), False, 'import numpy\n'), ((22620, 22644), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (22636, 22644), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((22796, 22820), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (22812, 22820), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((22972, 22996), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': 
'(1)'}), '(rate=1)\n', (22988, 22996), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((23148, 23172), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (23164, 23172), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((23324, 23348), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (23340, 23348), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((23500, 23524), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(1)'}), '(rate=1)\n', (23516, 23524), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((24788, 24805), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['E', '(1)'], {}), '(E, 1)\n', (24799, 24805), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((24807, 24824), 'psyneulink.core.scheduling.condition.AfterNCalls', 'AfterNCalls', (['F', '(1)'], {}), '(F, 1)\n', (24818, 24824), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n'), ((25610, 25628), 'numpy.array', 'numpy.array', (['[4.0]'], {}), '([4.0])\n', (25621, 25628), False, 'import numpy\n'), ((25677, 25695), 'numpy.array', 'numpy.array', (['[2.0]'], {}), '([2.0])\n', (25688, 25695), False, 'import numpy\n'), ((25744, 25763), 'numpy.array', 'numpy.array', (['[14.0]'], {}), '([14.0])\n', (25755, 25763), False, 'import numpy\n'), ((25812, 25830), 'numpy.array', 'numpy.array', 
(['[9.0]'], {}), '([9.0])\n', (25823, 25830), False, 'import numpy\n'), ((25879, 25898), 'numpy.array', 'numpy.array', (['[42.0]'], {}), '([42.0])\n', (25890, 25898), False, 'import numpy\n'), ((25947, 25966), 'numpy.array', 'numpy.array', (['[23.0]'], {}), '([23.0])\n', (25958, 25966), False, 'import numpy\n'), ((26361, 26387), 'psyneulink.core.components.functions.statefulfunctions.integratorfunctions.SimpleIntegrator', 'SimpleIntegrator', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (26377, 26387), False, 'from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import SimpleIntegrator\n'), ((26536, 26553), 'psyneulink.core.components.functions.transferfunctions.Linear', 'Linear', ([], {'slope': '(2.0)'}), '(slope=2.0)\n', (26542, 26553), False, 'from psyneulink.core.components.functions.transferfunctions import Linear, Logistic\n'), ((26791, 26798), 'psyneulink.core.scheduling.condition.Never', 'Never', ([], {}), '()\n', (26796, 26798), False, 'from psyneulink.core.scheduling.condition import AfterNCalls, All, Any, AtNCalls, AtPass, EveryNCalls, JustRan, Never\n')]
|
import numpy as np
import random
import math
import collections
from enum import Enum
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import animation
import json
import copy
import wordvectors.physicaldata.tools as tools
class Energy_Modes(Enum):
    """Enumeration of the supported energy-calculation schemes.

    An instance is stored as the ``energy_mode`` property of the data
    creator classes, which use it to select how a state's energy is
    computed (the concrete calculation lives in the subclasses).
    """
    neighbouring = 1  # energy from interactions between neighbouring sites
    rectangular = 2   # energy evaluated over a rectangular region -- TODO confirm exact semantics in subclass
class LogTypes(Enum):
    """Keys of the ``log`` dictionary filled by ``Simulate_System``.

    NoStateChange
        Total number of Monte-Carlo steps that left the state unchanged.
    RowNoStateChange
        Length of the longest consecutive run of unchanged steps.
    """
    NoStateChange = 1
    RowNoStateChange = 2
def change_file_location (properties_location, new_location):
    """
    Rewrite the data-file path stored inside a saved properties file.

    Useful after moving the simulated data (or switching machines): the
    properties on disk keep pointing at the old location otherwise.

    Parameters
    ----------
    properties_location :
        Path of the properties file on the hard drive.
    new_location :
        New path of the simulated data that shall be stored in the
        properties file.
    """
    # A throw-away creator object is used purely as a (de)serialization
    # vehicle for the properties dictionary.
    creator = DataCreator2DGrid(file=None)
    creator.Load_properties(properties_location)
    creator.file = new_location
    creator.Save_properties(properties_location)
def Variate_beta(startbeta, stopbeta, num_steps, iterations, number_of_particles,size_of_system ):
    """
    Simulates a number of systems with different beta values in order to find different properties
    of these systems. The number of no change of states and the longest row of no state change is
    plotted over beta. Additionally some simulated states are plotted.

    Parameters
    ----------
    startbeta :
        Starting value for the beta variable
    stopbeta :
        Ending value for the beta variable
    num_steps :
        The number of systems that will be calculated with linearly changing beta
    iterations :
        The number of time steps for simulating one system
    number_of_particles :
        The number of particles that will be simulated for the systems
    size_of_system :
        The size of the system that will be simulated

    Returns
    -------
    list of ``[visual_state, no_state_change, longest_row_no_state_change, beta]``
    entries, one per simulated beta value.
    """
    prg = tools.progress_log(num_steps)
    delta_beta = (stopbeta - startbeta) / num_steps
    current_beta = startbeta
    data = [[] for k in range(num_steps)]
    for k in range(num_steps):
        # BUGFIX: overwrite_file=True gives every beta value a fresh state file.
        # Previously 'temp.txt' was appended to across the loop, so the
        # visualization of each later beta run also contained all states of
        # the earlier runs.
        datacreator = DataCreator2DGrid('temp.txt', number_of_particles, size_of_system,
                                        beta=current_beta, overwrite_file=True)
        datacreator.Simulate_System(iterations)
        data[k] = [datacreator._Get_visual_system(),
                   datacreator.log[LogTypes.NoStateChange],
                   datacreator.log[LogTypes.RowNoStateChange],
                   current_beta]
        current_beta += delta_beta
        prg.update_progress(k)
    prg.finished("Simulation finished!")
    # Throw-away object, only used for its plotting helpers below.
    datacreator = DataCreator2DGrid('temp.txt', number_of_particles, size_of_system, beta=current_beta)
    num_of_plots = min(10, num_steps)
    states = [data[(k * num_steps) // num_of_plots][0] for k in range(num_of_plots)]
    titles = [data[(k * num_steps) // num_of_plots][3] for k in range(num_of_plots)]
    datacreator.plot_states(states, 5, titles=titles, discrete=False)
    nostatechange = [data[k][1] for k in range(num_steps)]
    rownostatechange = [data[k][2] for k in range(num_steps)]
    xbeta = [data[k][3] for k in range(num_steps)]
    plt.figure()
    plt.title("Number of no change of state")
    plt.plot(xbeta, nostatechange)
    plt.figure()
    plt.title("Number of longest row of no change of state")
    plt.plot(xbeta, rownostatechange)
    return data
class base_DataCreator(object):
    """Base class for data creation. Introduces general functions and variables that are needed for simulating
    physical systems (Markov-chain Monte Carlo) and plotting them in several ways. Subclasses must implement
    the abstract methods below to define the concrete system."""

    def __init__(self, coupling_constant, beta, energy_mode, number_of_particles, size_of_system, file, overwrite_file):
        """
        Parameters
        ----------
        coupling_constant :
            strength of coupling, used in child classes for calculating the energy of the system
        beta :
            inverse of temperature for the system. Beta specifies the probability to change the state of
            the system into a state with higher energy
        energy_mode :
            Determines the way the energy is calculated. For the mode the enum class Energy_Modes is used
        number_of_particles :
            The number of particles that will be simulated
        size_of_system :
            The size of the system that will be simulated
        file :
            Path of type string to which the calculated states will be saved
        overwrite_file :
            Boolean which determines whether the given file will be cleared or not
        """
        self.coupling_constant = coupling_constant
        self.beta = beta
        self.energy_mode = energy_mode
        self.number_of_particles = number_of_particles
        self.size_of_system = size_of_system
        self.file = file
        self.log = {}
        self.last_state = None
        if overwrite_file:
            # Truncate the target file so data of earlier runs does not leak in.
            with open(file, 'w'):
                pass
        self._seperator = '#'            # separator between states within one line of the file
        self._decay_constant_vis = 0.99  # per-step intensity decay used when superimposing states

    def get_possibilities(self):
        """Abstract: return the collection of possible elementary movements."""
        raise NotImplementedError()

    def Is_move_possible(self, particles, movement, particle_idx):
        """Abstract: return True if ``movement`` may be applied to the particle at ``particle_idx``."""
        raise NotImplementedError()

    def Calculate_energy(self, particles):
        """Abstract: return the energy of the state given in particles format."""
        raise NotImplementedError()

    def Generate_random_state(self):
        """Abstract: return a random state in particles format."""
        raise NotImplementedError()

    def Generate_state(self, stateproperties):
        """Abstract: return a state constructed from the given state properties."""
        raise NotImplementedError()

    def Particle_to_visual(self, particles):
        """Abstract: convert a state in particles format into a 2D array suitable for plotting."""
        raise NotImplementedError()

    def Get_properties(self):
        """
        Returns a dictionary with properties of the data creator object.

        Properties:
        - file
        - coupling_constant
        - beta
        - energy_mode
        - number_of_particles
        - size_of_system
        """
        properties = {}
        properties["file"] = self.file
        properties["coupling_constant"] = self.coupling_constant
        properties["beta"] = self.beta
        properties["energy_mode"] = self.energy_mode
        properties["number_of_particles"] = self.number_of_particles
        properties["size_of_system"] = self.size_of_system
        return properties

    def Load_properties(self, properties):
        """
        Loads the given properties dictionary into the data creator object.

        Parameters
        ----------
        properties :
            dictionary of properties of a data creator object as received from
            the Get_properties method. Alternatively a string with the location of
            the properties dictionary on the hard drive saved with the tools module.
        """
        if type(properties) == str:
            properties = tools.load_data(properties)
        self.file = properties["file"]
        self.coupling_constant = properties["coupling_constant"]
        self.beta = properties["beta"]
        self.energy_mode = properties["energy_mode"]
        self.number_of_particles = properties["number_of_particles"]
        self.size_of_system = properties["size_of_system"]
        return properties

    def Save_properties(self, properties_file_location):
        """
        Saves properties of the creation object to the hard drive.

        Parameters
        ----------
        properties_file_location :
            The path where the properties shall be saved to.
        """
        tools.save_data(self.Get_properties(), properties_file_location)

    def Get_particles_from_file(self):
        """
        Iterator which yields all states of the corresponding file one by one
        in the particles format.
        """
        # 'with' guarantees the handle is released even when the consumer
        # abandons the generator early (the original left the file open then).
        with open(self.file, mode='r') as stream:
            for line in stream:
                for token in line.split(self._seperator):
                    token = token.strip()
                    # BUGFIX: Simulate_System ends a run with a trailing
                    # separator whenever the iteration count is not a multiple
                    # of its states-per-line constant; splitting then produces
                    # an empty token which json.loads rejects -> skip it.
                    if token:
                        yield self.str_to_particles(token)

    def _Get_visual_system(self):
        """
        Goes over all states of the corresponding file and creates a visualization which
        can be used for plotting with Plot_state.
        """
        visual_state = self.Particle_to_visual([])
        for particles in self.Get_particles_from_file():
            # Exponentially decayed superposition of all states, clipped at 1.
            visual_state = np.minimum(self._decay_constant_vis * visual_state + self.Particle_to_visual(particles), 1.0)
        return visual_state

    def Visualize_system(self):
        """
        Visualizes the time evolution of the system with a plot which is a superposition
        of several states at different timesteps with an exponential decay of intensity with time.
        """
        self.Plot_state(self._Get_visual_system(), discrete=False)

    def Animate_system(self, iterations=1000):
        """
        Visualize the time evolution of the system by creating an animation of some states over
        the whole time evolution. For running the animation in jupyter the command %matplotlib qt5
        is needed. To get back images into the notebook use %matplotlib inline

        Parameters
        ----------
        iterations:
            The number of states which are used for the animation.
        """
        visual_state = self.Particle_to_visual([])
        states_to_visualize = []
        # BUGFIX: guard against mod == 0 (ZeroDivisionError in 'idx % mod')
        # when fewer states are stored than requested animation frames.
        mod = max(1, self.get_number_of_states() // iterations)
        for idx, particles in enumerate(self.Get_particles_from_file()):
            visual_state = np.minimum(self._decay_constant_vis * visual_state + self.Particle_to_visual(particles), 1.0)
            if idx % mod == 0:
                states_to_visualize.append(copy.deepcopy(visual_state))
        self.Plot_animation(states_to_visualize)

    def Plot_animation(self, states_to_visualize):
        """
        Method that animates the given states.

        Parameters
        ----------
        states_to_visualize:
            list of visual states (2D arrays) that will be animated
        """
        # First set up the figure, the axis, and the plot element we want to animate
        cmap = plt.get_cmap('Greys')
        fig = plt.figure()
        ax = plt.axes()
        im = plt.imshow(states_to_visualize[0], interpolation='nearest', cmap=cmap)

        # initialization function: plot the background of each frame
        def init():
            im.set_data(states_to_visualize[0])
            return [im]

        # animation function. This is called sequentially
        def animate(i):
            im.set_data(states_to_visualize[i])
            return [im]

        # call the animator. blit=True means only re-draw the parts that have changed.
        # self.anim to keep animation running. See: https://github.com/matplotlib/matplotlib/issues/1656
        self.anim = animation.FuncAnimation(fig, animate, init_func=init,
                                            frames=len(states_to_visualize), interval=20, blit=True)
        plt.show()

    def Plot_state(self, particles, discrete):
        """
        Plots the given state.

        Parameters
        ----------
        particles :
            The state that will be plotted in the particles format.
            NOTE(review): for discrete=False the argument is expected to
            already be a visual 2D array (see Visualize_system).
        discrete :
            Specifies which colormap shall be used for the plot. If True,
            only black and white will be used. For the visualization of a
            time evolution discrete should be set to False.
        """
        if discrete:
            cmap = mpl.colors.ListedColormap(['white', 'black'])
        else:
            cmap = plt.get_cmap('Greys')
        if discrete:
            plt.imshow(self.Particle_to_visual(self.str_to_particles(particles)), interpolation='nearest', cmap=cmap)
        else:
            plt.imshow(self.str_to_particles(particles), interpolation='nearest', cmap=cmap)

    def plot_states (self, particles, number_of_coloumns=4, titles = [], discrete = True):
        """
        Plots the given states into several subplots, one plot per state. With
        number_of_coloumns the number of plots in one line is configured. Optionally
        titles for the individual plots can be given.

        Parameters
        ----------
        particles :
            A list of states that will be plotted in the particles format
        number_of_coloumns :
            Number of plots in one line
        titles :
            A list of titles for the corresponding states
        discrete :
            Specifies which colormap shall be used for the plot. If True,
            only black and white will be used. For the visualization of a
            time evolution discrete should be set to False.
        """
        numb_of_lines = math.ceil(len(particles) / number_of_coloumns)
        plt.figure(figsize=(15, numb_of_lines * (15 / number_of_coloumns)))
        for k in range(len(particles)):
            plt.subplot(numb_of_lines, number_of_coloumns, k + 1)
            if len(titles) > k:
                plt.title(titles[k])
            self.Plot_state(particles[k], discrete=discrete)

    def vis_most_frequent_states(self, number_of_plots=9, start_idx=1):
        """
        Visualizes the most frequent states by plotting them into a grid.

        Parameters
        ----------
        number_of_plots :
            Number of states which will be plotted
        start_idx :
            The starting index in the list of the most frequent states.
            So 1 meaning starting with the most frequent state (0 should
            be skipped because it is the number of states which are not
            represented in the dictionary)
        """
        count, dic, rev_dic = tools.Create_dic_from_file(self.file, max(100, number_of_plots), seperator=self._seperator)
        states = [self.str_to_particles(count[k][0]) for k in range(start_idx, start_idx + number_of_plots)]
        titles = [count[k][1] for k in range(start_idx, start_idx + number_of_plots)]
        self.plot_states(states, 3, titles)
        return [states, titles]

    def Next_step(self, particles):
        """
        Simulates one time step for the given system (positions of particles).
        Uses Markov-chain Monte Carlo (Metropolis) to propose a next state and
        accepts it with a probability related to the energy difference between
        the new and the old state. The variable beta controls the temperature.

        Parameters
        ----------
        particles :
            Current state given in particles format from which the time evolved state shall
            be calculated.
        """
        new_particles = np.copy(particles)
        idx = random.randint(0, len(particles) - 1)        # particle to be moved
        possibilities = self.get_possibilities()           # possible movements
        idx_mov = random.randint(0, len(possibilities) - 1)
        if self.Is_move_possible(particles, possibilities[idx_mov], idx) == False:
            # move is not possible -> return the old state of particles
            return particles
        new_particles[idx] = self.Apply_movement(new_particles[idx], possibilities[idx_mov])
        [e_old, e_new] = [self.Calculate_energy(particles), self.Calculate_energy(new_particles)]
        # Metropolis acceptance probability min(exp(-beta*dE), 1).
        # BUGFIX: branching on the exponent's sign is mathematically identical
        # to the min(..., 1) form but avoids an OverflowError in math.exp for
        # large downhill energy differences.
        exponent = -self.beta * (e_new - e_old)
        prob = 1 if exponent >= 0 else math.exp(exponent)
        new_particles.sort()  # canonical ordering of the particle positions
        if prob > random.random():
            return new_particles
        return particles

    def Apply_movement(self, particle, movement):
        """
        Applies the given movement onto the given particle.

        Parameters
        ----------
        particle :
            The particle in particles format on which the movement shall be applied
        movement :
            The movement direction that shall be applied
        """
        return particle + movement

    #TODO: Implement functionality to get multithreading by calculating several systems in parallel and stitching results together
    #https://docs.python.org/2.7/library/multiprocessing.html
    def Simulate_System(self, iterations, start_particles=None, new_system=False):
        """
        Simulates a system via Markov-chain Monte Carlo over the given number
        of time steps and saves it directly to hard drive using the given file location.

        Parameters
        ----------
        iterations :
            Number of time steps for applying the Monte Carlo method. A time step of no change
            in the state is counted as one iteration, too.
        start_particles :
            The state from which to start the time evolution.
        new_system :
            Specifies whether the last simulated state should be used (=False), if possible,
            or a new random state should be used (=True).
        """
        _states_per_line = 100
        prg = tools.progress_log(iterations)
        _nostatechange = 0
        _rownostatechange = 0
        _maxrow = 0
        # BUGFIX: 'is not None' instead of '!= None'. '!=' on a numpy array is
        # evaluated elementwise and made the 'if' raise
        # "ValueError: The truth value of an array ... is ambiguous".
        if start_particles is not None:
            p = np.copy(start_particles)
        elif self.last_state is not None and new_system == False:
            p = self.last_state
        else:
            p = self.Generate_random_state()
        with open(self.file, mode='a') as stream:
            for k in range(iterations):
                new_p = self.Next_step(p)
                if (len(new_p) == len(p)) and all(new_p == p):  # for the ising model len has to be checked
                    _nostatechange += 1
                    _rownostatechange += 1
                else:
                    _maxrow = max(_maxrow, _rownostatechange)
                    _rownostatechange = 0
                p = new_p
                # Converting to list first adds the "," separators to the string.
                text = str(list(p))
                if k % _states_per_line == _states_per_line - 1:
                    text += "\n"
                else:
                    text += self._seperator
                stream.write(text)
                prg.update_progress(k)
        self.log[LogTypes.NoStateChange] = _nostatechange
        self.log[LogTypes.RowNoStateChange] = _maxrow
        self.last_state = p
        prg.finished()

    def get_number_of_states(self):
        """
        Returns the number of states currently simulated and saved on the hard drive.
        """
        return sum(1 for _ in self.Get_particles_from_file())

    def str_to_particles(self, str_particles):
        """
        Returns the state given as string in the particles format.

        Parameters
        ----------
        str_particles :
            The state given in particles form but as string. A non-string
            argument is returned unchanged.
        """
        if type(str_particles) == str:
            return json.loads(str_particles)
        else:
            return str_particles
class DataCreator2DGrid(base_DataCreator):
"""
Child class of the base_DataCreator class. Provides functions to simulate a physical system
of particles on a 2D quadratic grid. States are normally handled in the 'particles' format
meaning a particle that is located at the position [x,y] is in particles format at integer
position x+y*size_of_system. A state in particles format lists all current particles in that
position format.
"""
#Attention: coupling constant was +1 before and is due to changes in energy calculation changed to -1
def __init__(self,file , number_of_particles=2, size_of_system=7, coupling_constant=-1, beta=0.7, energy_mode=Energy_Modes.neighbouring, overwrite_file=False):
"""
Parameters
----------
coupling_constant :
strength of coupling, will be used in child classes for calculating the energy of the system
beta :
inverse of temperature for the system. Beta specifies the probability to change the state of
the system into a state with higher energy
energy_mode :
Determines the way the energy is calculated. For the mode the enum class Energy_Modes is used
number_of_particles :
The number of particles that will be simulated
size_of_system :
The size of the system that will be simulated
file :
Path of type string to which the calculated states will be saved
overwrite_file :
Boolean which determines wether the given file will be cleared or not
"""
super().__init__(coupling_constant=coupling_constant, beta=beta, energy_mode=energy_mode,
number_of_particles=number_of_particles, size_of_system=size_of_system, file=file, overwrite_file=overwrite_file)
def Generate_random_state(self):
"""
Generates a random 2D state in the particles format.
The particles are set randomly onto the grid.
"""
#state = np.zeros([_size_of_system,_size_of_system])
particles = np.array([],dtype= int)
for k in range(self.number_of_particles):
valid_idx = False
while valid_idx == False:
idx= random.randint(0,self.size_of_system**2-1)
valid_idx = True
for p in particles:
if p == idx:
valid_idx = False
particles = np.append(particles, idx)
#state[idx//_size_of_system][idx%_size_of_system] = 1
particles.sort()
return particles
def Generate_state(self, particle_positions, plot = True):
"""
Generates a state in the particles format.
Parameters
----------
particles_position :
A list of positions of particles in the following format [x,y]
x meaning the x coordinate of the particle and y the y coordinate
plot :
If set to true the generated state will be plotted for visualization
"""
particles = []
for i,p in enumerate(particle_positions):
particles.append(p[0]+p[1]*self.size_of_system)
if i >= self.number_of_particles:
break
particles.sort()
if plot:
self.Plot_state(particles,discrete=True)
return particles
def Get_properties(self):
"""
Returns a dictionary with properties of the data creator object.
Properties:
- file
- coupling constant
- beta
- energy_mode
- number_of_particles
- size_of_system
"""
properties = super().Get_properties()
return properties
def Load_properties(self, properties):
"""
Loads the given properties dicitonary into the data creator object.
Parameters
----------
properties :
dictionary of properties of a data creator object as recieved from
the Get_properties method. Alternatively a string with the location of
the properties dictionary on the harddrive saved with the tools module.
"""
properties = super().Load_properties(properties)
def Particle_to_visual(self, particles):
"""
Converts the given state in the particle format into a format suitable for plotting.
So a two dimensional array is used for specifining which positions the particles
occupy.
Parameters
----------
particles :
The state in the particles format which will be converted
"""
state = np.array([[0 for l in range(self.size_of_system)] for k in range(self.size_of_system)])
for p in particles:
state[p//self.size_of_system][p%self.size_of_system] = 1
return state
def get_possibilities(self):
"""
Returns a list of all time evolutions one certain particle can do in one time
step using the particles format.
"""
return np.array([-self.size_of_system, -1,1,self.size_of_system])
def Is_move_possible (self, particles, movement, particle_idx):
"""
Tests if a given movement is valid for the given state. So it tests if the movement
takes one particle out of the grid or moves it into another particle.
Parameters
----------
particles :
The current state on which one time evolution step shall be applied
movement :
The chosen movement from the get_possibilities method
particle_idx :
The index of the particle that shall be moved
"""
[xmov,ymov] = [np.sign(movement) * (abs(movement)%self.size_of_system), np.sign(movement) * (abs(movement)//self.size_of_system)]
#print("x: %d y: %d" %(xmov,ymov))
#[xpos,ypos] = [particles[particle_idx]%self.size_of_system, particles[particle_idx]//self.size_of_system]
[xpos,ypos] = self.getxy_position(particles[particle_idx])
if xmov+xpos<0 or xmov+xpos>=self.size_of_system or ymov+ypos<0 or ymov+ypos>=self.size_of_system:
return False
for p in np.delete(particles, particle_idx):
#if particles[particle_idx] + movement == p:
# return False
if self.Apply_movement(particles[particle_idx], movement) == p:
return False
return True
def getxy_position(self,particle):
"""
Get xy-position of the given particle
Parameters
----------
particles :
One particle in the particles format given in particles lists
Returns
-------
Array of the form [x,y] with x and y coordinate
"""
return [particle%self.size_of_system, particle//self.size_of_system]
def getparticle_position(self,xyparticle):
"""
Get particle in particles format for given particle in xy format
The xy coordinates will used as modulo size of the system.
Parameters
----------
xyparticles :
One particle in the [x,y] format
Returns :
One particle in the particles format
"""
return xyparticle[0]%self.size_of_system + (xyparticle[1]%self.size_of_system)*self.size_of_system
def Calculate_energy (self, particles):
"""
Calculates the energy of the given system.
Parameters
----------
particles :
The state in the particles format whose energy shall be computed
"""
energy = 0
if self.energy_mode == Energy_Modes.rectangular: #rectangular
for p in range(len(particles)):
for k in range(p+1,len(particles)):
energy += self.coupling_constant * (1/self.dist_between_grid_points(particles[p], particles[k]))
elif self.energy_mode == Energy_Modes.neighbouring: #neighbouring
for p in range(len(particles)):
for k in range(p+1,len(particles)):
if self.is_neighboured(particles[p], particles[k]):
energy += self.coupling_constant
return energy
def is_neighboured (self, point_1, point_2):
"""
Gives back a boolean wheater the two given points are neighboured.
(diagonal is not neighboured)
Parameters
----------
point_1 :
First point in particles format that is looked at
point_2 :
Second point in particles format that is looked at
"""
p1 = np.array([point_1%self.size_of_system, point_1//self.size_of_system])
p2 = np.array([point_2%self.size_of_system, point_2//self.size_of_system])
diff = abs(p1 - p2)
if (diff[0] + diff[1]) == 1:
return True
return False
def dist_between_grid_points (self, point_1, point_2):
"""
Calculates the number of grid points that are between the two given points.
For counting only directly connected grid points are used (no diagonals).
Parameters
----------
point_1 :
First point in particles format that is looked at
point_2 :
Second point in particles format that is looked at
"""
p1 = np.array([point_1%self.size_of_system, point_1//self.size_of_system])
p2 = np.array([point_2%self.size_of_system, point_2//self.size_of_system])
diff = abs(p1 - p2)
dist = diff[0] + diff[1]
return dist
class DataCreator2DGridPeriodic (DataCreator2DGrid):
"""
Child class of the DataCreator2DGrid class. Provides functions to simulate a physical
system of particles on a 2D quadratic grid with a periodic boundary condition identifing
the opposite borders with each other. States are normally handled in the 'particles' format
meaning a particle that is located at the position [x,y] is in particles format at integer
position x+y*size_of_system. A state in particles format lists all current particles in that
position format.
"""
def Is_move_possible(self, particles, movement, particle_idx):
"""
Tests if a given movement is valid for the given state. So it tests if the movement
moves one particle into another particle.
Parameters
----------
particles :
The current state on which one time evolution step shall be applied
movement :
The chosen movement from the get_possibilities method
particle_idx :
The index of the particle that shall be moved
"""
for p in np.delete(particles, particle_idx):
if self.Apply_movement(particles[particle_idx], movement) == p:
return False
return True
def Apply_movement(self, particle, movement):
"""
Applies the given movement onto the particle with given index.
Parameters
----------
particle :
The particle in particles format on which the movement shall be applied
movement :
The movement direction that shall be applied
"""
[xmov,ymov] = [np.sign(movement) * (abs(movement)%self.size_of_system),
np.sign(movement) * (abs(movement)//self.size_of_system)]
[xpos,ypos] = self.getxy_position(particle)
[x,y] = [(xpos+xmov)%self.size_of_system,(ypos+ymov)%self.size_of_system]
return self.getparticle_position([x,y])
def is_neighboured(self, point_1, point_2):
"""
Gives back a boolean wheater the two given points are neighboured.
(diagonal is not neighboured, but periodic boundary condition is used.)
Parameters
----------
point_1 :
First point in particles format that is looked at
point_2 :
Second point in particles format that is looked at
"""
p1 = np.array([point_1%self.size_of_system, point_1//self.size_of_system])
p2 = np.array([point_2%self.size_of_system, point_2//self.size_of_system])
xdist=min(abs(p1[0]-p2[0]),abs(abs(p1[0]-p2[0])-self.size_of_system))
ydist=min(abs(p1[1]-p2[1]),abs(abs(p1[1]-p2[1])-self.size_of_system))
if xdist + ydist == 1:
return True
return False
def dist_between_grid_points(self,point_1,point_2):
"""
Calculates the number of grid points that are between the two given points.
The periodic boundary condition is used.
Parameters
----------
point_1 :
First point in particles format that is looked at
point_2 :
Second point in particles format that is looked at
"""
p1 = np.array([point_1%self.size_of_system, point_1//self.size_of_system])
p2 = np.array([point_2%self.size_of_system, point_2//self.size_of_system])
xdist=min(abs(p1[0]-p2[0]),abs(abs(p1[0]-p2[0])-self.size_of_system))
ydist=min(abs(p1[1]-p2[1]),abs(abs(p1[1]-p2[1])-self.size_of_system))
return xdist + ydist
class DataCreator2DIsingModel (DataCreator2DGridPeriodic):
"""
Child class of the DataCreator2DGridPeriodic class. Provides functions to simulate a
physical ising model on a 2D quadratic grid with periodic boundary condition identifing
the opposite borders with each other. A state the 'particles' format in the ising model
consists of all grid points with a positive spin on it. Such a grid point that is located
at the position [x,y] is in particles format at integer position x+y*size_of_system.
"""
def __init__(self,file , size_of_system=7, coupling_constant=1, beta=0.7, energy_mode=Energy_Modes.neighbouring, overwrite_file=False):
"""
Parameters
----------
coupling_constant :
strength of coupling, will be used in child classes for calculating the energy of the system
beta :
inverse of temperature for the system. Beta specifies the probability to change the state of
the system into a state with higher energy
energy_mode :
Determines the way the energy is calculated. For the mode the enum class Energy_Modes is used
size_of_system :
The size of the system that will be simulated
file :
Path of type string to which the calculated states will be saved
overwrite_file :
Boolean which determines wether the given file will be cleared or not
"""
super().__init__(coupling_constant=coupling_constant, beta=beta, energy_mode=energy_mode,
number_of_particles=None, size_of_system=size_of_system, file=file, overwrite_file=overwrite_file)
self._decay_constant_vis = 0
def Get_properties(self):
"""
Returns a dictionary with properties of the data creator object.
Properties:
- file
- coupling constant
- beta
- energy_mode
- size_of_system
"""
properties = {}
properties["file"] = self.file
properties["coupling_constant"] = self.coupling_constant
properties["beta"] = self.beta
properties["energy_mode"] = self.energy_mode
properties["size_of_system"] = self.size_of_system
return properties
def Load_properties(self, properties):
"""
Loads the given properties dicitonary into the data creator object.
Parameters
----------
properties :
dictionary of properties of a data creator object as recieved from
the Get_properties method. Alternatively a string with the location of
the properties dictionary on the harddrive saved with the tools module.
"""
if type(properties) == str:
properties = tools.load_data(properties)
self.file = properties["file"]
self.coupling_constant = properties["coupling_constant"]
self.beta = properties["beta"]
self.energy_mode = properties["energy_mode"]
self.size_of_system = properties["size_of_system"]
return properties
def Generate_random_state(self):
"""
Generates a random 2D state in the particles format.
The positive spins are set randomly onto the grid.
"""
places = [k for k in range(self.size_of_system**2)]
random.shuffle(places)
particles = np.array(places[:random.randint(0,self.size_of_system**2-1)], dtype=int)
particles.sort()
return particles
def get_neighbours (self, particle):
"""
Returns a list with all positions in the particles format which
are neighboured to the given particle position.
Parameters
----------
particle :
integer which specifies the position of one spin on the grid
"""
[x,y] = self.getxy_position(particle)
return [self.getparticle_position([x+1,y]),
self.getparticle_position([x-1,y]),
self.getparticle_position([x,y+1]),
self.getparticle_position([x,y-1])]
def Calculate_energy(self, particles):
"""
Calculates the energy of the given system.
Parameters
----------
particles :
The state in the particles format whose energy shall be computed
"""
energy = 0
if self.energy_mode == Energy_Modes.neighbouring:
for p in [k for k in range(self.size_of_system**2)]:
for q in self.get_neighbours(p):
energy += 2 * (int(p in particles)-0.5) * (int(q in particles)-0.5) * self.coupling_constant
elif self.energy_mode== Energy_Modes.rectangular:
for p in [k for k in range(self.size_of_system**2)]:
for q in [k for k in range(p+1,self.size_of_system**2)]:
energy += (4 * (int(p in particles)-0.5) * (int(q in particles)-0.5) *
self.coupling_constant * (1/self.dist_between_grid_points(p, q)))
return energy
def Next_step(self, particles):
"""
Simulates one time step for the given systems (orientation of spins).
For that it uses the methods of markov-chain monte carlo to calculate a possible
next state and accepting it with a propability that is related to the difference
in energy between the new and the old state. With the variable beta one can modify
the temperature of the system.
Parameters
----------
particles :
Current state given in particles format from which the time evolved state shall
be calculated.
"""
place = random.randint(0,self.size_of_system**2-1)
if place in particles:
new_particles = np.setdiff1d(particles, np.array([place]))
else:
new_particles = np.append(particles, np.array([place]))
new_particles.sort()
[e_old, e_new] = [self.Calculate_energy(particles), self.Calculate_energy(new_particles)]
prob = min(math.exp(- self.beta * (e_new - e_old)),1)
if prob > random.random():
return new_particles
return particles
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.axes",
"random.shuffle",
"matplotlib.pyplot.figure",
"matplotlib.colors.ListedColormap",
"random.randint",
"numpy.copy",
"json.loads",
"matplotlib.pyplot.imshow",
"wordvectors.physicaldata.tools.progress_log",
"numpy.append",
"copy.deepcopy",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"wordvectors.physicaldata.tools.load_data",
"random.random",
"numpy.delete",
"matplotlib.pyplot.subplot",
"math.exp",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.sign"
] |
[((1997, 2026), 'wordvectors.physicaldata.tools.progress_log', 'tools.progress_log', (['num_steps'], {}), '(num_steps)\n', (2015, 2026), True, 'import wordvectors.physicaldata.tools as tools\n'), ((3185, 3197), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3195, 3197), True, 'import matplotlib.pyplot as plt\n'), ((3203, 3244), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of no change of state"""'], {}), "('Number of no change of state')\n", (3212, 3244), True, 'import matplotlib.pyplot as plt\n'), ((3250, 3280), 'matplotlib.pyplot.plot', 'plt.plot', (['xbeta', 'nostatechange'], {}), '(xbeta, nostatechange)\n', (3258, 3280), True, 'import matplotlib.pyplot as plt\n'), ((3286, 3298), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3296, 3298), True, 'import matplotlib.pyplot as plt\n'), ((3304, 3360), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of longest row of no change of state"""'], {}), "('Number of longest row of no change of state')\n", (3313, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3399), 'matplotlib.pyplot.plot', 'plt.plot', (['xbeta', 'rownostatechange'], {}), '(xbeta, rownostatechange)\n', (3374, 3399), True, 'import matplotlib.pyplot as plt\n'), ((10113, 10134), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Greys"""'], {}), "('Greys')\n", (10125, 10134), True, 'import matplotlib.pyplot as plt\n'), ((10150, 10162), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10160, 10162), True, 'import matplotlib.pyplot as plt\n'), ((10176, 10186), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (10184, 10186), True, 'import matplotlib.pyplot as plt\n'), ((10201, 10271), 'matplotlib.pyplot.imshow', 'plt.imshow', (['states_to_visualize[0]'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(states_to_visualize[0], interpolation='nearest', cmap=cmap)\n", (10211, 10271), True, 'import matplotlib.pyplot as plt\n'), ((11069, 11079), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (11077, 11079), True, 'import matplotlib.pyplot as plt\n'), ((12883, 12950), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, numb_of_lines * (15 / number_of_coloumns))'}), '(figsize=(15, numb_of_lines * (15 / number_of_coloumns)))\n', (12893, 12950), True, 'import matplotlib.pyplot as plt\n'), ((14819, 14837), 'numpy.copy', 'np.copy', (['particles'], {}), '(particles)\n', (14826, 14837), True, 'import numpy as np\n'), ((17428, 17458), 'wordvectors.physicaldata.tools.progress_log', 'tools.progress_log', (['iterations'], {}), '(iterations)\n', (17446, 17458), True, 'import wordvectors.physicaldata.tools as tools\n'), ((21679, 21702), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (21687, 21702), True, 'import numpy as np\n'), ((24739, 24799), 'numpy.array', 'np.array', (['[-self.size_of_system, -1, 1, self.size_of_system]'], {}), '([-self.size_of_system, -1, 1, self.size_of_system])\n', (24747, 24799), True, 'import numpy as np\n'), ((25899, 25933), 'numpy.delete', 'np.delete', (['particles', 'particle_idx'], {}), '(particles, particle_idx)\n', (25908, 25933), True, 'import numpy as np\n'), ((28406, 28479), 'numpy.array', 'np.array', (['[point_1 % self.size_of_system, point_1 // self.size_of_system]'], {}), '([point_1 % self.size_of_system, point_1 // self.size_of_system])\n', (28414, 28479), True, 'import numpy as np\n'), ((28490, 28563), 'numpy.array', 'np.array', (['[point_2 % self.size_of_system, point_2 // self.size_of_system]'], {}), '([point_2 % self.size_of_system, point_2 // self.size_of_system])\n', (28498, 28563), True, 'import numpy as np\n'), ((29149, 29222), 'numpy.array', 'np.array', (['[point_1 % self.size_of_system, point_1 // self.size_of_system]'], {}), '([point_1 % self.size_of_system, point_1 // self.size_of_system])\n', (29157, 29222), True, 'import numpy as np\n'), ((29233, 29306), 'numpy.array', 'np.array', (['[point_2 % self.size_of_system, point_2 // self.size_of_system]'], {}), 
'([point_2 % self.size_of_system, point_2 // self.size_of_system])\n', (29241, 29306), True, 'import numpy as np\n'), ((30528, 30562), 'numpy.delete', 'np.delete', (['particles', 'particle_idx'], {}), '(particles, particle_idx)\n', (30537, 30562), True, 'import numpy as np\n'), ((31868, 31941), 'numpy.array', 'np.array', (['[point_1 % self.size_of_system, point_1 // self.size_of_system]'], {}), '([point_1 % self.size_of_system, point_1 // self.size_of_system])\n', (31876, 31941), True, 'import numpy as np\n'), ((31952, 32025), 'numpy.array', 'np.array', (['[point_2 % self.size_of_system, point_2 // self.size_of_system]'], {}), '([point_2 % self.size_of_system, point_2 // self.size_of_system])\n', (31960, 32025), True, 'import numpy as np\n'), ((32702, 32775), 'numpy.array', 'np.array', (['[point_1 % self.size_of_system, point_1 // self.size_of_system]'], {}), '([point_1 % self.size_of_system, point_1 // self.size_of_system])\n', (32710, 32775), True, 'import numpy as np\n'), ((32786, 32859), 'numpy.array', 'np.array', (['[point_2 % self.size_of_system, point_2 // self.size_of_system]'], {}), '([point_2 % self.size_of_system, point_2 // self.size_of_system])\n', (32794, 32859), True, 'import numpy as np\n'), ((36469, 36491), 'random.shuffle', 'random.shuffle', (['places'], {}), '(places)\n', (36483, 36491), False, 'import random\n'), ((38910, 38957), 'random.randint', 'random.randint', (['(0)', '(self.size_of_system ** 2 - 1)'], {}), '(0, self.size_of_system ** 2 - 1)\n', (38924, 38957), False, 'import random\n'), ((6817, 6844), 'wordvectors.physicaldata.tools.load_data', 'tools.load_data', (['properties'], {}), '(properties)\n', (6832, 6844), True, 'import wordvectors.physicaldata.tools as tools\n'), ((11592, 11637), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['white', 'black']"], {}), "(['white', 'black'])\n", (11617, 11637), True, 'import matplotlib as mpl\n'), ((11672, 11693), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', 
(['"""Greys"""'], {}), "('Greys')\n", (11684, 11693), True, 'import matplotlib.pyplot as plt\n'), ((13002, 13055), 'matplotlib.pyplot.subplot', 'plt.subplot', (['numb_of_lines', 'number_of_coloumns', '(k + 1)'], {}), '(numb_of_lines, number_of_coloumns, k + 1)\n', (13013, 13055), True, 'import matplotlib.pyplot as plt\n'), ((15551, 15589), 'math.exp', 'math.exp', (['(-self.beta * (e_new - e_old))'], {}), '(-self.beta * (e_new - e_old))\n', (15559, 15589), False, 'import math\n'), ((15643, 15658), 'random.random', 'random.random', ([], {}), '()\n', (15656, 15658), False, 'import random\n'), ((17593, 17617), 'numpy.copy', 'np.copy', (['start_particles'], {}), '(start_particles)\n', (17600, 17617), True, 'import numpy as np\n'), ((19459, 19484), 'json.loads', 'json.loads', (['str_particles'], {}), '(str_particles)\n', (19469, 19484), False, 'import json\n'), ((22062, 22087), 'numpy.append', 'np.append', (['particles', 'idx'], {}), '(particles, idx)\n', (22071, 22087), True, 'import numpy as np\n'), ((35894, 35921), 'wordvectors.physicaldata.tools.load_data', 'tools.load_data', (['properties'], {}), '(properties)\n', (35909, 35921), True, 'import wordvectors.physicaldata.tools as tools\n'), ((39290, 39328), 'math.exp', 'math.exp', (['(-self.beta * (e_new - e_old))'], {}), '(-self.beta * (e_new - e_old))\n', (39298, 39328), False, 'import math\n'), ((39352, 39367), 'random.random', 'random.random', ([], {}), '()\n', (39365, 39367), False, 'import random\n'), ((13102, 13122), 'matplotlib.pyplot.title', 'plt.title', (['titles[k]'], {}), '(titles[k])\n', (13111, 13122), True, 'import matplotlib.pyplot as plt\n'), ((21846, 21893), 'random.randint', 'random.randint', (['(0)', '(self.size_of_system ** 2 - 1)'], {}), '(0, self.size_of_system ** 2 - 1)\n', (21860, 21893), False, 'import random\n'), ((25403, 25420), 'numpy.sign', 'np.sign', (['movement'], {}), '(movement)\n', (25410, 25420), True, 'import numpy as np\n'), ((25460, 25477), 'numpy.sign', 'np.sign', (['movement'], 
{}), '(movement)\n', (25467, 25477), True, 'import numpy as np\n'), ((31090, 31107), 'numpy.sign', 'np.sign', (['movement'], {}), '(movement)\n', (31097, 31107), True, 'import numpy as np\n'), ((31171, 31188), 'numpy.sign', 'np.sign', (['movement'], {}), '(movement)\n', (31178, 31188), True, 'import numpy as np\n'), ((39038, 39055), 'numpy.array', 'np.array', (['[place]'], {}), '([place])\n', (39046, 39055), True, 'import numpy as np\n'), ((39122, 39139), 'numpy.array', 'np.array', (['[place]'], {}), '([place])\n', (39130, 39139), True, 'import numpy as np\n'), ((9662, 9689), 'copy.deepcopy', 'copy.deepcopy', (['visual_state'], {}), '(visual_state)\n', (9675, 9689), False, 'import copy\n'), ((36530, 36577), 'random.randint', 'random.randint', (['(0)', '(self.size_of_system ** 2 - 1)'], {}), '(0, self.size_of_system ** 2 - 1)\n', (36544, 36577), False, 'import random\n')]
|
import numpy as np
import time
import multiprocessing as mp
from multiprocessing.managers import SyncManager
from queue import PriorityQueue
from .XpcsAna.Xpcs import Xpcs
from .XsvsAna.Xsvs import Xsvs
from .SaxsAna.Saxs import Saxs
from .ProcData.Xdata import Xdata
from .Decorators import Decorators
from .misc.xsave import save_result
class MyManager(SyncManager):
pass
def Manager():
m = MyManager()
m.start()
return m
class Analysis(Xdata):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@Decorators.input2list
def analyze(self, series_id, method, first=0, last=np.inf, handle_existing='next',
nread_procs=4, chunk_size=200, verbose=True, dark=None,
dtype=np.float32, filename='', read_kwargs={}, **kwargs):
if not self.setup.wavelength:
raise ValueError('Setup is not defined properly. Cannot perform analysis.')
for sid in series_id:
if verbose:
print('\n\n#### Starting %s Analysis ####\nSeries: %d in folder %s\n' %
(method, sid, self.datdir))
print('Using {} processes to read data.'.format(nread_procs))
# if dark is not None:
# if type(dark) == int:
# print('Loading DB entry {} as dark.'.format(dark))
# dark = self.xana.get_item(dark)['Isaxs']
nf = self.meta.loc[sid, 'nframes']
first = first % nf + self.meta.loc[sid, 'first']
last = min([self.meta.loc[sid, 'nframes'], last])
last = (last - 1) % nf + self.meta.loc[sid, 'first']
# update meta database
# self.meta.loc[sid, 'first'] = first
# self.meta.loc[sid, 'last'] = last
# dict with options and variables passed to the data reader
read_opt = {'first': first,
'last': last,
'dark': dark,
'verbose': False,
'dtype': dtype,
'qsec': self.setup.qsec,
'output': '2dsection',
'nprocs': nread_procs,
'chunk_size':chunk_size
}
saxs_dict = read_opt.copy()
read_opt.update(read_kwargs)
proc_dat = {'nimages': last - first + 1,
'dim': self.setup.qsec_dim
}
# old chunks
# chunks = [np.arange(first + i*chunk_size, min(first + (i + 1)*chunk_size, last))
# for i in range(np.ceil((last - first) / chunk_size).astype(np.int32))]
# new chunks
ind_arange = np.arange(first,last+1)
bins = np.arange(0, nf, chunk_size)
digitized = np.digitize(ind_arange, bins)
chunks = [ind_arange[np.where(digitized==i)] for i in np.unique(digitized)]
if method in ['xpcs', 'xsvs']:
# Register a shared PriorityQueue
MyManager.register("PriorityQueue", PriorityQueue)
m = Manager()
dataQ = m.PriorityQueue(nread_procs)
indxQ = m.PriorityQueue()
#dataQ = mp.Queue(nread_procs)
#indxQ = mp.Queue()'symmetric_whole'
# add queues to read and process dictionaries
read_opt['dataQ'] = dataQ
read_opt['indxQ'] = indxQ
read_opt['method'] = 'queue_chunk'
proc_dat['dataQ'] = dataQ
for i, chunk in enumerate(chunks):
indxQ.put((i, chunk))
# h5 files can only be opened by one process at a time and, therefore,
# the processes have to acquire a lock for reading data
lock = 0
if 'h5' in self.fmtstr:
lock = mp.Lock()
read_opt['lock'] = lock
procs = []
for ip in range(nread_procs):
procs.append(mp.Process(target=self.get_series,
args=(sid,), kwargs=read_opt))
procs[ip].start()
time.sleep(2)
if method == 'xpcs':
saxs = kwargs.pop('saxs', 'compute')
Isaxs = self.get_xpcs_args(sid, saxs, saxs_dict)
dt = self.get_delay_time(sid)
savd = Xpcs.pyxpcs(proc_dat, self.setup.qroi, dt=dt, qv=self.setup.qv,
saxs=Isaxs, mask=self.setup.mask, ctr=self.setup.center,
qsec=self.setup.qsec[0], **kwargs)
elif method == 'xpcs_evt':
dt = self.get_delay_time(sid)
evt_dict = dict(method='events',
verbose=True,
qroi=self.setup.qroi,
dtype=np.uint32,
)
read_opt.update(evt_dict)
evt = self.get_series(sid, **read_opt)
savd = Xpcs.eventcorrelator(evt[1:], self.setup.qroi, self.setup.qv,
dt, method='events', **kwargs)
elif method == 'xsvs':
t_e = self.get_xsvs_args(sid,)
savd = Xsvs.pyxsvs(proc_dat, self.setup.qroi, t_e=t_e,
qv=self.setup.qv, qsec=self.setup.qsec[0],
**kwargs)
elif method == 'saxs':
read_opt['output'] = '2d'
proc_dat = {'get_series': self.get_series,
'sid': sid,
'setup': self.setup,
'mask': self.setup.mask}
savd = Saxs.pysaxs(proc_dat, **read_opt, **kwargs)
else:
raise ValueError('Analysis type %s not understood.' % method)
if method in ['xpcs', 'xsvs']:
# stopping processes
for ip in range(nread_procs):
procs[ip].join()
# closing queues
# dataQ.close()
# dataQ.join_thread()
# indxQ.close()
# indxQ.join_thread()
f = self.datdir.split('/')[-2] + '_s' + \
str(self.meta.loc[sid, 'series']) + filename
savfile = save_result(
savd, method, self.savdir, f, handle_existing)
self.add_db_entry(sid, savfile, method)
def get_xpcs_args(self, sid, saxs, read_opt):
''' Get Saxs and delay time for XPCS analysis.
'''
if saxs == 'compute':
print('Calculating average SAXS image.')
Isaxs = self.get_series(sid, method='average', **read_opt)[0]
elif isinstance(saxs, int):
if saxs == -1:
saxs = self.db.shape[0] - 1
print('Loading average SAXS from database entry {}'.format(saxs))
Isaxs = self.get_item(saxs)['Isaxs']
else:
Isaxs = saxs
return Isaxs
def get_delay_time(self, sid):
dt = 0
for attr in ['t_delay', 't_exposure', 't_readout', 't_latency', 'rate',
'pulseLength']:
if attr in self.meta.columns:
item = self.meta.loc[sid, attr]
if attr == 'rate':
dt += 1/item
elif attr == 'pulseLength':
dt += item * 1e-15
else:
dt += item
if attr == 't_delay':
break
return dt
def get_xsvs_args(self, sid):
''' Get exposure time for XSVS analysis
'''
t_e = 0
for attr in ['t_exposure', 'pulseLength']:
if attr in self.meta.columns:
item = self.meta.loc[sid, attr]
if attr == 'pulseLength':
t_e += item * 1e15
else:
t_e += item
return t_e
def defineqrois(self, input_, **kwargs):
if type(input_) == int:
Isaxs = self.get_item(input_)['Isaxs']
elif type(input_) == np.ndarray:
Isaxs = input_
if Isaxs.ndim == 3:
Isaxs = self.arrange_tiles(Isaxs)
Saxs.defineqrois(self.setup, Isaxs, **kwargs)
@staticmethod
def find_center(*args, **kwargs):
return Saxs.find_center(*args, **kwargs)
|
[
"multiprocessing.Lock",
"time.sleep",
"numpy.where",
"numpy.arange",
"multiprocessing.Process",
"numpy.digitize",
"numpy.unique"
] |
[((2749, 2775), 'numpy.arange', 'np.arange', (['first', '(last + 1)'], {}), '(first, last + 1)\n', (2758, 2775), True, 'import numpy as np\n'), ((2792, 2820), 'numpy.arange', 'np.arange', (['(0)', 'nf', 'chunk_size'], {}), '(0, nf, chunk_size)\n', (2801, 2820), True, 'import numpy as np\n'), ((2845, 2874), 'numpy.digitize', 'np.digitize', (['ind_arange', 'bins'], {}), '(ind_arange, bins)\n', (2856, 2874), True, 'import numpy as np\n'), ((2908, 2932), 'numpy.where', 'np.where', (['(digitized == i)'], {}), '(digitized == i)\n', (2916, 2932), True, 'import numpy as np\n'), ((2941, 2961), 'numpy.unique', 'np.unique', (['digitized'], {}), '(digitized)\n', (2950, 2961), True, 'import numpy as np\n'), ((3936, 3945), 'multiprocessing.Lock', 'mp.Lock', ([], {}), '()\n', (3943, 3945), True, 'import multiprocessing as mp\n'), ((4265, 4278), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4275, 4278), False, 'import time\n'), ((4097, 4161), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'self.get_series', 'args': '(sid,)', 'kwargs': 'read_opt'}), '(target=self.get_series, args=(sid,), kwargs=read_opt)\n', (4107, 4161), True, 'import multiprocessing as mp\n')]
|
"""
Created: 2018-08-08
Modified: 2019-03-07
Author: <NAME> <<EMAIL>>
"""
from numpy import array, zeros, arange
from scipy.optimize import root
from scipy.interpolate import lagrange
import common
from common import r0, th0, ph0, pph0, timesteps, get_val, get_der
from plotting import plot_orbit
steps_per_bounce = 64
dt, nt = timesteps(steps_per_bounce, nbounce = 100)
nlag = 1 # order of Lagrange extrapolation
z = zeros([3,nt+1])
z[:,0] = [r0,th0,r0]
zold = z[:,0]
def F(x, xold, pthold):
""" Cost function for implicit midpoint rule in axisymmetric field """
global pth
ret = zeros(3)
# evaluate at midpoint
[H, pth, vpar, dHdx, dHdpph, dpthdx,
dpthdpph, dvpardx, dvpardpph] = get_der(
array([x[2], 0.5*(x[1] + xold[1]), 0.0, pph0]))
ret[0] = dpthdx[0]*(x[1] - xold[1]) - dt*dHdx[0]
dpthdrmid = dpthdx[0]
pthdotbar = dpthdrmid*dHdx[1]-dpthdx[1]*dHdx[0]
ret[1] = dpthdrmid*(pth - pthold) + dt/2.0*pthdotbar
# evaluate at final position
[H, pth, vpar, dHdx, dHdpph, dpthdx,
dpthdpph, dvpardx, dvpardpph] = get_der(array([x[0],x[1],x[2],pph0]))
ret[2] = dpthdrmid*(pth-pthold) + dt*pthdotbar
return ret
#%%
from time import time
tic = time()
[H, pth, vpar] = get_val(array([r0,th0,ph0,pph0]))
for kt in range(nt):
pthold = pth
# Initialize via Lagrange extrapolation
if(kt>=nlag):
extrapr = lagrange(arange(-nlag,1), z[0,kt-nlag:kt+1])
extrapth = lagrange(arange(-nlag,1), z[1,kt-nlag:kt+1])
extraprmid = lagrange(arange(-nlag,1), z[2,kt-nlag:kt+1])
z0 = array([extrapr(1.0),extrapth(1.0),extraprmid(1.0)])
else:
z0 = z[:,kt]
sol = root(F, z0, method='hybr',tol=1e-12,args=(zold, pthold))
z[:,kt+1] = sol.x
zold = z[:,kt+1]
pthold = pth
print('Field evaluations: {}'.format(common.neval))
print('Time taken: {}'.format(time()-tic))
plot_orbit(z)
|
[
"plotting.plot_orbit",
"numpy.zeros",
"common.timesteps",
"time.time",
"numpy.array",
"numpy.arange",
"scipy.optimize.root"
] |
[((333, 373), 'common.timesteps', 'timesteps', (['steps_per_bounce'], {'nbounce': '(100)'}), '(steps_per_bounce, nbounce=100)\n', (342, 373), False, 'from common import r0, th0, ph0, pph0, timesteps, get_val, get_der\n'), ((424, 442), 'numpy.zeros', 'zeros', (['[3, nt + 1]'], {}), '([3, nt + 1])\n', (429, 442), False, 'from numpy import array, zeros, arange\n'), ((1252, 1258), 'time.time', 'time', ([], {}), '()\n', (1256, 1258), False, 'from time import time\n'), ((1941, 1954), 'plotting.plot_orbit', 'plot_orbit', (['z'], {}), '(z)\n', (1951, 1954), False, 'from plotting import plot_orbit\n'), ((603, 611), 'numpy.zeros', 'zeros', (['(3)'], {}), '(3)\n', (608, 611), False, 'from numpy import array, zeros, arange\n'), ((1284, 1311), 'numpy.array', 'array', (['[r0, th0, ph0, pph0]'], {}), '([r0, th0, ph0, pph0])\n', (1289, 1311), False, 'from numpy import array, zeros, arange\n'), ((1723, 1781), 'scipy.optimize.root', 'root', (['F', 'z0'], {'method': '"""hybr"""', 'tol': '(1e-12)', 'args': '(zold, pthold)'}), "(F, z0, method='hybr', tol=1e-12, args=(zold, pthold))\n", (1727, 1781), False, 'from scipy.optimize import root\n'), ((740, 788), 'numpy.array', 'array', (['[x[2], 0.5 * (x[1] + xold[1]), 0.0, pph0]'], {}), '([x[2], 0.5 * (x[1] + xold[1]), 0.0, pph0])\n', (745, 788), False, 'from numpy import array, zeros, arange\n'), ((1112, 1143), 'numpy.array', 'array', (['[x[0], x[1], x[2], pph0]'], {}), '([x[0], x[1], x[2], pph0])\n', (1117, 1143), False, 'from numpy import array, zeros, arange\n'), ((1442, 1458), 'numpy.arange', 'arange', (['(-nlag)', '(1)'], {}), '(-nlag, 1)\n', (1448, 1458), False, 'from numpy import array, zeros, arange\n'), ((1506, 1522), 'numpy.arange', 'arange', (['(-nlag)', '(1)'], {}), '(-nlag, 1)\n', (1512, 1522), False, 'from numpy import array, zeros, arange\n'), ((1572, 1588), 'numpy.arange', 'arange', (['(-nlag)', '(1)'], {}), '(-nlag, 1)\n', (1578, 1588), False, 'from numpy import array, zeros, arange\n'), ((1927, 1933), 'time.time', 'time', 
([], {}), '()\n', (1931, 1933), False, 'from time import time\n')]
|
import pygame
from pygame.locals import DOUBLEBUF, OPENGL, RESIZABLE
import math
import numpy as np
from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, \
glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, \
GL_DEPTH_BUFFER_BIT
from OpenGL.GLU import gluPerspective
lastPosX = 0
lastPosY = 0
zoomScale = 1.0
dataL = 0
xRot = 0
yRot = 0
zRot = 0
def landmark_visualizer(landmarks, cameras, left_landmarks, right_landmarks):
glLineWidth(1.5)
glBegin(GL_LINES)
glColor3f(0.0, 1.0, 0.0)
for landmark in landmarks:
glVertex3fv(cameras[0])
glVertex3fv(landmark)
glColor3f(0.0, 0.0, 1.0)
for landmark in landmarks:
glVertex3fv(cameras[1])
glVertex3fv(landmark)
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(landmarks)):
glVertex3f(landmarks[i][0], landmarks[i][1], landmarks[i][2])
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(cameras)):
glVertex3f(cameras[i][0], cameras[i][1], cameras[i][2])
glEnd()
glLineWidth(1.5)
glBegin(GL_LINES)
glColor3f(0.0, 1.0, 0.0)
for landmark in left_landmarks:
glVertex3fv(cameras[0])
glVertex3fv(landmark)
glColor3f(0.0, 0.0, 1.0)
for landmark in right_landmarks:
glVertex3fv(cameras[1])
glVertex3fv(landmark)
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(left_landmarks)):
glVertex3f(left_landmarks[i][0], left_landmarks[i][1], left_landmarks[i][2])
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(right_landmarks)):
glVertex3f(right_landmarks[i][0], right_landmarks[i][1], right_landmarks[i][2])
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(cameras)):
glVertex3f(cameras[i][0], cameras[i][1], cameras[i][2])
glEnd()
def mouseMove(event):
global lastPosX, lastPosY, zoomScale, xRot, yRot, zRot
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 4:
glScaled(1.05, 1.05, 1.05)
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 5:
glScaled(0.95, 0.95, 0.95)
if event.type == pygame.MOUSEMOTION:
x, y = event.pos
dx = x - lastPosX
dy = y - lastPosY
mouseState = pygame.mouse.get_pressed()
if mouseState[0]:
modelView = (GLfloat * 16)()
mvm = glGetFloatv(GL_MODELVIEW_MATRIX, modelView)
temp = (GLfloat * 3)()
temp[0] = modelView[0] * dy + modelView[1] * dx
temp[1] = modelView[4] * dy + modelView[5] * dx
temp[2] = modelView[8] * dy + modelView[9] * dx
norm_xy = math.sqrt(temp[0] * temp[0] + temp[1] * temp[1] + temp[2] * temp[2])
glRotatef(math.sqrt(dx * dx + dy * dy), temp[0] / norm_xy, temp[1] / norm_xy, temp[2] / norm_xy)
lastPosX = x
lastPosY = y
def initialize_OpenGL():
pygame.init()
display = (300, 300)
pygame.display.set_mode(display, DOUBLEBUF | OPENGL, RESIZABLE)
gluPerspective(45, (1.0 * display[0] / display[1]), 0.1, 50.0)
glTranslatef(0.0, 0.0, -5)
def start_OpenGL(landmarks, cameras, left_landmarks, right_landmarks):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
mouseMove(event)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
landmark_visualizer(landmarks, cameras, left_landmarks, right_landmarks)
pygame.display.flip()
pygame.time.wait(1)
def get_vector_direction(camera_position, landmark):
vector = []
for i in range(3):
vector.append(landmark[i] - camera_position[i])
return np.array(vector)
def get_vector_intersection(left_vector, left_camera_position, right_vector, right_camera_position):
n = np.cross(left_vector, right_vector)
n1 = np.cross(left_vector, n)
n2 = np.cross(right_vector, n)
top = np.dot(np.subtract(right_camera_position, left_camera_position), n2)
bottom = np.dot(left_vector, n2)
divided = top / bottom
mult = divided * left_vector
c1 = left_camera_position + mult
top = np.dot(np.subtract(left_camera_position, right_camera_position), n1)
bottom = np.dot(right_vector, n1)
divided = top / bottom
mult = divided * right_vector
c2 = right_camera_position + mult
center = (c1 + c2) / 2
return center
|
[
"OpenGL.GL.glVertex3fv",
"pygame.event.get",
"OpenGL.GL.glScaled",
"OpenGL.GL.glClear",
"OpenGL.GL.glGetFloatv",
"OpenGL.GL.glTranslatef",
"OpenGL.GL.glBegin",
"pygame.display.set_mode",
"OpenGL.GL.glVertex3f",
"OpenGL.GL.glLineWidth",
"pygame.quit",
"pygame.mouse.get_pressed",
"math.sqrt",
"numpy.cross",
"pygame.init",
"OpenGL.GL.glColor3f",
"pygame.time.wait",
"OpenGL.GL.glEnd",
"numpy.dot",
"OpenGL.GL.glPointSize",
"numpy.subtract",
"OpenGL.GLU.gluPerspective",
"pygame.display.flip",
"numpy.array"
] |
[((563, 579), 'OpenGL.GL.glLineWidth', 'glLineWidth', (['(1.5)'], {}), '(1.5)\n', (574, 579), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((584, 601), 'OpenGL.GL.glBegin', 'glBegin', (['GL_LINES'], {}), '(GL_LINES)\n', (591, 601), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((607, 631), 'OpenGL.GL.glColor3f', 'glColor3f', (['(0.0)', '(1.0)', '(0.0)'], {}), '(0.0, 1.0, 0.0)\n', (616, 631), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((730, 754), 'OpenGL.GL.glColor3f', 'glColor3f', (['(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 1.0)\n', (739, 754), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((852, 859), 'OpenGL.GL.glEnd', 'glEnd', ([], {}), '()\n', (857, 859), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((865, 881), 'OpenGL.GL.glPointSize', 'glPointSize', (['(3.0)'], {}), '(3.0)\n', (876, 881), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, 
glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((886, 904), 'OpenGL.GL.glBegin', 'glBegin', (['GL_POINTS'], {}), '(GL_POINTS)\n', (893, 904), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((909, 933), 'OpenGL.GL.glColor3f', 'glColor3f', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (918, 933), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1044, 1051), 'OpenGL.GL.glEnd', 'glEnd', ([], {}), '()\n', (1049, 1051), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1057, 1073), 'OpenGL.GL.glPointSize', 'glPointSize', (['(3.0)'], {}), '(3.0)\n', (1068, 1073), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1078, 1096), 'OpenGL.GL.glBegin', 'glBegin', (['GL_POINTS'], {}), '(GL_POINTS)\n', (1085, 1096), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1101, 1125), 'OpenGL.GL.glColor3f', 
'glColor3f', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (1110, 1125), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1228, 1235), 'OpenGL.GL.glEnd', 'glEnd', ([], {}), '()\n', (1233, 1235), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1241, 1257), 'OpenGL.GL.glLineWidth', 'glLineWidth', (['(1.5)'], {}), '(1.5)\n', (1252, 1257), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1262, 1279), 'OpenGL.GL.glBegin', 'glBegin', (['GL_LINES'], {}), '(GL_LINES)\n', (1269, 1279), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1285, 1309), 'OpenGL.GL.glColor3f', 'glColor3f', (['(0.0)', '(1.0)', '(0.0)'], {}), '(0.0, 1.0, 0.0)\n', (1294, 1309), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1413, 1437), 'OpenGL.GL.glColor3f', 'glColor3f', (['(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 1.0)\n', (1422, 1437), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, 
glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1541, 1548), 'OpenGL.GL.glEnd', 'glEnd', ([], {}), '()\n', (1546, 1548), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1554, 1570), 'OpenGL.GL.glPointSize', 'glPointSize', (['(3.0)'], {}), '(3.0)\n', (1565, 1570), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1575, 1593), 'OpenGL.GL.glBegin', 'glBegin', (['GL_POINTS'], {}), '(GL_POINTS)\n', (1582, 1593), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1598, 1622), 'OpenGL.GL.glColor3f', 'glColor3f', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (1607, 1622), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1753, 1760), 'OpenGL.GL.glEnd', 'glEnd', ([], {}), '()\n', (1758, 1760), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1766, 1782), 'OpenGL.GL.glPointSize', 
'glPointSize', (['(3.0)'], {}), '(3.0)\n', (1777, 1782), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1787, 1805), 'OpenGL.GL.glBegin', 'glBegin', (['GL_POINTS'], {}), '(GL_POINTS)\n', (1794, 1805), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1810, 1834), 'OpenGL.GL.glColor3f', 'glColor3f', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (1819, 1834), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1969, 1976), 'OpenGL.GL.glEnd', 'glEnd', ([], {}), '()\n', (1974, 1976), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1982, 1998), 'OpenGL.GL.glPointSize', 'glPointSize', (['(3.0)'], {}), '(3.0)\n', (1993, 1998), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((2003, 2021), 'OpenGL.GL.glBegin', 'glBegin', (['GL_POINTS'], {}), '(GL_POINTS)\n', (2010, 2021), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, 
GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((2026, 2050), 'OpenGL.GL.glColor3f', 'glColor3f', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (2035, 2050), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((2153, 2160), 'OpenGL.GL.glEnd', 'glEnd', ([], {}), '()\n', (2158, 2160), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((3238, 3251), 'pygame.init', 'pygame.init', ([], {}), '()\n', (3249, 3251), False, 'import pygame\n'), ((3282, 3345), 'pygame.display.set_mode', 'pygame.display.set_mode', (['display', '(DOUBLEBUF | OPENGL)', 'RESIZABLE'], {}), '(display, DOUBLEBUF | OPENGL, RESIZABLE)\n', (3305, 3345), False, 'import pygame\n'), ((3351, 3411), 'OpenGL.GLU.gluPerspective', 'gluPerspective', (['(45)', '(1.0 * display[0] / display[1])', '(0.1)', '(50.0)'], {}), '(45, 1.0 * display[0] / display[1], 0.1, 50.0)\n', (3365, 3411), False, 'from OpenGL.GLU import gluPerspective\n'), ((3418, 3444), 'OpenGL.GL.glTranslatef', 'glTranslatef', (['(0.0)', '(0.0)', '(-5)'], {}), '(0.0, 0.0, -5)\n', (3430, 3444), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((3535, 3553), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3551, 3553), False, 'import pygame\n'), ((3668, 3718), 'OpenGL.GL.glClear', 'glClear', (['(GL_COLOR_BUFFER_BIT | 
GL_DEPTH_BUFFER_BIT)'], {}), '(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n', (3675, 3718), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((3800, 3821), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (3819, 3821), False, 'import pygame\n'), ((3826, 3845), 'pygame.time.wait', 'pygame.time.wait', (['(1)'], {}), '(1)\n', (3842, 3845), False, 'import pygame\n'), ((4009, 4025), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (4017, 4025), True, 'import numpy as np\n'), ((4137, 4172), 'numpy.cross', 'np.cross', (['left_vector', 'right_vector'], {}), '(left_vector, right_vector)\n', (4145, 4172), True, 'import numpy as np\n'), ((4182, 4206), 'numpy.cross', 'np.cross', (['left_vector', 'n'], {}), '(left_vector, n)\n', (4190, 4206), True, 'import numpy as np\n'), ((4216, 4241), 'numpy.cross', 'np.cross', (['right_vector', 'n'], {}), '(right_vector, n)\n', (4224, 4241), True, 'import numpy as np\n'), ((4335, 4358), 'numpy.dot', 'np.dot', (['left_vector', 'n2'], {}), '(left_vector, n2)\n', (4341, 4358), True, 'import numpy as np\n'), ((4549, 4573), 'numpy.dot', 'np.dot', (['right_vector', 'n1'], {}), '(right_vector, n1)\n', (4555, 4573), True, 'import numpy as np\n'), ((671, 694), 'OpenGL.GL.glVertex3fv', 'glVertex3fv', (['cameras[0]'], {}), '(cameras[0])\n', (682, 694), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((703, 724), 'OpenGL.GL.glVertex3fv', 'glVertex3fv', (['landmark'], {}), '(landmark)\n', (714, 724), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, 
GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((794, 817), 'OpenGL.GL.glVertex3fv', 'glVertex3fv', (['cameras[1]'], {}), '(cameras[1])\n', (805, 817), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((826, 847), 'OpenGL.GL.glVertex3fv', 'glVertex3fv', (['landmark'], {}), '(landmark)\n', (837, 847), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((978, 1039), 'OpenGL.GL.glVertex3f', 'glVertex3f', (['landmarks[i][0]', 'landmarks[i][1]', 'landmarks[i][2]'], {}), '(landmarks[i][0], landmarks[i][1], landmarks[i][2])\n', (988, 1039), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1168, 1223), 'OpenGL.GL.glVertex3f', 'glVertex3f', (['cameras[i][0]', 'cameras[i][1]', 'cameras[i][2]'], {}), '(cameras[i][0], cameras[i][1], cameras[i][2])\n', (1178, 1223), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1354, 1377), 'OpenGL.GL.glVertex3fv', 'glVertex3fv', (['cameras[0]'], {}), '(cameras[0])\n', (1365, 1377), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, 
glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1386, 1407), 'OpenGL.GL.glVertex3fv', 'glVertex3fv', (['landmark'], {}), '(landmark)\n', (1397, 1407), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1483, 1506), 'OpenGL.GL.glVertex3fv', 'glVertex3fv', (['cameras[1]'], {}), '(cameras[1])\n', (1494, 1506), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1515, 1536), 'OpenGL.GL.glVertex3fv', 'glVertex3fv', (['landmark'], {}), '(landmark)\n', (1526, 1536), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1672, 1748), 'OpenGL.GL.glVertex3f', 'glVertex3f', (['left_landmarks[i][0]', 'left_landmarks[i][1]', 'left_landmarks[i][2]'], {}), '(left_landmarks[i][0], left_landmarks[i][1], left_landmarks[i][2])\n', (1682, 1748), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((1885, 1964), 'OpenGL.GL.glVertex3f', 'glVertex3f', (['right_landmarks[i][0]', 'right_landmarks[i][1]', 'right_landmarks[i][2]'], {}), '(right_landmarks[i][0], right_landmarks[i][1], right_landmarks[i][2])\n', (1895, 1964), False, 'from OpenGL.GL import glLineWidth, 
glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((2093, 2148), 'OpenGL.GL.glVertex3f', 'glVertex3f', (['cameras[i][0]', 'cameras[i][1]', 'cameras[i][2]'], {}), '(cameras[i][0], cameras[i][1], cameras[i][2])\n', (2103, 2148), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((2320, 2346), 'OpenGL.GL.glScaled', 'glScaled', (['(1.05)', '(1.05)', '(1.05)'], {}), '(1.05, 1.05, 1.05)\n', (2328, 2346), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((2592, 2618), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (2616, 2618), False, 'import pygame\n'), ((4260, 4316), 'numpy.subtract', 'np.subtract', (['right_camera_position', 'left_camera_position'], {}), '(right_camera_position, left_camera_position)\n', (4271, 4316), True, 'import numpy as np\n'), ((4474, 4530), 'numpy.subtract', 'np.subtract', (['left_camera_position', 'right_camera_position'], {}), '(left_camera_position, right_camera_position)\n', (4485, 4530), True, 'import numpy as np\n'), ((2424, 2450), 'OpenGL.GL.glScaled', 'glScaled', (['(0.95)', '(0.95)', '(0.95)'], {}), '(0.95, 0.95, 0.95)\n', (2432, 2450), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((2704, 2747), 
'OpenGL.GL.glGetFloatv', 'glGetFloatv', (['GL_MODELVIEW_MATRIX', 'modelView'], {}), '(GL_MODELVIEW_MATRIX, modelView)\n', (2715, 2747), False, 'from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT\n'), ((2986, 3054), 'math.sqrt', 'math.sqrt', (['(temp[0] * temp[0] + temp[1] * temp[1] + temp[2] * temp[2])'], {}), '(temp[0] * temp[0] + temp[1] * temp[1] + temp[2] * temp[2])\n', (2995, 3054), False, 'import math\n'), ((3605, 3618), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3616, 3618), False, 'import pygame\n'), ((3077, 3105), 'math.sqrt', 'math.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (3086, 3105), False, 'import math\n')]
|
import numpy as np
def sigmoid(x, derivative=False):
# Sigmoida in odvod
s = 1/(1 + np.exp(-x))
if not derivative:
return s
else:
return s * (1 - s)
def ReLu(x, derivative=False):
if not derivative:
return x if x > 0 else 0,
else:
return 1 if x > 0 else 0,
kernels = {
"ReLu": ReLu,
'Sigmoid': sigmoid
}
|
[
"numpy.exp"
] |
[((94, 104), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (100, 104), True, 'import numpy as np\n')]
|
###################
# PyCon 2018 Project Submission
# "Visualizing Global Refugee Crisis using Pythonic ETL"
# <EMAIL>
###################
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.basemap import Basemap
###################
# Generate a bar chart for total population by year from 1952-2016
###################
def total_refugee_population(dict_allyears):
plt.bar(range(len(dict_allyears)), dict_allyears.values(), align="center", color='#EE3224')
plt.xticks(range(len(dict_allyears)), list(dict_allyears.keys()), rotation=90)
plt.title('Total Refugee Population: 1952-2016',fontweight='bold', color='g', fontsize='12')
plt.xlabel('By Year', fontweight='bold', color='g', fontsize='10')
plt.ylabel('Total Population Size in Millions',fontweight='bold', color='g', fontsize='10')
plt.grid(True)
plt.show()
###################
# Generate a bar chart of total refugee population by year from 2007-2016
###################
def total_10_year_refugee_population(dict_year):
plt.bar(range(len(dict_year)), dict_year.values(), align="center", color='#EE3224')
plt.xticks(range(len(dict_year)), list(dict_year.keys()), rotation=90)
plt.grid(True)
plt.title('Total Refugee Population: 2007-2016',fontweight='bold', color='g', fontsize='12')
plt.xlabel('By Year', fontweight='bold', color='g', fontsize='10')
plt.ylabel('Total Population Size in Millions',fontweight='bold', color='g', fontsize='10')
plt.show()
###################
# Projection map of countries of asylum-seeking population
###################
def country_resid_highest_pop(top_10_country_latslons):
lats,lons = [],[]
for row in top_10_country_latslons:
lats.append(row[0])
lons.append(row[1])
country_map = Basemap(projection='moll', resolution = 'c', area_thresh=500.0,
lat_0=0, lon_0=50)
country_map.drawcoastlines()
country_map.drawcountries()
country_map.fillcontinents(color='beige', lake_color='lightblue')
country_map.drawmapboundary(fill_color='lightblue')
country_map.drawmeridians(np.arange(0, 420, 60),color='beige', dashes=[1,3])
country_map.drawparallels(np.arange(-90, 120, 60),color='beige', dashes=[1,3])
x,y = country_map(lons,lats)
country_map.plot(x, y, 'g^', color='red', markersize=6)
plt.title('Country of Residence With Highest Total Population From All Refugee Categories: 2007-2016')
plt.show()
###################
# Generate a stacked bar chart comparing population types from 2007-2016
###################
def ten_year_pop_type_comparison(dict_poptype_year):
    """Draw grouped (non-stacked) bars comparing population types, 2007-2016.

    dict_poptype_year is converted column-wise into a DataFrame, whose
    built-in bar plot renders one group of bars per row.
    """
    frame = pd.DataFrame.from_dict(dict_poptype_year, orient='columns', dtype=None)
    frame.plot(kind='bar', stacked=False)
    plt.title('Total Population Type Comparison Across 10 Year Span: 2007-2016')
    plt.ylabel('Population Type in Millions')
    plt.show()
###################
# Generate a scatter plot to represent
# total refugee population for 10 year span from 2007 - 2016
###################
def total_pop_type_10_span(dict_poptype_count):
    """Plot total counts for each refugee population type over 2007-2016.

    dict_poptype_count maps the seven population-type names to totals; the
    values are plotted in insertion order against fixed category labels.
    """
    poptype_data = list(dict_poptype_count.values())
    # Was `fix, ax` (typo) and kept an unused `pop_types` local; both fixed.
    fig, ax = plt.subplots()
    plt.plot(poptype_data, 'g^', linewidth=3, color='g')
    # Display labels for the seven population types; the 'Refugees' label
    # previously had an unbalanced '(' — closing parenthesis added.
    labels = ['Internally Displaced', 'Returned IDPs', 'Asylum-seekers',
              'Refugees(incl. refugee-like situations)', 'Returnees',
              'Stateless', 'Others of concern']
    tick_positions = list(range(len(labels)))
    ax.set_xticks(tick_positions)
    ax.set_xticklabels(labels, rotation='vertical')
    plt.ylabel('Population Type in Millions')
    plt.title('Total Population Type Across Ten Year Span: 2007-2016')
    plt.grid(True)
    plt.show()
###################
# Projection map of country of residence and origin by population types
###################
def country_origin_pop_types(top_10_country_poptype_latslons,top_10_origin_poptype_latslons):
    """Map the top-ten refugee/asylum-seeker populations on a Mollweide
    projection: red stars mark countries of residence, green triangles mark
    countries of origin.

    Both arguments are iterables of rows whose first two entries are
    latitude and longitude.
    """
    clat = [row[0] for row in top_10_country_poptype_latslons]
    clon = [row[1] for row in top_10_country_poptype_latslons]
    olat = [row[0] for row in top_10_origin_poptype_latslons]
    olon = [row[1] for row in top_10_origin_poptype_latslons]
    poptype_map = Basemap(projection='moll', resolution='c', area_thresh=500.0,
                          lat_0=0, lon_0=50)
    poptype_map.drawcoastlines()
    poptype_map.drawcountries()
    poptype_map.fillcontinents(color='beige', lake_color='lightblue')
    poptype_map.drawmapboundary(fill_color='lightblue')
    # Faint beige graticule every 60 degrees.
    poptype_map.drawmeridians(np.arange(0, 420, 60), color='beige', dashes=[1, 3])
    poptype_map.drawparallels(np.arange(-90, 120, 60), color='beige', dashes=[1, 3])
    # Basemap expects (lon, lat) and returns projected map coordinates.
    x, y = poptype_map(clon, clat)
    a, b = poptype_map(olon, olat)
    residence_line, = poptype_map.plot(x, y, '*', color='red', markersize=6, label='Residence by Population Types')
    origin_line, = poptype_map.plot(a, b, 'g^', color='green', markersize=6, label='Origin by Population Types')
    plt.legend(loc='upper center', bbox_to_anchor=(0.5,-0.05),ncol=5,fancybox=True,shadow=True)
    plt.title('Top Ten Global Refugee Populations Based on Refugee(Incl. Refugee-Like Situations) and Asylum-Seeker Types')
    plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"mpl_toolkits.basemap.Basemap"
] |
[((591, 689), 'matplotlib.pyplot.title', 'plt.title', (['"""Total Refugee Population: 1952-2016"""'], {'fontweight': '"""bold"""', 'color': '"""g"""', 'fontsize': '"""12"""'}), "('Total Refugee Population: 1952-2016', fontweight='bold', color=\n 'g', fontsize='12')\n", (600, 689), True, 'from matplotlib import pyplot as plt\n'), ((686, 752), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""By Year"""'], {'fontweight': '"""bold"""', 'color': '"""g"""', 'fontsize': '"""10"""'}), "('By Year', fontweight='bold', color='g', fontsize='10')\n", (696, 752), True, 'from matplotlib import pyplot as plt\n'), ((755, 852), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Population Size in Millions"""'], {'fontweight': '"""bold"""', 'color': '"""g"""', 'fontsize': '"""10"""'}), "('Total Population Size in Millions', fontweight='bold', color=\n 'g', fontsize='10')\n", (765, 852), True, 'from matplotlib import pyplot as plt\n'), ((849, 863), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (857, 863), True, 'from matplotlib import pyplot as plt\n'), ((866, 876), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (874, 876), True, 'from matplotlib import pyplot as plt\n'), ((1202, 1216), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1210, 1216), True, 'from matplotlib import pyplot as plt\n'), ((1219, 1317), 'matplotlib.pyplot.title', 'plt.title', (['"""Total Refugee Population: 2007-2016"""'], {'fontweight': '"""bold"""', 'color': '"""g"""', 'fontsize': '"""12"""'}), "('Total Refugee Population: 2007-2016', fontweight='bold', color=\n 'g', fontsize='12')\n", (1228, 1317), True, 'from matplotlib import pyplot as plt\n'), ((1314, 1380), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""By Year"""'], {'fontweight': '"""bold"""', 'color': '"""g"""', 'fontsize': '"""10"""'}), "('By Year', fontweight='bold', color='g', fontsize='10')\n", (1324, 1380), True, 'from matplotlib import pyplot as plt\n'), ((1383, 1480), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Population Size in Millions"""'], {'fontweight': '"""bold"""', 'color': '"""g"""', 'fontsize': '"""10"""'}), "('Total Population Size in Millions', fontweight='bold', color=\n 'g', fontsize='10')\n", (1393, 1480), True, 'from matplotlib import pyplot as plt\n'), ((1477, 1487), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1485, 1487), True, 'from matplotlib import pyplot as plt\n'), ((2607, 2678), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dict_poptype_year'], {'orient': '"""columns"""', 'dtype': 'None'}), "(dict_poptype_year, orient='columns', dtype=None)\n", (2629, 2678), True, 'import pandas as pd\n'), ((2718, 2794), 'matplotlib.pyplot.title', 'plt.title', (['"""Total Population Type Comparison Across 10 Year Span: 2007-2016"""'], {}), "('Total Population Type Comparison Across 10 Year Span: 2007-2016')\n", (2727, 2794), True, 'from matplotlib import pyplot as plt\n'), ((2797, 2838), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Population Type in Millions"""'], {}), "('Population Type in Millions')\n", (2807, 2838), True, 'from matplotlib import pyplot as plt\n'), ((2841, 2851), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2849, 2851), True, 'from matplotlib import pyplot as plt\n'), ((3151, 3165), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3163, 3165), True, 'from matplotlib import pyplot as plt\n'), ((3168, 3220), 'matplotlib.pyplot.plot', 'plt.plot', (['poptype_data', '"""g^"""'], {'linewidth': '(3)', 'color': '"""g"""'}), "(poptype_data, 'g^', linewidth=3, color='g')\n", (3176, 3220), True, 'from matplotlib import pyplot as plt\n'), ((3471, 3512), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Population Type in Millions"""'], {}), "('Population Type in Millions')\n", (3481, 3512), True, 'from matplotlib import pyplot as plt\n'), ((3515, 3581), 'matplotlib.pyplot.title', 'plt.title', (['"""Total Population Type Across Ten Year Span: 
2007-2016"""'], {}), "('Total Population Type Across Ten Year Span: 2007-2016')\n", (3524, 3581), True, 'from matplotlib import pyplot as plt\n'), ((3584, 3598), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3592, 3598), True, 'from matplotlib import pyplot as plt\n'), ((3601, 3611), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3609, 3611), True, 'from matplotlib import pyplot as plt\n'), ((4148, 4233), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""moll"""', 'resolution': '"""c"""', 'area_thresh': '(500.0)', 'lat_0': '(0)', 'lon_0': '(50)'}), "(projection='moll', resolution='c', area_thresh=500.0, lat_0=0, lon_0=50\n )\n", (4155, 4233), False, 'from mpl_toolkits.basemap import Basemap\n'), ((4851, 4950), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, -0.05)', 'ncol': '(5)', 'fancybox': '(True)', 'shadow': '(True)'}), "(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=5,\n fancybox=True, shadow=True)\n", (4861, 4950), True, 'from matplotlib import pyplot as plt\n'), ((4945, 5074), 'matplotlib.pyplot.title', 'plt.title', (['"""Top Ten Global Refugee Populations Based on Refugee(Incl. Refugee-Like Situations) and Asylum-Seeker Types"""'], {}), "(\n 'Top Ten Global Refugee Populations Based on Refugee(Incl. 
Refugee-Like Situations) and Asylum-Seeker Types'\n )\n", (4954, 5074), True, 'from matplotlib import pyplot as plt\n'), ((5067, 5077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5075, 5077), True, 'from matplotlib import pyplot as plt\n'), ((1771, 1856), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""moll"""', 'resolution': '"""c"""', 'area_thresh': '(500.0)', 'lat_0': '(0)', 'lon_0': '(50)'}), "(projection='moll', resolution='c', area_thresh=500.0, lat_0=0, lon_0=50\n )\n", (1778, 1856), False, 'from mpl_toolkits.basemap import Basemap\n'), ((2314, 2426), 'matplotlib.pyplot.title', 'plt.title', (['"""Country of Residence With Highest Total Population From All Refugee Categories: 2007-2016"""'], {}), "(\n 'Country of Residence With Highest Total Population From All Refugee Categories: 2007-2016'\n )\n", (2323, 2426), True, 'from matplotlib import pyplot as plt\n'), ((2422, 2432), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2430, 2432), True, 'from matplotlib import pyplot as plt\n'), ((4448, 4469), 'numpy.arange', 'np.arange', (['(0)', '(420)', '(60)'], {}), '(0, 420, 60)\n', (4457, 4469), True, 'import numpy as np\n'), ((4527, 4550), 'numpy.arange', 'np.arange', (['(-90)', '(120)', '(60)'], {}), '(-90, 120, 60)\n', (4536, 4550), True, 'import numpy as np\n'), ((2083, 2104), 'numpy.arange', 'np.arange', (['(0)', '(420)', '(60)'], {}), '(0, 420, 60)\n', (2092, 2104), True, 'import numpy as np\n'), ((2164, 2187), 'numpy.arange', 'np.arange', (['(-90)', '(120)', '(60)'], {}), '(-90, 120, 60)\n', (2173, 2187), True, 'import numpy as np\n')]
|
import time
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
# from torch_geometric.nn import VGAE
from torch_geometric.loader import DataLoader
from torch_geometric.utils import (degree, negative_sampling,
batched_negative_sampling,
add_self_loops, to_undirected)
from torch.utils.tensorboard import SummaryWriter
from genome_graph import gen_g2g_graph
from gene_graph_dataset import G3MedianDataset
from phylognn_model import G3Median_GCNConv, G3Median_VGAE
from sklearn.metrics import (roc_auc_score, roc_curve,
average_precision_score,
precision_recall_curve,
f1_score, matthews_corrcoef)
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from dcj_comp import dcj_dist
from genome_file import mat2adj
import argparse
# Command-line configuration for the G3-median training run.
parser = argparse.ArgumentParser()
parser.add_argument("--gpuid", type=int, default = 0)
# parser.add_argument("--run", type=int)
parser.add_argument("--seqlen", type=int)
parser.add_argument("--rate", type=float, default = 0.1)
parser.add_argument("--samples", type=int, default = 1000)
parser.add_argument("--epoch", type=int, default = 1000)
parser.add_argument("--cvsplit", type=int, default = 5)
parser.add_argument("--freq", type=int, default = 20)
parser.add_argument("--shuffle", type=int, default = 1)
parser.add_argument("--vals", type=int, default = 100)
parser.add_argument("--valr", type=float, default = 0.1)
args = parser.parse_args()
gpuid = args.gpuid # 0
# train_p, test_p, val_p = 0.7, 0.2, 0.1
# Batch sizes; only the train and val loaders are actually built below.
train_batch, test_batch, val_batch = 256, 64, 8
# Use the requested CUDA device when available, otherwise fall back to CPU.
device = torch.device('cuda:' + str(gpuid) if torch.cuda.is_available() else 'cpu')
# Training graphs: seqlen-length genomes with seqlen*rate events, args.samples instances.
dataset = G3MedianDataset('dataset_g3m', args.seqlen, int(args.seqlen * args.rate), args.samples)
# val_dataset = G3MedianDataset('val_seq_g3m', args.seqlen, int(args.seqlen * args.valr), args.vals)
# Validation data comes from a pre-generated tensor file; each (source, target)
# sequence pair is converted into a genome-to-genome graph.
val_seq, tar_seq = torch.load(f'val_seq_g3m_3_{args.seqlen}_{int(args.seqlen * args.valr)}_{args.vals}/'
                           f'raw/g3raw_{args.seqlen}_{int(args.seqlen * args.valr)}.pt')
val_dataset = [gen_g2g_graph(s, t) for s,t in zip(val_seq, tar_seq)]
# in_channels is passed as None to G3Median_GCNConv below — presumably
# inferred by the model (TODO confirm); latent width is 128.
in_channels, out_channels = None, 128
dataset = dataset.shuffle()
# from torch_geometric.data import Batch
def train(model, train_loader):
    """Run one training epoch and return the mean loss per batch.

    Relies on the module-level ``optimizer`` and ``device``.  The loss is a
    weighted reconstruction term (positive/negative edge weights 2 and 1,
    scaled by 5) plus a KL term scaled by 0.5 / num_nodes.
    """
    model.train()
    running_loss = 0
    for batch in train_loader:
        optimizer.zero_grad()
        batch = batch.to(device)
        latent = model.encode(batch.x, batch.edge_index)
        recon = model.recon_loss_wt(latent, batch.pos_edge_label_index,
                                    batch.neg_edge_label_index, 2, 1) * 5
        loss = recon + (1 / batch.num_nodes) * model.kl_loss() * 0.5
        loss.backward()
        optimizer.step()
        running_loss += loss
    return running_loss / len(train_loader)
# @torch.no_grad()
# def test(model, test_loader):
# model.eval()
# auc, ap = 0, 0
# for data in test_loader:
# data = data.to(device)
# z = model.encode(data.x, data.edge_index)
# # loss += model.recon_loss(z, data.pos_edge_label_index, data.neg_edge_label_index)
# tauc, tap = model.test(z, data.pos_edge_label_index) #, data.neg_edge_label_index)
# auc += tauc
# ap += tap
# return auc/len(test_loader), ap/len(test_loader)
@torch.no_grad()
def median_score(model, val_dataset, val_sequence):
    """Score predicted median genomes against their DCJ lower bound.

    For each (graph, sequence-triple) pair: decode an adjacency matrix,
    extract candidate genomes, and compare the best candidate's total DCJ
    distance to the triple against the pairwise lower bound
    ceil((d01 + d02 + d12) / 2).

    Returns (mean gap above the lower bound, fraction hitting the bound).
    Relies on the module-level ``device``.
    """
    model.eval()
    gap_total, exact_hits = 0, 0
    for graph, seqs in zip(val_dataset, val_sequence):
        graph = graph.to(device)
        latent = model.encode(graph.x, graph.edge_index)
        adj = model.decoder.forward_all(latent).detach().cpu().numpy()
        candidates = mat2adj(adj)
        best = min(sum(dcj_dist(cand, s)[-1] for s in seqs) for cand in candidates)
        bound = np.ceil((dcj_dist(seqs[0], seqs[1])[-1]
                         + dcj_dist(seqs[0], seqs[2])[-1]
                         + dcj_dist(seqs[1], seqs[2])[-1]) / 2)
        gap = best - bound
        if gap == 0:
            exact_hits += 1
        gap_total += gap
    return gap_total / len(val_dataset), exact_hits / len(val_dataset)
@torch.no_grad()
def predict(model, test_loader):
    """Collect (true label, predicted score) pairs for every batch.

    Returns two parallel lists — ground-truth edge labels and the model's
    edge predictions, one entry per batch.  Relies on the module-level
    ``device``.
    """
    model.eval()
    truths, scores = [], []
    for batch in test_loader:
        batch = batch.to(device)
        latent = model.encode(batch.x, batch.edge_index)
        truth, score = model.pred(latent, batch.pos_edge_label_index, batch.neg_edge_label_index)
        truths.append(truth)
        scores.append(score)
    return truths, scores
@torch.no_grad()
def val(model, val_loader):
    """Return the mean weighted reconstruction loss over a validation loader.

    Uses the same positive/negative edge weights (2, 1) as training, but
    without the KL term or the x5 scaling.  Relies on the module-level
    ``device``.
    """
    model.eval()
    running_loss = 0
    for batch in val_loader:
        batch = batch.to(device)
        latent = model.encode(batch.x, batch.edge_index)
        running_loss += model.recon_loss_wt(latent, batch.pos_edge_label_index,
                                            batch.neg_edge_label_index, 2, 1)
    return running_loss / len(val_loader)
def auc_ap(y_list, pred_list):
    """Average ROC-AUC and average precision over per-batch predictions.

    y_list and pred_list are parallel sequences of label/score arrays;
    each pair is scored independently and the per-batch metrics are then
    averaged.  Returns (mean_auc, mean_ap).
    """
    per_batch = [(roc_auc_score(truth, score), average_precision_score(truth, score))
                 for truth, score in zip(y_list, pred_list)]
    mean_auc, mean_ap = np.mean(per_batch, axis=0)
    return mean_auc, mean_ap
def cal_accuracy(y_list, pred_list):
    """Score pooled predictions and build ROC / precision-recall figures.

    Concatenates the per-batch labels and scores along the last axis,
    computes a single pooled ROC-AUC and average precision, and draws one
    ROC-curve figure and one precision-recall figure (each on its own
    pyplot figure, so the plt calls below attach to the most recently
    created one — call order matters).

    Returns ([auc, ap], [auc_figure, ap_figure]).
    """
    # pred_accuracy = np.zeros((len(y_list), 2))
    # for i in range(len(y_list)):
    #     y, pred = y_list[i], pred_list[i]
    #     pred_accuracy[i] = [roc_auc_score(y, pred), 
    #                         average_precision_score(y, pred)]
    figsize = (6,6)
    # Stack each (labels, scores) pair and concatenate along the last axis,
    # then unpack into one flat label vector and one flat score vector.
    y, pred = np.concatenate([[t, p] for t, p in zip(y_list, pred_list)], axis = -1)
    auc, ap = roc_auc_score(y, pred), average_precision_score(y, pred)
    # --- ROC curve figure ---
    auc_figure = plt.figure(figsize=figsize)
    fpr, tpr, _ = roc_curve(y, pred)
    plt.plot(fpr, tpr, color='g', lw=0.3)
    # for i in range(len(y_list)):
    #     y, pred = y_list[i], pred_list[i]
    #     fpr, tpr, _ = roc_curve(y, pred)
    #     plt.plot(fpr, tpr, color='g', lw=0.3)
    plt.plot([0, 1], [0, 1], color="navy", lw=0.3, linestyle="--")
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title(f'Receiver Operating Characteristic ({auc:.4f})')
    # plt.legend(loc="lower right")
    # --- Precision-recall figure ---
    ap_figure = plt.figure(figsize=figsize)
    prc, rec, _ = precision_recall_curve(y, pred)
    plt.plot(rec, prc, color='c', lw=0.3)
    # for i in range(len(y_list)):
    #     y, pred = y_list[i], pred_list[i]
    #     prc, rec, _ = precision_recall_curve(y, pred)
    #     plt.plot(rec, prc, color='c', lw=0.3)
    plt.plot([0, 1], [0, 1], color="navy", lw=0.3, linestyle="--")
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.title(f'Precision-Recall Curve ({ap:.4f})')
    return [auc, ap], [auc_figure, ap_figure] #, ('auc', 'ap')
# Announce the run configuration before training starts.
print(f'{time.ctime()} -- seqlen:{args.seqlen:0>4} '
      f'rate:{args.rate:.2f} samples:{args.samples:0>5} -- fold: {args.vals:0>4}')
model = G3Median_VGAE(G3Median_GCNConv(in_channels, out_channels)).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
# Halve the learning rate when the validation loss plateaus for 10 epochs.
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10,
                              min_lr=0.00001,verbose=True)
# TensorBoard log directory encodes seqlen / samples / rate / run id.
writer = SummaryWriter(log_dir='exps_g3median_' f'{args.seqlen:0>4}' '/e' f'{args.samples:0>5}' '_r'
                      f'{args.rate:0>3.1f}' '_' 'run_' f'{args.vals:0>4}')
train_loader = DataLoader(dataset, batch_size = train_batch, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size = val_batch)
start_time = time.time()
for epoch in range(1, args.epoch + 1):
    loss = train(model, train_loader)
    tloss = val(model, val_loader)
    scheduler.step(tloss)
    writer.add_scalar('loss/train', loss, epoch)
    writer.add_scalar('loss/val', tloss, epoch)
    # if epoch % args.freq != 0:
    #     continue
    # Every epoch: DCJ median score/accuracy plus batch-averaged
    # edge-prediction metrics, all logged to TensorBoard.
    score, acc = median_score(model, val_dataset, val_seq)
    y_list, pred_list = predict(model, val_loader)
    auc, ap = auc_ap(y_list, pred_list)
    writer.add_scalar('auc/test', auc, epoch)
    writer.add_scalar('ap/test', ap, epoch)
    writer.add_scalar('score/test', score, epoch)
    writer.add_scalar('acc/test', acc, epoch)
end_time = time.time()
# Report the average wall-clock time per epoch.
print(f'{time.ctime()} -- seqlen:{args.seqlen:0>4} '
      f'rate:{args.rate:.2f} samples:{args.samples:0>5} -- fold: {args.vals:0>2}'
      f' -- {(end_time - start_time)/args.epoch:>10.3f}s * {args.epoch:0>4} epoches')
writer.close()
# torch.save(y_pred_res, 
#            f'y_pred/ldel' f'{args.seqlen:0>4}' 
#            '-r' f'{args.rate:0>3.1f}' 
#            '-s' f'{args.samples:0>5}' 
#            '-' f'{int(time.time()):0>10}.pt')
|
[
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"time.ctime",
"matplotlib.pyplot.figure",
"numpy.mean",
"genome_graph.gen_g2g_graph",
"torch.no_grad",
"dcj_comp.dcj_dist",
"torch_geometric.loader.DataLoader",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.utils.tensorboard.SummaryWriter",
"sklearn.metrics.average_precision_score",
"matplotlib.pyplot.ylim",
"phylognn_model.G3Median_GCNConv",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.precision_recall_curve",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"time.time",
"matplotlib.pyplot.xlabel",
"genome_file.mat2adj"
] |
[((960, 985), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (983, 985), False, 'import argparse\n'), ((3434, 3449), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3447, 3449), False, 'import torch\n'), ((4276, 4291), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4289, 4291), False, 'import torch\n'), ((4793, 4808), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4806, 4808), False, 'import torch\n'), ((7481, 7579), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': '(0.5)', 'patience': '(10)', 'min_lr': '(1e-05)', 'verbose': '(True)'}), "(optimizer, mode='min', factor=0.5, patience=10, min_lr=\n 1e-05, verbose=True)\n", (7498, 7579), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((7616, 7743), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'f"""exps_g3median_{args.seqlen:0>4}/e{args.samples:0>5}_r{args.rate:0>3.1f}_run_{args.vals:0>4}"""'}), "(log_dir=\n f'exps_g3median_{args.seqlen:0>4}/e{args.samples:0>5}_r{args.rate:0>3.1f}_run_{args.vals:0>4}'\n )\n", (7629, 7743), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((7801, 7858), 'torch_geometric.loader.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'train_batch', 'shuffle': '(True)'}), '(dataset, batch_size=train_batch, shuffle=True)\n', (7811, 7858), False, 'from torch_geometric.loader import DataLoader\n'), ((7874, 7919), 'torch_geometric.loader.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'val_batch'}), '(val_dataset, batch_size=val_batch)\n', (7884, 7919), False, 'from torch_geometric.loader import DataLoader\n'), ((7936, 7947), 'time.time', 'time.time', ([], {}), '()\n', (7945, 7947), False, 'import time\n'), ((8589, 8600), 'time.time', 'time.time', ([], {}), '()\n', (8598, 8600), False, 'import time\n'), ((2213, 2232), 'genome_graph.gen_g2g_graph', 'gen_g2g_graph', (['s', 't'], {}), '(s, t)\n', (2226, 2232), 
False, 'from genome_graph import gen_g2g_graph\n'), ((5429, 5459), 'numpy.mean', 'np.mean', (['pred_accuracy'], {'axis': '(0)'}), '(pred_accuracy, axis=0)\n', (5436, 5459), True, 'import numpy as np\n'), ((5982, 6009), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (5992, 6009), True, 'import matplotlib.pyplot as plt\n'), ((6033, 6051), 'sklearn.metrics.roc_curve', 'roc_curve', (['y', 'pred'], {}), '(y, pred)\n', (6042, 6051), False, 'from sklearn.metrics import roc_auc_score, roc_curve, average_precision_score, precision_recall_curve, f1_score, matthews_corrcoef\n'), ((6056, 6093), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""g"""', 'lw': '(0.3)'}), "(fpr, tpr, color='g', lw=0.3)\n", (6064, 6093), True, 'import matplotlib.pyplot as plt\n'), ((6273, 6335), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': '(0.3)', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=0.3, linestyle='--')\n", (6281, 6335), True, 'import matplotlib.pyplot as plt\n'), ((6340, 6360), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (6348, 6360), True, 'import matplotlib.pyplot as plt\n'), ((6365, 6385), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (6373, 6385), True, 'import matplotlib.pyplot as plt\n'), ((6390, 6423), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (6400, 6423), True, 'import matplotlib.pyplot as plt\n'), ((6428, 6460), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (6438, 6460), True, 'import matplotlib.pyplot as plt\n'), ((6465, 6524), 'matplotlib.pyplot.title', 'plt.title', (['f"""Receiver Operating Characteristic ({auc:.4f})"""'], {}), "(f'Receiver Operating Characteristic ({auc:.4f})')\n", (6474, 6524), True, 'import matplotlib.pyplot as plt\n'), ((6582, 6609), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6592, 6609), True, 'import matplotlib.pyplot as plt\n'), ((6633, 6664), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y', 'pred'], {}), '(y, pred)\n', (6655, 6664), False, 'from sklearn.metrics import roc_auc_score, roc_curve, average_precision_score, precision_recall_curve, f1_score, matthews_corrcoef\n'), ((6669, 6706), 'matplotlib.pyplot.plot', 'plt.plot', (['rec', 'prc'], {'color': '"""c"""', 'lw': '(0.3)'}), "(rec, prc, color='c', lw=0.3)\n", (6677, 6706), True, 'import matplotlib.pyplot as plt\n'), ((6903, 6965), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': '(0.3)', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=0.3, linestyle='--')\n", (6911, 6965), True, 'import matplotlib.pyplot as plt\n'), ((6970, 6990), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (6978, 6990), True, 'import matplotlib.pyplot as plt\n'), ((6995, 7015), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (7003, 7015), True, 'import matplotlib.pyplot as plt\n'), ((7020, 7040), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (7030, 7040), True, 'import matplotlib.pyplot as plt\n'), ((7045, 7068), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (7055, 7068), True, 'import matplotlib.pyplot as plt\n'), ((7073, 7120), 'matplotlib.pyplot.title', 'plt.title', (['f"""Precision-Recall Curve ({ap:.4f})"""'], {}), "(f'Precision-Recall Curve ({ap:.4f})')\n", (7082, 7120), True, 'import matplotlib.pyplot as plt\n'), ((1763, 1788), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1786, 1788), False, 'import torch\n'), ((3747, 3759), 'genome_file.mat2adj', 'mat2adj', (['res'], {}), '(res)\n', (3754, 3759), False, 'from genome_file import mat2adj\n'), ((5899, 5921), 
'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'pred'], {}), '(y, pred)\n', (5912, 5921), False, 'from sklearn.metrics import roc_auc_score, roc_curve, average_precision_score, precision_recall_curve, f1_score, matthews_corrcoef\n'), ((5923, 5955), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y', 'pred'], {}), '(y, pred)\n', (5946, 5955), False, 'from sklearn.metrics import roc_auc_score, roc_curve, average_precision_score, precision_recall_curve, f1_score, matthews_corrcoef\n'), ((5297, 5319), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'pred'], {}), '(y, pred)\n', (5310, 5319), False, 'from sklearn.metrics import roc_auc_score, roc_curve, average_precision_score, precision_recall_curve, f1_score, matthews_corrcoef\n'), ((5321, 5353), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y', 'pred'], {}), '(y, pred)\n', (5344, 5353), False, 'from sklearn.metrics import roc_auc_score, roc_curve, average_precision_score, precision_recall_curve, f1_score, matthews_corrcoef\n'), ((7204, 7216), 'time.ctime', 'time.ctime', ([], {}), '()\n', (7214, 7216), False, 'import time\n'), ((7354, 7397), 'phylognn_model.G3Median_GCNConv', 'G3Median_GCNConv', (['in_channels', 'out_channels'], {}), '(in_channels, out_channels)\n', (7370, 7397), False, 'from phylognn_model import G3Median_GCNConv, G3Median_VGAE\n'), ((8610, 8622), 'time.ctime', 'time.ctime', ([], {}), '()\n', (8620, 8622), False, 'import time\n'), ((3799, 3816), 'dcj_comp.dcj_dist', 'dcj_dist', (['pred', 's'], {}), '(pred, s)\n', (3807, 3816), False, 'from dcj_comp import dcj_dist\n'), ((3893, 3919), 'dcj_comp.dcj_dist', 'dcj_dist', (['seqs[0]', 'seqs[1]'], {}), '(seqs[0], seqs[1])\n', (3901, 3919), False, 'from dcj_comp import dcj_dist\n'), ((3958, 3984), 'dcj_comp.dcj_dist', 'dcj_dist', (['seqs[0]', 'seqs[2]'], {}), '(seqs[0], seqs[2])\n', (3966, 3984), False, 'from dcj_comp import dcj_dist\n'), ((4023, 4049), 'dcj_comp.dcj_dist', 'dcj_dist', 
(['seqs[1]', 'seqs[2]'], {}), '(seqs[1], seqs[2])\n', (4031, 4049), False, 'from dcj_comp import dcj_dist\n')]
|
## Imports and Setup
print("Importing")
# Suppress all the deprecated warnings!
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import argparse
import numpy as np
import tensorflow as tf
from time import time
from data_loader import load_data, load_npz, load_random, load_ogb, load_ogb_2, load_ogb_3
from train import train
import os.path
from os import mkdir
import torch
from auxiliary import evaluate_model, load_train_result
from analysis import get_split_pred, plot_accs, plot_losses
from analysis import plot_pred_histograms, plot_contingency_matrices
# Remove TF warnings
tf.logging.set_verbosity(tf.logging.ERROR)
# Seed both NumPy and TensorFlow so runs are reproducible.
seed = 234
np.random.seed(seed)
tf.set_random_seed(seed)
class default_args:
    """Default hyperparameter values for the GCN-LPA run.

    Doubles as the baseline that get_foldername() compares parsed CLI
    arguments against when naming the results folder.
    """
    dataset = 'ogbn-arxiv'  # which dataset to load
    epochs = 50 #200
    dim = 16  # dimension of hidden layers
    gcn_layer = 2  # number of GCN layers
    lpa_iter = 5  # number of label-propagation iterations
    l2_weight = 5e-4  # weight of L2 regularization
    lpa_weight = 1  # weight of the LPA regularization term
    dropout = 0  # dropout rate
    lr = 0.2  # learning rate
# CLI flags mirror the attributes of default_args (and default to them).
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default=default_args.dataset, help='which dataset to use')
parser.add_argument('--epochs', type=int, default=default_args.epochs, help='the number of epochs')
parser.add_argument('--dim', type=int, default=default_args.dim, help='dimension of hidden layers')
parser.add_argument('--gcn_layer', type=int, default=default_args.gcn_layer, help='number of GCN layers')
parser.add_argument('--lpa_iter', type=int, default=default_args.lpa_iter, help='number of LPA iterations')
parser.add_argument('--l2_weight', type=float, default=default_args.l2_weight, help='weight of l2 regularization')
parser.add_argument('--lpa_weight', type=float, default=default_args.lpa_weight, help='weight of LP regularization')
parser.add_argument('--dropout', type=float, default=default_args.dropout, help='dropout rate')
parser.add_argument('--lr', type=float, default=default_args.lr, help='learning rate')
args = parser.parse_args()
# Start the wall-clock timer used for the final "time used" report.
t = time()
# Pick the loader matching the requested dataset; anything unrecognized
# falls back to a small random graph.
if args.dataset in ['cora', 'citeseer', 'pubmed']:
    data = load_data(args.dataset)
elif args.dataset in ['coauthor-cs', 'coauthor-phy']:
    data = load_npz(args.dataset)
elif args.dataset == 'ogbn-arxiv':
    data = load_ogb_3(args.dataset)
else:
    n_nodes = 1000
    data = load_random(n_nodes=n_nodes, n_train=100, n_val=200, p=10/n_nodes)
def get_foldername(args):
    """Build a results-folder name from the non-default CLI settings.

    Starts from "GCN-LPA" and appends one "_key=value" segment for every
    argument (except 'epochs') whose parsed value differs from the
    corresponding default_args attribute, in Namespace attribute order.
    """
    parsed = vars(args)
    defaults = vars(default_args)
    parts = ["GCN-LPA"]
    for key, value in parsed.items():
        if key == 'epochs':
            continue
        if value != defaults[key]:
            parts.append(f"_{key}={value}")
    return "".join(parts)
foldername = get_foldername(args)
save_dir = foldername + "/save_1/"
# Create the folder if it doesn't exist
if not os.path.isdir(foldername + "/"):
    mkdir(foldername + "/")
# We either train a model or load a pre-trained model, but not both.
# Flip do_training to False to reuse the checkpoint in save_dir instead.
do_training = True
if do_training:
    ## Train Model
    training_vars, model = train(args, data, save_dir=save_dir)
else:
    ## Load Model
    training_vars, model = load_train_result(args, data, save_dir=save_dir)
## Evaluate the model (on all the data)
print("Evaluating the Model")
output = evaluate_model(data, model, save_dir)
## Prepare for General Analysis
print("Preparing for Analysis")
def count_parameters(model):
    """Print per-variable element counts and return their total.

    Each entry of model.vars exposes a TF-style shape whose as_list()
    gives the dimensions; the product over dimensions is that variable's
    parameter count.
    """
    sizes = [np.prod(v.shape.as_list()) for v in model.vars]
    print(sizes)
    return sum(sizes)
param_count = count_parameters(model)
# Reformat and split the prediction matrix:
# argmax over the class axis gives one predicted label per node.
pred = torch.from_numpy(output)
pred = pred.argmax(dim=-1, keepdim=True)
preds = get_split_pred(pred)
# Unpack training_vars
num_epochs = training_vars['num_epochs']
train_accs = training_vars['train_accs']
val_accs = training_vars['val_accs']
test_accs = training_vars['test_accs']
train_losses = training_vars['train_losses']
val_losses = training_vars['val_losses']
test_losses = training_vars['test_losses']
accs = [train_accs, val_accs, test_accs]
losses = [train_losses, val_losses, test_losses]
# Create Analysis Work on Model
print("Analyzing")
print("Making Plots")
# Generate plots of accuracy and loss vs epochs
plot_accs(foldername, num_epochs, accs)
plot_losses(foldername, num_epochs, losses)
print("Making Histograms")
# Generate histogram of predicted labels
plot_pred_histograms(foldername, preds)
# Save the data
# save_data(foldername, pred, accs, losses, param_count)
print("Making Matrices")
# Plot the contingency matrices
plot_contingency_matrices(foldername, preds)
print('time used: %d s' % (time() - t))
|
[
"analysis.plot_accs",
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.logging.set_verbosity",
"data_loader.load_data",
"warnings.simplefilter",
"data_loader.load_random",
"tensorflow.set_random_seed",
"data_loader.load_npz",
"train.train",
"analysis.plot_losses",
"analysis.get_split_pred",
"torch.from_numpy",
"analysis.plot_contingency_matrices",
"time.time",
"auxiliary.evaluate_model",
"analysis.plot_pred_histograms",
"auxiliary.load_train_result",
"data_loader.load_ogb_3"
] |
[((116, 169), 'warnings.simplefilter', 'simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (128, 169), False, 'from warnings import simplefilter\n'), ((627, 669), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (651, 669), True, 'import tensorflow as tf\n'), ((682, 702), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (696, 702), True, 'import numpy as np\n'), ((703, 727), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (721, 727), True, 'import tensorflow as tf\n'), ((924, 949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (947, 949), False, 'import argparse\n'), ((1942, 1948), 'time.time', 'time', ([], {}), '()\n', (1946, 1948), False, 'from time import time\n'), ((3194, 3231), 'auxiliary.evaluate_model', 'evaluate_model', (['data', 'model', 'save_dir'], {}), '(data, model, save_dir)\n', (3208, 3231), False, 'from auxiliary import evaluate_model, load_train_result\n'), ((3558, 3582), 'torch.from_numpy', 'torch.from_numpy', (['output'], {}), '(output)\n', (3574, 3582), False, 'import torch\n'), ((3632, 3652), 'analysis.get_split_pred', 'get_split_pred', (['pred'], {}), '(pred)\n', (3646, 3652), False, 'from analysis import get_split_pred, plot_accs, plot_losses\n'), ((4176, 4215), 'analysis.plot_accs', 'plot_accs', (['foldername', 'num_epochs', 'accs'], {}), '(foldername, num_epochs, accs)\n', (4185, 4215), False, 'from analysis import get_split_pred, plot_accs, plot_losses\n'), ((4216, 4259), 'analysis.plot_losses', 'plot_losses', (['foldername', 'num_epochs', 'losses'], {}), '(foldername, num_epochs, losses)\n', (4227, 4259), False, 'from analysis import get_split_pred, plot_accs, plot_losses\n'), ((4329, 4368), 'analysis.plot_pred_histograms', 'plot_pred_histograms', (['foldername', 'preds'], {}), '(foldername, preds)\n', (4349, 4368), False, 
'from analysis import plot_pred_histograms, plot_contingency_matrices\n'), ((4500, 4544), 'analysis.plot_contingency_matrices', 'plot_contingency_matrices', (['foldername', 'preds'], {}), '(foldername, preds)\n', (4525, 4544), False, 'from analysis import plot_pred_histograms, plot_contingency_matrices\n'), ((2012, 2035), 'data_loader.load_data', 'load_data', (['args.dataset'], {}), '(args.dataset)\n', (2021, 2035), False, 'from data_loader import load_data, load_npz, load_random, load_ogb, load_ogb_2, load_ogb_3\n'), ((2801, 2824), 'os.mkdir', 'mkdir', (["(foldername + '/')"], {}), "(foldername + '/')\n", (2806, 2824), False, 'from os import mkdir\n'), ((2975, 3011), 'train.train', 'train', (['args', 'data'], {'save_dir': 'save_dir'}), '(args, data, save_dir=save_dir)\n', (2980, 3011), False, 'from train import train\n'), ((3064, 3112), 'auxiliary.load_train_result', 'load_train_result', (['args', 'data'], {'save_dir': 'save_dir'}), '(args, data, save_dir=save_dir)\n', (3081, 3112), False, 'from auxiliary import evaluate_model, load_train_result\n'), ((2101, 2123), 'data_loader.load_npz', 'load_npz', (['args.dataset'], {}), '(args.dataset)\n', (2109, 2123), False, 'from data_loader import load_data, load_npz, load_random, load_ogb, load_ogb_2, load_ogb_3\n'), ((2170, 2194), 'data_loader.load_ogb_3', 'load_ogb_3', (['args.dataset'], {}), '(args.dataset)\n', (2180, 2194), False, 'from data_loader import load_data, load_npz, load_random, load_ogb, load_ogb_2, load_ogb_3\n'), ((2231, 2299), 'data_loader.load_random', 'load_random', ([], {'n_nodes': 'n_nodes', 'n_train': '(100)', 'n_val': '(200)', 'p': '(10 / n_nodes)'}), '(n_nodes=n_nodes, n_train=100, n_val=200, p=10 / n_nodes)\n', (2242, 2299), False, 'from data_loader import load_data, load_npz, load_random, load_ogb, load_ogb_2, load_ogb_3\n'), ((4574, 4580), 'time.time', 'time', ([], {}), '()\n', (4578, 4580), False, 'from time import time\n')]
|
from functools import partial
import numpy as np
import scarlet
from numpy.testing import assert_almost_equal, assert_equal
class TestWavelet(object):
def get_psfs(self, sigmas, boxsize):
psf = scarlet.GaussianPSF(sigmas, boxsize=boxsize)
return psf.get_model()
"""Test the wavelet object"""
def test_transform_inverse(self):
psf = self.get_psfs(1, 128)[0]
starlet_transform = scarlet.Starlet.from_image(psf, scales=3)
# Test number of levels
assert_equal(starlet_transform.coefficients.shape[0], 4)
# Test inverse
inverse = starlet_transform.image
assert_almost_equal(inverse, psf)
def test_setter(self):
psf = self.get_psfs(1, 128)[0]
starlet = scarlet.Starlet.from_image(psf, scales=3)
star_coeff = starlet.coefficients
star_coeff[:, 10:20, :] = 0
new_starlet = scarlet.Starlet.from_coefficients(star_coeff)
assert_almost_equal(new_starlet.image, starlet.image)
# Test inverse
star_coeff[:, :, :] = 0
assert_almost_equal(starlet.image, psf)
|
[
"scarlet.GaussianPSF",
"scarlet.Starlet.from_coefficients",
"numpy.testing.assert_almost_equal",
"scarlet.Starlet.from_image",
"numpy.testing.assert_equal"
] |
[((209, 253), 'scarlet.GaussianPSF', 'scarlet.GaussianPSF', (['sigmas'], {'boxsize': 'boxsize'}), '(sigmas, boxsize=boxsize)\n', (228, 253), False, 'import scarlet\n'), ((426, 467), 'scarlet.Starlet.from_image', 'scarlet.Starlet.from_image', (['psf'], {'scales': '(3)'}), '(psf, scales=3)\n', (452, 467), False, 'import scarlet\n'), ((509, 565), 'numpy.testing.assert_equal', 'assert_equal', (['starlet_transform.coefficients.shape[0]', '(4)'], {}), '(starlet_transform.coefficients.shape[0], 4)\n', (521, 565), False, 'from numpy.testing import assert_almost_equal, assert_equal\n'), ((640, 673), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['inverse', 'psf'], {}), '(inverse, psf)\n', (659, 673), False, 'from numpy.testing import assert_almost_equal, assert_equal\n'), ((759, 800), 'scarlet.Starlet.from_image', 'scarlet.Starlet.from_image', (['psf'], {'scales': '(3)'}), '(psf, scales=3)\n', (785, 800), False, 'import scarlet\n'), ((902, 947), 'scarlet.Starlet.from_coefficients', 'scarlet.Starlet.from_coefficients', (['star_coeff'], {}), '(star_coeff)\n', (935, 947), False, 'import scarlet\n'), ((956, 1009), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['new_starlet.image', 'starlet.image'], {}), '(new_starlet.image, starlet.image)\n', (975, 1009), False, 'from numpy.testing import assert_almost_equal, assert_equal\n'), ((1073, 1112), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['starlet.image', 'psf'], {}), '(starlet.image, psf)\n', (1092, 1112), False, 'from numpy.testing import assert_almost_equal, assert_equal\n')]
|
from plume.tree import DecisionTreeClassifier
from plume.knn import KNeighborClassifier
from plume.ensemble import AdaBoostClassifier, BaggingClassifier, \
RandomForestsClassifier
import numpy as np
def test_adaboost():
clf = AdaBoostClassifier(DecisionTreeClassifier)
train_x = np.array([
[1, 1, 0],
[0, 1, 0],
[1, 0, 0],
[0, 0, 0],
[0, 0, 1],
])
train_y = np.array([1, 1, 1, -1, -1])
clf.fit(train_x, train_y)
print(clf.predict(train_x))
def test_bagging():
clfs = [KNeighborClassifier(2) for i in range(7)]
train_x = np.array([[1, 1], [0.1, 0.1], [0.5, 0.7], [10, 10], [10, 11]])
train_y = np.array(['A', 'A', 'A', 'B', 'B'])
test_x = np.array([[11, 12], [12, 13], [11, 13], [0.05, 0.1]])
b = BaggingClassifier(clfs)
b.fit(train_x, train_y)
print(b.predict(test_x))
def test_rf():
clf = RandomForestsClassifier()
train_x = np.array([
[1, 1, 0],
[0, 1, 0],
[1, 0, 0],
[0, 0, 0],
[0, 0, 1],
])
train_y = np.array([1, 1, 1, -1, -1])
print(clf.fit(train_x, train_y).predict(train_x))
if __name__ == '__main__':
test_adaboost()
test_bagging()
test_rf()
|
[
"numpy.array",
"plume.ensemble.BaggingClassifier",
"plume.ensemble.AdaBoostClassifier",
"plume.ensemble.RandomForestsClassifier",
"plume.knn.KNeighborClassifier"
] |
[((235, 277), 'plume.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', (['DecisionTreeClassifier'], {}), '(DecisionTreeClassifier)\n', (253, 277), False, 'from plume.ensemble import AdaBoostClassifier, BaggingClassifier, RandomForestsClassifier\n'), ((292, 357), 'numpy.array', 'np.array', (['[[1, 1, 0], [0, 1, 0], [1, 0, 0], [0, 0, 0], [0, 0, 1]]'], {}), '([[1, 1, 0], [0, 1, 0], [1, 0, 0], [0, 0, 0], [0, 0, 1]])\n', (300, 357), True, 'import numpy as np\n'), ((419, 446), 'numpy.array', 'np.array', (['[1, 1, 1, -1, -1]'], {}), '([1, 1, 1, -1, -1])\n', (427, 446), True, 'import numpy as np\n'), ((599, 661), 'numpy.array', 'np.array', (['[[1, 1], [0.1, 0.1], [0.5, 0.7], [10, 10], [10, 11]]'], {}), '([[1, 1], [0.1, 0.1], [0.5, 0.7], [10, 10], [10, 11]])\n', (607, 661), True, 'import numpy as np\n'), ((676, 711), 'numpy.array', 'np.array', (["['A', 'A', 'A', 'B', 'B']"], {}), "(['A', 'A', 'A', 'B', 'B'])\n", (684, 711), True, 'import numpy as np\n'), ((725, 778), 'numpy.array', 'np.array', (['[[11, 12], [12, 13], [11, 13], [0.05, 0.1]]'], {}), '([[11, 12], [12, 13], [11, 13], [0.05, 0.1]])\n', (733, 778), True, 'import numpy as np\n'), ((787, 810), 'plume.ensemble.BaggingClassifier', 'BaggingClassifier', (['clfs'], {}), '(clfs)\n', (804, 810), False, 'from plume.ensemble import AdaBoostClassifier, BaggingClassifier, RandomForestsClassifier\n'), ((894, 919), 'plume.ensemble.RandomForestsClassifier', 'RandomForestsClassifier', ([], {}), '()\n', (917, 919), False, 'from plume.ensemble import AdaBoostClassifier, BaggingClassifier, RandomForestsClassifier\n'), ((934, 999), 'numpy.array', 'np.array', (['[[1, 1, 0], [0, 1, 0], [1, 0, 0], [0, 0, 0], [0, 0, 1]]'], {}), '([[1, 1, 0], [0, 1, 0], [1, 0, 0], [0, 0, 0], [0, 0, 1]])\n', (942, 999), True, 'import numpy as np\n'), ((1061, 1088), 'numpy.array', 'np.array', (['[1, 1, 1, -1, -1]'], {}), '([1, 1, 1, -1, -1])\n', (1069, 1088), True, 'import numpy as np\n'), ((543, 565), 'plume.knn.KNeighborClassifier', 
'KNeighborClassifier', (['(2)'], {}), '(2)\n', (562, 565), False, 'from plume.knn import KNeighborClassifier\n')]
|
import numpy as np
# array A / B
arrayA, arrayB = (np.array([int(i) for i in input().split()]) for _ in range(2))
# produ interno
# produ externo
print('{}\n{}'.format(np.inner(arrayA, arrayB), np.outer(arrayA, arrayB)))
|
[
"numpy.outer",
"numpy.inner"
] |
[((169, 193), 'numpy.inner', 'np.inner', (['arrayA', 'arrayB'], {}), '(arrayA, arrayB)\n', (177, 193), True, 'import numpy as np\n'), ((195, 219), 'numpy.outer', 'np.outer', (['arrayA', 'arrayB'], {}), '(arrayA, arrayB)\n', (203, 219), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Specification for Generation of training data sets"""
import os
import pathlib
import shutil
from sets.training_sets import (
TrainingSets,
XML_NS
)
from cv2 import (
cv2
)
import pytest
import numpy as np
import lxml.etree as etree
RES_ROOT = os.path.join('tests', 'resources')
def generate_image(path_image, words, columns, rows, params=None):
"""Generate synthetic in-memory image data"""
arr_floats = np.random.rand(rows, columns) * 255
arr_ints = arr_floats.astype(np.uint8)
if words:
for word in words:
render_text = word[1]
origin = (word[0][0] + 10, word[0][1] + 10)
arr_ints = cv2.putText(
arr_ints, render_text, origin, cv2.FONT_HERSHEY_COMPLEX, 1.0, (0, 0, 0), 3, bottomLeftOrigin=False)
cv2.imwrite(str(path_image), arr_ints, params)
return path_image
def extract_words(path_xml_data):
"""Get origin and textdata for all words in path_data"""
words = []
root = etree.parse(str(path_xml_data)).getroot()
root_tag = root.xpath('namespace-uri(.)')
ns_prefix = [k for (k, v) in XML_NS.items() if v == root_tag][0]
if 'alto' in ns_prefix:
strings = root.findall(f'.//{ns_prefix}:String', XML_NS)
words = [((int(s.attrib['HPOS']), int(s.attrib['VPOS'])),
s.attrib['CONTENT']) for s in strings]
elif ns_prefix in ('page2013', 'page2019'):
page_words = root.findall(f'.//{ns_prefix}:Word', XML_NS)
for page_word in page_words:
txt = page_word.find(f'.//{ns_prefix}:Unicode', XML_NS).text
p1 = page_word.find(
f'{ns_prefix}:Coords',
XML_NS).attrib['points'].split()[0]
origin = (int(p1.split(',')[0]), int(p1.split(',')[1]))
words.append((origin, txt))
return words
@pytest.fixture(name='fixture_alto_tif')
def _fixture_alto_tif(tmpdir):
res_alto = os.path.join(RES_ROOT, 'xml', '1667522809_J_0073_0512.xml')
path = tmpdir.mkdir('training').join('1667522809_J_0073_0512.xml')
shutil.copyfile(res_alto, path)
words = extract_words(path)
file_path = tmpdir.mkdir('scan').join('1667522809_J_0073_0512.tif')
tif_params = [
cv2.IMWRITE_TIFF_RESUNIT,
2,
cv2.IMWRITE_TIFF_XDPI,
300,
cv2.IMWRITE_TIFF_YDPI,
300]
# 6619x9976px
generate_image(
file_path,
words=words,
columns=6619,
rows=9976,
params=tif_params)
return str(path)
def test_create_sets_from_alto_and_tif(fixture_alto_tif):
"""Create text-image pairs from ALTO V3 and TIF"""
path_input_dir = os.path.dirname(fixture_alto_tif)
path_input_parent = pathlib.Path(path_input_dir).parent
path_tif = os.path.join(
path_input_parent,
'scan',
'1667522809_J_0073_0512.tif')
assert os.path.exists(path_tif)
training_data = TrainingSets(fixture_alto_tif, path_tif)
data = training_data.create(min_chars=32, folder_out=path_input_dir)
# assert
assert len(data) == 225
path_items = os.listdir(os.path.dirname(fixture_alto_tif))
tifs = [tif for tif in path_items if str(tif).endswith(".tif")]
assert len(tifs) == 225
lines = [txt for txt in path_items if str(txt).endswith(".gt.txt")]
# one more txt since summery
assert len(lines) == 226
@pytest.fixture(name='fixture_page2013_jpg')
def _fixture_page2013_jpg(tmpdir):
res = os.path.join(RES_ROOT, 'xml', '288652.xml')
path_page = tmpdir.mkdir('training').join('288652.xml')
shutil.copyfile(res, path_page)
words = extract_words(path_page)
file_path = tmpdir.mkdir('images').join('288652.jpg')
# 2257x3062px
generate_image(file_path, words=words, columns=2091, rows=2938)
return str(path_page)
def test_create_sets_from_page2013_and_jpg(fixture_page2013_jpg):
"""Create text-image pairs from PAGE2013 and JPG with defaults"""
path_input_dir = os.path.dirname(fixture_page2013_jpg)
path_input_parent = pathlib.Path(path_input_dir).parent
path_image = os.path.join(path_input_parent, 'images', '288652.jpg')
assert os.path.exists(path_image)
# act
training_data = TrainingSets(fixture_page2013_jpg, path_image)
data = training_data.create(
min_chars=8,
folder_out=path_input_dir,
revert=True)
# assert
assert len(data) == 32
path_items = os.listdir(os.path.dirname(fixture_page2013_jpg))
assert len([tif for tif in path_items if str(tif).endswith(".tif")]) == 32
txt_files = sorted(
[txt for txt in path_items if str(txt).endswith(".gt.txt")])
# additional summary written
assert len(txt_files) == 33
# assert mixed content
with open(os.path.join(os.path.dirname(fixture_page2013_jpg), txt_files[2])) as txt_file:
arab = txt_file.readline().strip()
assert 'XIX' in arab
def test_create_sets_from_page2013_and_jpg_no_summary(
fixture_page2013_jpg):
"""Create text-image pairs from PAGE2013 and JPG without summary"""
path_input_dir = os.path.dirname(fixture_page2013_jpg)
path_input_parent = pathlib.Path(path_input_dir).parent
path_image = os.path.join(path_input_parent, 'images', '288652.jpg')
assert os.path.exists(path_image)
# act
training_data = TrainingSets(fixture_page2013_jpg, path_image)
data = training_data.create(
min_chars=8,
folder_out=path_input_dir,
summary=False, revert=True)
# assert
expected_len = 32
assert len(data) == expected_len
path_items = os.listdir(os.path.dirname(fixture_page2013_jpg))
tifs = [tif for tif in path_items if str(tif).endswith(".tif")]
assert len(tifs) == expected_len
txt_files = [txt for txt in path_items if str(txt).endswith(".gt.txt")]
assert len(txt_files) == expected_len
# no summary written
assert len(txt_files) == 32
@pytest.fixture(name='fixture_page2019_png')
def _fixture_page2019_png(tmpdir):
res = os.path.join(RES_ROOT, 'xml', 'OCR-RESULT_0001.xml')
path_page = tmpdir.mkdir('training').join('OCR-RESULT_0001.xml')
shutil.copyfile(res, path_page)
words = extract_words(path_page)
file_path = tmpdir.mkdir('images').join('OCR-RESULT_0001.png')
# 2257x3062px
generate_image(file_path, words=words, columns=2164, rows=2448)
return str(path_page)
def test_create_sets_from_page2019_and_png(fixture_page2019_png):
"""Create text-image pairs from PAGE2013 and JPG without summary"""
path_input_dir = os.path.dirname(fixture_page2019_png)
path_input_parent = pathlib.Path(path_input_dir).parent
path_image = os.path.join(
path_input_parent,
'images',
'OCR-RESULT_0001.png')
assert os.path.exists(path_image)
# act
training_data = TrainingSets(fixture_page2019_png, path_image)
data = training_data.create(
min_chars=8,
folder_out=path_input_dir)
# assert
expected_len = 33
assert len(data) == expected_len
path_items = os.listdir(os.path.dirname(fixture_page2019_png))
tifs = [tif for tif in path_items if str(tif).endswith(".tif")]
assert len(tifs) == expected_len
txt_files = [txt for txt in path_items if str(txt).endswith(".gt.txt")]
# summary written
assert len(txt_files) == 34
|
[
"cv2.cv2.putText",
"sets.training_sets.TrainingSets",
"numpy.random.rand",
"os.path.dirname",
"pytest.fixture",
"os.path.exists",
"pathlib.Path",
"shutil.copyfile",
"sets.training_sets.XML_NS.items",
"os.path.join"
] |
[((287, 321), 'os.path.join', 'os.path.join', (['"""tests"""', '"""resources"""'], {}), "('tests', 'resources')\n", (299, 321), False, 'import os\n'), ((1869, 1908), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""fixture_alto_tif"""'}), "(name='fixture_alto_tif')\n", (1883, 1908), False, 'import pytest\n'), ((3400, 3443), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""fixture_page2013_jpg"""'}), "(name='fixture_page2013_jpg')\n", (3414, 3443), False, 'import pytest\n'), ((5956, 5999), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""fixture_page2019_png"""'}), "(name='fixture_page2019_png')\n", (5970, 5999), False, 'import pytest\n'), ((1955, 2014), 'os.path.join', 'os.path.join', (['RES_ROOT', '"""xml"""', '"""1667522809_J_0073_0512.xml"""'], {}), "(RES_ROOT, 'xml', '1667522809_J_0073_0512.xml')\n", (1967, 2014), False, 'import os\n'), ((2090, 2121), 'shutil.copyfile', 'shutil.copyfile', (['res_alto', 'path'], {}), '(res_alto, path)\n', (2105, 2121), False, 'import shutil\n'), ((2686, 2719), 'os.path.dirname', 'os.path.dirname', (['fixture_alto_tif'], {}), '(fixture_alto_tif)\n', (2701, 2719), False, 'import os\n'), ((2795, 2864), 'os.path.join', 'os.path.join', (['path_input_parent', '"""scan"""', '"""1667522809_J_0073_0512.tif"""'], {}), "(path_input_parent, 'scan', '1667522809_J_0073_0512.tif')\n", (2807, 2864), False, 'import os\n'), ((2901, 2925), 'os.path.exists', 'os.path.exists', (['path_tif'], {}), '(path_tif)\n', (2915, 2925), False, 'import os\n'), ((2947, 2987), 'sets.training_sets.TrainingSets', 'TrainingSets', (['fixture_alto_tif', 'path_tif'], {}), '(fixture_alto_tif, path_tif)\n', (2959, 2987), False, 'from sets.training_sets import TrainingSets, XML_NS\n'), ((3490, 3533), 'os.path.join', 'os.path.join', (['RES_ROOT', '"""xml"""', '"""288652.xml"""'], {}), "(RES_ROOT, 'xml', '288652.xml')\n", (3502, 3533), False, 'import os\n'), ((3598, 3629), 'shutil.copyfile', 'shutil.copyfile', (['res', 'path_page'], {}), '(res, path_page)\n', 
(3613, 3629), False, 'import shutil\n'), ((4001, 4038), 'os.path.dirname', 'os.path.dirname', (['fixture_page2013_jpg'], {}), '(fixture_page2013_jpg)\n', (4016, 4038), False, 'import os\n'), ((4116, 4171), 'os.path.join', 'os.path.join', (['path_input_parent', '"""images"""', '"""288652.jpg"""'], {}), "(path_input_parent, 'images', '288652.jpg')\n", (4128, 4171), False, 'import os\n'), ((4183, 4209), 'os.path.exists', 'os.path.exists', (['path_image'], {}), '(path_image)\n', (4197, 4209), False, 'import os\n'), ((4241, 4287), 'sets.training_sets.TrainingSets', 'TrainingSets', (['fixture_page2013_jpg', 'path_image'], {}), '(fixture_page2013_jpg, path_image)\n', (4253, 4287), False, 'from sets.training_sets import TrainingSets, XML_NS\n'), ((5120, 5157), 'os.path.dirname', 'os.path.dirname', (['fixture_page2013_jpg'], {}), '(fixture_page2013_jpg)\n', (5135, 5157), False, 'import os\n'), ((5235, 5290), 'os.path.join', 'os.path.join', (['path_input_parent', '"""images"""', '"""288652.jpg"""'], {}), "(path_input_parent, 'images', '288652.jpg')\n", (5247, 5290), False, 'import os\n'), ((5302, 5328), 'os.path.exists', 'os.path.exists', (['path_image'], {}), '(path_image)\n', (5316, 5328), False, 'import os\n'), ((5360, 5406), 'sets.training_sets.TrainingSets', 'TrainingSets', (['fixture_page2013_jpg', 'path_image'], {}), '(fixture_page2013_jpg, path_image)\n', (5372, 5406), False, 'from sets.training_sets import TrainingSets, XML_NS\n'), ((6046, 6098), 'os.path.join', 'os.path.join', (['RES_ROOT', '"""xml"""', '"""OCR-RESULT_0001.xml"""'], {}), "(RES_ROOT, 'xml', 'OCR-RESULT_0001.xml')\n", (6058, 6098), False, 'import os\n'), ((6172, 6203), 'shutil.copyfile', 'shutil.copyfile', (['res', 'path_page'], {}), '(res, path_page)\n', (6187, 6203), False, 'import shutil\n'), ((6586, 6623), 'os.path.dirname', 'os.path.dirname', (['fixture_page2019_png'], {}), '(fixture_page2019_png)\n', (6601, 6623), False, 'import os\n'), ((6701, 6765), 'os.path.join', 'os.path.join', 
(['path_input_parent', '"""images"""', '"""OCR-RESULT_0001.png"""'], {}), "(path_input_parent, 'images', 'OCR-RESULT_0001.png')\n", (6713, 6765), False, 'import os\n'), ((6802, 6828), 'os.path.exists', 'os.path.exists', (['path_image'], {}), '(path_image)\n', (6816, 6828), False, 'import os\n'), ((6860, 6906), 'sets.training_sets.TrainingSets', 'TrainingSets', (['fixture_page2019_png', 'path_image'], {}), '(fixture_page2019_png, path_image)\n', (6872, 6906), False, 'from sets.training_sets import TrainingSets, XML_NS\n'), ((459, 488), 'numpy.random.rand', 'np.random.rand', (['rows', 'columns'], {}), '(rows, columns)\n', (473, 488), True, 'import numpy as np\n'), ((2744, 2772), 'pathlib.Path', 'pathlib.Path', (['path_input_dir'], {}), '(path_input_dir)\n', (2756, 2772), False, 'import pathlib\n'), ((3131, 3164), 'os.path.dirname', 'os.path.dirname', (['fixture_alto_tif'], {}), '(fixture_alto_tif)\n', (3146, 3164), False, 'import os\n'), ((4063, 4091), 'pathlib.Path', 'pathlib.Path', (['path_input_dir'], {}), '(path_input_dir)\n', (4075, 4091), False, 'import pathlib\n'), ((4467, 4504), 'os.path.dirname', 'os.path.dirname', (['fixture_page2013_jpg'], {}), '(fixture_page2013_jpg)\n', (4482, 4504), False, 'import os\n'), ((5182, 5210), 'pathlib.Path', 'pathlib.Path', (['path_input_dir'], {}), '(path_input_dir)\n', (5194, 5210), False, 'import pathlib\n'), ((5633, 5670), 'os.path.dirname', 'os.path.dirname', (['fixture_page2013_jpg'], {}), '(fixture_page2013_jpg)\n', (5648, 5670), False, 'import os\n'), ((6648, 6676), 'pathlib.Path', 'pathlib.Path', (['path_input_dir'], {}), '(path_input_dir)\n', (6660, 6676), False, 'import pathlib\n'), ((7097, 7134), 'os.path.dirname', 'os.path.dirname', (['fixture_page2019_png'], {}), '(fixture_page2019_png)\n', (7112, 7134), False, 'import os\n'), ((692, 808), 'cv2.cv2.putText', 'cv2.putText', (['arr_ints', 'render_text', 'origin', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)', '(0, 0, 0)', '(3)'], {'bottomLeftOrigin': '(False)'}), 
'(arr_ints, render_text, origin, cv2.FONT_HERSHEY_COMPLEX, 1.0, (\n 0, 0, 0), 3, bottomLeftOrigin=False)\n', (703, 808), False, 'from cv2 import cv2\n'), ((1140, 1154), 'sets.training_sets.XML_NS.items', 'XML_NS.items', ([], {}), '()\n', (1152, 1154), False, 'from sets.training_sets import TrainingSets, XML_NS\n'), ((4799, 4836), 'os.path.dirname', 'os.path.dirname', (['fixture_page2013_jpg'], {}), '(fixture_page2013_jpg)\n', (4814, 4836), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
def bou(z):
#razones adimensionales
a=4.6 #dimensiones zapata
b=14. #dimensiones zapata
q=1000./(a*b) #carga
m=a/z #adimensional
n=b/z #adimensional
#solución de la ecuación de Boussinesq
sigma=q/(4*3.141589)*((2*m*n*(m**2+n**2+1)**0.5)/(m**2+n**2+1+(m**2)*(n**2))*(m**2+n**2+2)/(m**2+n**2+1)+np.arcsin((2*m*n*(m**2+n**2+1)**0.5)/(m**2+n**2+1+(m**2)*(n**2))))
return sigma
|
[
"numpy.arcsin"
] |
[((441, 538), 'numpy.arcsin', 'np.arcsin', (['(2 * m * n * (m ** 2 + n ** 2 + 1) ** 0.5 / (m ** 2 + n ** 2 + 1 + m ** 2 *\n n ** 2))'], {}), '(2 * m * n * (m ** 2 + n ** 2 + 1) ** 0.5 / (m ** 2 + n ** 2 + 1 +\n m ** 2 * n ** 2))\n', (450, 538), True, 'import numpy as np\n')]
|
# Aprendizaje Automático: Proyecto Final
# Clasificación de símbolos Devanagari
# <NAME>
# <NAME>
# png_to_np.py
# Lee los datos en formato .png y los escribe como arrays de numpy (sin marco)
import glob
import numpy as np
import matplotlib.pyplot as plt
# Paths
CHARACTERS='datos/characters.txt'
TRAIN_IMG_DIR='datos/DevanagariHandwrittenCharacterDataset/Train/'
TEST_IMG_DIR='datos/DevanagariHandwrittenCharacterDataset/Test/'
TRAIN_GRAY='datos/DevanagariGrayscale/train.npz'
TEST_GRAY='datos/DevanagariGrayscale/test.npz'
# Names of classes
with open(CHARACTERS,'r') as f:
characters = f.read().split('\n')[:-1]
# Load data from .png format
def loadPng(folder, characters=characters):
data=[]
label=[]
l=0
for c in characters:
l+=1 # Classes from 1 to 46
print('Cargando: '+c)
path_to_folder=folder+c+'/*.png'
for img_path in glob.glob(path_to_folder):
image = plt.imread(img_path)[2:-2,2:-2] # Cut frame
data.append(image)
label.append(l)
return np.array(data,np.float32), np.array(label,np.int8)
# Save greyscale vector
def saveGrey(filename, data, label):
np.savez_compressed(filename, data, label)
print('Cargando datos de entrenamiento')
# Load train data from images
train_mat, train_label = loadPng(TRAIN_IMG_DIR, characters)
# Matrix to vector
train=np.reshape(train_mat,(train_mat.shape[0],784))
# Save train as greyscale
saveGrey(TRAIN_GRAY, train, train_label)
print('Datos de entrenamiento guardados en grayscale')
print('Cargando datos de test')
# Load test data from images
test_mat, test_label = loadPng(TEST_IMG_DIR, characters)
# Matrix to vector
test=np.reshape(test_mat,(test_mat.shape[0],784))
# Save test as greyscale
saveGrey(TEST_GRAY, test, test_label)
print('Datos de test guardados en grayscale')
|
[
"numpy.savez_compressed",
"numpy.array",
"numpy.reshape",
"glob.glob",
"matplotlib.pyplot.imread"
] |
[((1383, 1431), 'numpy.reshape', 'np.reshape', (['train_mat', '(train_mat.shape[0], 784)'], {}), '(train_mat, (train_mat.shape[0], 784))\n', (1393, 1431), True, 'import numpy as np\n'), ((1702, 1748), 'numpy.reshape', 'np.reshape', (['test_mat', '(test_mat.shape[0], 784)'], {}), '(test_mat, (test_mat.shape[0], 784))\n', (1712, 1748), True, 'import numpy as np\n'), ((1183, 1225), 'numpy.savez_compressed', 'np.savez_compressed', (['filename', 'data', 'label'], {}), '(filename, data, label)\n', (1202, 1225), True, 'import numpy as np\n'), ((892, 917), 'glob.glob', 'glob.glob', (['path_to_folder'], {}), '(path_to_folder)\n', (901, 917), False, 'import glob\n'), ((1066, 1092), 'numpy.array', 'np.array', (['data', 'np.float32'], {}), '(data, np.float32)\n', (1074, 1092), True, 'import numpy as np\n'), ((1093, 1117), 'numpy.array', 'np.array', (['label', 'np.int8'], {}), '(label, np.int8)\n', (1101, 1117), True, 'import numpy as np\n'), ((939, 959), 'matplotlib.pyplot.imread', 'plt.imread', (['img_path'], {}), '(img_path)\n', (949, 959), True, 'import matplotlib.pyplot as plt\n')]
|
import unittest
import numpy as np
from eoflow.models.losses import CategoricalCrossEntropy, CategoricalFocalLoss
from eoflow.models.losses import JaccardDistanceLoss, TanimotoDistanceLoss
class TestLosses(unittest.TestCase):
def test_shapes(self):
for loss_fn in [CategoricalFocalLoss(from_logits=True), CategoricalCrossEntropy(from_logits=True)]:
ones_1 = np.ones((1, 1024, 2))
ones_2 = np.ones((1, 32, 32, 2))
val1 = loss_fn(ones_1, 1-ones_1).numpy()
val2 = loss_fn(ones_2, 1-ones_2).numpy()
# Values should be scalars
self.assertEqual(val1.shape, ())
self.assertEqual(val2.shape, ())
# Loss values should be equal as they represent the same data, just in different shapes
self.assertAlmostEqual(val1, val2, 10)
def test_focal_loss_values(self):
ones = np.ones((32, 32))
zeros = np.zeros((32, 32))
mixed = np.concatenate([ones[:16], zeros[:16]])
# Predict everything as class 1
y_pred = np.stack([zeros, ones], axis=-1)
y_true1 = np.stack([ones, zeros], axis=-1) # All class 0
y_true2 = np.stack([zeros, ones], axis=-1) # All class 1
y_true3 = np.stack([mixed, 1-mixed], axis=-1) # Half class 1, half class 0
for loss_fn in [CategoricalFocalLoss(from_logits=False),
CategoricalFocalLoss(from_logits=False, class_weights=np.array([0, 1]))]:
# Compute loss values for different labels
val1 = loss_fn(y_true1, y_pred).numpy() # Should be biggest (all are wrong)
val2 = loss_fn(y_true2, y_pred).numpy() # Should be 0 (all are correct)
val3 = loss_fn(y_true3, y_pred).numpy() # Should be in between (half are correct)
self.assertAlmostEqual(val2, 0.0, 10)
self.assertGreaterEqual(val3, val2)
self.assertGreaterEqual(val1, val3)
def test_jaccard_loss(self):
loss_fn = JaccardDistanceLoss(from_logits=False, smooth=1)
y_true = np.zeros([1, 32, 32, 3])
y_true[:, :16, :16, 0] = np.ones((1, 16, 16))
y_true[:, 16:, :16, 1] = np.ones((1, 16, 16))
y_true[:, :, 16:, 2] = np.ones((1, 32, 16))
y_pred = np.zeros([1, 32, 32, 3])
y_pred[..., 0] = 1
val_1 = loss_fn(y_true, y_true).numpy()
val_2 = loss_fn(y_true, y_pred).numpy()
y_pred[..., 0] = 0
y_pred[..., 1] = 1
val_3 = loss_fn(y_true, y_pred).numpy()
y_pred[..., 1] = 0
y_pred[..., 2] = 1
val_4 = loss_fn(y_true, y_pred).numpy()
self.assertEqual(val_1, 0.0)
self.assertAlmostEqual(val_2, 2.743428, 5)
self.assertAlmostEqual(val_3, 2.743428, 5)
self.assertAlmostEqual(val_4, 2.491730, 5)
loss_fn = JaccardDistanceLoss(from_logits=False, smooth=1, class_weights=np.array([0, 1, 1]))
val_1 = loss_fn(y_true, y_true).numpy()
val_2 = loss_fn(y_true, y_pred).numpy()
y_pred[..., 0] = 0
y_pred[..., 1] = 1
val_3 = loss_fn(y_true, y_pred).numpy()
y_pred[..., 1] = 0
y_pred[..., 2] = 1
val_4 = loss_fn(y_true, y_pred).numpy()
self.assertEqual(val_1, 0.0)
self.assertAlmostEqual(val_2, 1.495621, 5)
self.assertAlmostEqual(val_3, 1.248781, 5)
self.assertAlmostEqual(val_4, 1.495621, 5)
def test_tanimoto_loss(self):
y_true = np.zeros([1, 32, 32, 2], dtype=np.float32)
y_true[:, 16:, :16, 1] = np.ones((1, 16, 16))
y_true[..., 0] = np.ones([1, 32, 32]) - y_true[..., 1]
y_pred = np.zeros([1, 32, 32, 2], dtype=np.float32)
y_pred[..., 0] = 1
self.assertEqual(TanimotoDistanceLoss(from_logits=False)(y_true, y_true).numpy(), 0.0)
self.assertEqual(TanimotoDistanceLoss(from_logits=False)(y_pred, y_pred).numpy(), 0.0)
self.assertAlmostEqual(TanimotoDistanceLoss(from_logits=False)(y_true, y_pred).numpy(), 1.25, 5)
self.assertAlmostEqual(TanimotoDistanceLoss(from_logits=False, normalise=True)(y_true, y_pred).numpy(),
1.2460148, 5)
self.assertAlmostEqual(TanimotoDistanceLoss(from_logits=False, class_weights=np.array([1, 0]))(y_true,
y_pred).numpy(),
0.25, 5)
y_true = np.zeros([1, 32, 32, 2], dtype=np.float32)
y_true[..., 0] = np.ones([1, 32, 32]) - y_true[..., 1]
self.assertEqual(TanimotoDistanceLoss(from_logits=False, normalise=True)(y_true, y_pred).numpy(), 0.)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.stack",
"eoflow.models.losses.CategoricalFocalLoss",
"eoflow.models.losses.TanimotoDistanceLoss",
"numpy.zeros",
"numpy.ones",
"eoflow.models.losses.JaccardDistanceLoss",
"numpy.array",
"eoflow.models.losses.CategoricalCrossEntropy",
"numpy.concatenate"
] |
[((4703, 4718), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4716, 4718), False, 'import unittest\n'), ((896, 913), 'numpy.ones', 'np.ones', (['(32, 32)'], {}), '((32, 32))\n', (903, 913), True, 'import numpy as np\n'), ((930, 948), 'numpy.zeros', 'np.zeros', (['(32, 32)'], {}), '((32, 32))\n', (938, 948), True, 'import numpy as np\n'), ((965, 1004), 'numpy.concatenate', 'np.concatenate', (['[ones[:16], zeros[:16]]'], {}), '([ones[:16], zeros[:16]])\n', (979, 1004), True, 'import numpy as np\n'), ((1063, 1095), 'numpy.stack', 'np.stack', (['[zeros, ones]'], {'axis': '(-1)'}), '([zeros, ones], axis=-1)\n', (1071, 1095), True, 'import numpy as np\n'), ((1115, 1147), 'numpy.stack', 'np.stack', (['[ones, zeros]'], {'axis': '(-1)'}), '([ones, zeros], axis=-1)\n', (1123, 1147), True, 'import numpy as np\n'), ((1181, 1213), 'numpy.stack', 'np.stack', (['[zeros, ones]'], {'axis': '(-1)'}), '([zeros, ones], axis=-1)\n', (1189, 1213), True, 'import numpy as np\n'), ((1247, 1284), 'numpy.stack', 'np.stack', (['[mixed, 1 - mixed]'], {'axis': '(-1)'}), '([mixed, 1 - mixed], axis=-1)\n', (1255, 1284), True, 'import numpy as np\n'), ((2002, 2050), 'eoflow.models.losses.JaccardDistanceLoss', 'JaccardDistanceLoss', ([], {'from_logits': '(False)', 'smooth': '(1)'}), '(from_logits=False, smooth=1)\n', (2021, 2050), False, 'from eoflow.models.losses import JaccardDistanceLoss, TanimotoDistanceLoss\n'), ((2069, 2093), 'numpy.zeros', 'np.zeros', (['[1, 32, 32, 3]'], {}), '([1, 32, 32, 3])\n', (2077, 2093), True, 'import numpy as np\n'), ((2127, 2147), 'numpy.ones', 'np.ones', (['(1, 16, 16)'], {}), '((1, 16, 16))\n', (2134, 2147), True, 'import numpy as np\n'), ((2181, 2201), 'numpy.ones', 'np.ones', (['(1, 16, 16)'], {}), '((1, 16, 16))\n', (2188, 2201), True, 'import numpy as np\n'), ((2233, 2253), 'numpy.ones', 'np.ones', (['(1, 32, 16)'], {}), '((1, 32, 16))\n', (2240, 2253), True, 'import numpy as np\n'), ((2272, 2296), 'numpy.zeros', 'np.zeros', (['[1, 32, 32, 3]'], {}), 
'([1, 32, 32, 3])\n', (2280, 2296), True, 'import numpy as np\n'), ((3464, 3506), 'numpy.zeros', 'np.zeros', (['[1, 32, 32, 2]'], {'dtype': 'np.float32'}), '([1, 32, 32, 2], dtype=np.float32)\n', (3472, 3506), True, 'import numpy as np\n'), ((3540, 3560), 'numpy.ones', 'np.ones', (['(1, 16, 16)'], {}), '((1, 16, 16))\n', (3547, 3560), True, 'import numpy as np\n'), ((3642, 3684), 'numpy.zeros', 'np.zeros', (['[1, 32, 32, 2]'], {'dtype': 'np.float32'}), '([1, 32, 32, 2], dtype=np.float32)\n', (3650, 3684), True, 'import numpy as np\n'), ((4454, 4496), 'numpy.zeros', 'np.zeros', (['[1, 32, 32, 2]'], {'dtype': 'np.float32'}), '([1, 32, 32, 2], dtype=np.float32)\n', (4462, 4496), True, 'import numpy as np\n'), ((280, 318), 'eoflow.models.losses.CategoricalFocalLoss', 'CategoricalFocalLoss', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (300, 318), False, 'from eoflow.models.losses import CategoricalCrossEntropy, CategoricalFocalLoss\n'), ((320, 361), 'eoflow.models.losses.CategoricalCrossEntropy', 'CategoricalCrossEntropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (343, 361), False, 'from eoflow.models.losses import CategoricalCrossEntropy, CategoricalFocalLoss\n'), ((386, 407), 'numpy.ones', 'np.ones', (['(1, 1024, 2)'], {}), '((1, 1024, 2))\n', (393, 407), True, 'import numpy as np\n'), ((429, 452), 'numpy.ones', 'np.ones', (['(1, 32, 32, 2)'], {}), '((1, 32, 32, 2))\n', (436, 452), True, 'import numpy as np\n'), ((1338, 1377), 'eoflow.models.losses.CategoricalFocalLoss', 'CategoricalFocalLoss', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (1358, 1377), False, 'from eoflow.models.losses import CategoricalCrossEntropy, CategoricalFocalLoss\n'), ((3586, 3606), 'numpy.ones', 'np.ones', (['[1, 32, 32]'], {}), '([1, 32, 32])\n', (3593, 3606), True, 'import numpy as np\n'), ((4522, 4542), 'numpy.ones', 'np.ones', (['[1, 32, 32]'], {}), '([1, 32, 32])\n', (4529, 4542), True, 'import numpy as np\n'), ((2898, 2917), 'numpy.array', 
'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (2906, 2917), True, 'import numpy as np\n'), ((1457, 1473), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1465, 1473), True, 'import numpy as np\n'), ((3738, 3777), 'eoflow.models.losses.TanimotoDistanceLoss', 'TanimotoDistanceLoss', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (3758, 3777), False, 'from eoflow.models.losses import JaccardDistanceLoss, TanimotoDistanceLoss\n'), ((3833, 3872), 'eoflow.models.losses.TanimotoDistanceLoss', 'TanimotoDistanceLoss', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (3853, 3872), False, 'from eoflow.models.losses import JaccardDistanceLoss, TanimotoDistanceLoss\n'), ((3934, 3973), 'eoflow.models.losses.TanimotoDistanceLoss', 'TanimotoDistanceLoss', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (3954, 3973), False, 'from eoflow.models.losses import JaccardDistanceLoss, TanimotoDistanceLoss\n'), ((4039, 4094), 'eoflow.models.losses.TanimotoDistanceLoss', 'TanimotoDistanceLoss', ([], {'from_logits': '(False)', 'normalise': '(True)'}), '(from_logits=False, normalise=True)\n', (4059, 4094), False, 'from eoflow.models.losses import JaccardDistanceLoss, TanimotoDistanceLoss\n'), ((4585, 4640), 'eoflow.models.losses.TanimotoDistanceLoss', 'TanimotoDistanceLoss', ([], {'from_logits': '(False)', 'normalise': '(True)'}), '(from_logits=False, normalise=True)\n', (4605, 4640), False, 'from eoflow.models.losses import JaccardDistanceLoss, TanimotoDistanceLoss\n'), ((4250, 4266), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (4258, 4266), True, 'import numpy as np\n')]
|
import numpy as np
from keras import backend as Theano
from keras.layers import Dense, Input, Convolution2D, Flatten, merge
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.optimizers import Adadelta, RMSprop, Adam, SGD
from keras.regularizers import l1, l2
from keras.initializations import normal, glorot_uniform
# def my_init(shape, name=None):
# return normal(shape, scale=0.01, name=name)
class QNeuralNetwork:
    """Deep Q-network (DQN) built with the Keras 1.x functional API on the
    Theano backend.

    Maps a state (a stack of screen frames; `state_dim` is presumably a
    Theano-ordered (channels, height, width) shape -- TODO confirm against
    the caller) to one Q-value per action.  With the dueling architecture
    enabled, the last layer additionally carries a state-value output
    concatenated after the advantage outputs.  The constructor compiles
    Theano functions for forward passes (`q_value`) and for weighted
    Huber-loss training steps (`tr_step`); the per-sample weights support
    prioritized experience replay.
    """
    def create_model(self):
        """Build the convolutional Q-network (DeepMind DQN topology:
        three conv layers, flatten, then dense head(s)) and store it in
        ``self.model``."""
        my_init = 'glorot_uniform'
        # This is the place where neural network model initialized
        self.state_in = Input(self.state_dim)
        # self.state_inp = BatchNormalization()(self.state_in)
        self.l1 = Convolution2D(32, 8, 8, activation='relu', init=my_init, subsample=(4, 4), border_mode='same')(
            self.state_in)
        # self.l1bn = BatchNormalization()(self.l1)
        self.l2 = Convolution2D(64, 4, 4, activation='relu', init=my_init, subsample=(2, 2), border_mode='same')(
            self.l1)
        # self.l2bn = BatchNormalization()(self.l2)
        self.l3 = Convolution2D(64, 3, 3, activation='relu', init=my_init, subsample=(1, 1), border_mode='same')(
            self.l2)
        # self.l3bn = BatchNormalization()(self.l3)
        self.h = Flatten()(self.l3)
        if self.DUELING_ARCHITECTURE:
            # Dueling head: separate advantage (a) and value (v) streams,
            # concatenated so the output has action_dim + 1 units; the
            # streams are recombined symbolically in __init__.
            self.hida = Dense(256, init=my_init, activation='relu')(self.h)
            self.hidv = Dense(256, init=my_init, activation='relu')(self.h)
            self.v = Dense(1)(self.hidv)
            self.a = Dense(self.action_dim)(self.hida)
            self.q = merge([self.a, self.v], mode='concat')
        else:
            self.hid = Dense(512, init=my_init, activation='relu')(self.h)
            self.q = Dense(self.action_dim, init=my_init)(self.hid)
        self.model = Model(self.state_in, self.q)
    # def create_model(self):
    #     # This is the place where neural network model initialized
    #     self.state_in = Input(self.state_dim)  # This layer is required for any network.
    #     if self.DUELING_ARCHITECTURE:
    #         self.hida = Dense(10, activation='relu')(self.state_in)
    #         self.hidv = Dense(10, activation='relu')(self.state_in)
    #         self.v = Dense(1)(self.hidv)
    #         self.a = Dense(self.action_dim)(self.hida)
    #         self.q = merge([self.a, self.v], mode='concat')
    #     else:
    #         self.hid = Dense(64, activation='relu', init='lecun_uniform')(self.state_in)
    #         self.q = Dense(self.action_dim, init='lecun_uniform')(self.hid)
    #     self.model = Model(self.state_in, self.q) # Complete the model
    def __init__(self, state_dim, action_dim, batch_size=32, learning_rate=1., DUELING_ARCHITECTURE=False):
        """ Initialize Q-network.
        Args:
            state_dim: dimensionality of space of states
            action_dim: dimensionality of space of actions
            batch_size: size of minibatch for network's train
            learning_rate: learning rate of optimizer
            DUELING_ARCHITECTURE: dueling network architecture activation
        """
        # Assign network features
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.DUELING_ARCHITECTURE = DUELING_ARCHITECTURE
        # Create input for training
        self.actions = Input(shape=(self.batch_size,), dtype='int32')
        self.target = Input(shape=(self.batch_size,), dtype='float32')
        self.weights = Input(shape=(self.batch_size,), dtype='float32')  # These weights are for weighted update
        # Initialize model
        self.create_model()
        # Compute q-values (symbolic output of the network's last layer)
        self.Qs = self.model.layers[-1].output
        # Make a function for get output of network; only the first
        # action_dim columns are Q-values (the extra column, if any, is
        # the dueling value stream)
        self.q_value = Theano.function([self.state_in], self.Qs[:, :self.action_dim])
        # Get q-values for corresponding actions.  actionmask is a
        # (batch, action_dim) one-hot matrix selecting each sample's
        # chosen action.
        tmp = np.arange(self.batch_size)
        actionmask = Theano.T.eq(Theano.T.arange(self.action_dim).reshape((1, -1)),
                                    self.actions.reshape((-1, 1))).astype(Theano.T.config.floatX)
        if DUELING_ARCHITECTURE:
            # Dueling recombination: Q(s,a) = A(s,a) + V(s) - mean_a A(s,a)
            # (the last output column is V; the first action_dim are A)
            self.a_output = self.Qs[tmp, self.actions.reshape((self.batch_size,))]
            self.v_output = self.Qs[tmp, -1]
            self.q_output = self.a_output + self.v_output - self.Qs[tmp,
                                                            : self.action_dim].mean(axis=1)
        else:
            self.q_output = Theano.T.sum(self.Qs * actionmask, axis=1)
        # Compute TD-error
        self.error = (self.q_output - self.target.reshape((self.batch_size,)))
        # Huber-style cost: quadratic for |weighted error| <= 1, linear
        # beyond that (more robust to outliers than plain MSE)
        self.error_ = self.weights * self.error
        quadratic_part = Theano.T.minimum(abs(self.error_), 1.)
        linear_part = abs(self.error_) - quadratic_part
        loss = 0.5 * quadratic_part ** 2 + 1. * linear_part
        self.cost = Theano.T.sum(loss)/self.batch_size
        # Initialize an optimizer
        self.opt = RMSprop(lr=self.learning_rate, rho=0.95, epsilon=1e-6)
        self.params = self.model.trainable_weights
        self.updates = self.opt.get_updates(self.params, [], self.cost)
        # Make a function to update weights and get information about cost an TD-errors
        self.tr_step = Theano.function(
            [self.target, self.state_in, self.actions, self.weights], # Input
            [self.cost, self.error, self.q_output, self.Qs,self.error_], # Output when make a training step
            updates=self.updates) # Update weights
    def get_output(self, state):
        # This is a function for simple agent-network interaction
        # Returns Q-values for a single batch of states.
        return self.q_value([state])
    def q_actions(self, state, actions):
        # This is a function for simple agent-network interaction
        # Returns, per sample, the Q-value of the given action only.
        return self.q_value([state])[np.array(range(self.batch_size)), actions]
    def train_step(self, target, state_in, actions, weights=None):
        """ This is a function which agent calls when want to train network.
        If there is no prioritized xp-replay there is no weighted update and weights are set as 1
        """
        if weights is None:
            weights = np.ones((1, state_in.shape[0]))
        return self.tr_step([target, state_in, actions, weights])
|
[
"keras.layers.Convolution2D",
"keras.backend.function",
"keras.layers.Flatten",
"keras.backend.T.sum",
"keras.models.Model",
"numpy.ones",
"keras.backend.T.arange",
"keras.layers.Dense",
"numpy.arange",
"keras.layers.Input",
"keras.optimizers.RMSprop",
"keras.layers.merge"
] |
[((627, 648), 'keras.layers.Input', 'Input', (['self.state_dim'], {}), '(self.state_dim)\n', (632, 648), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((1840, 1868), 'keras.models.Model', 'Model', (['self.state_in', 'self.q'], {}), '(self.state_in, self.q)\n', (1845, 1868), False, 'from keras.models import Model\n'), ((3453, 3499), 'keras.layers.Input', 'Input', ([], {'shape': '(self.batch_size,)', 'dtype': '"""int32"""'}), "(shape=(self.batch_size,), dtype='int32')\n", (3458, 3499), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((3522, 3570), 'keras.layers.Input', 'Input', ([], {'shape': '(self.batch_size,)', 'dtype': '"""float32"""'}), "(shape=(self.batch_size,), dtype='float32')\n", (3527, 3570), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((3594, 3642), 'keras.layers.Input', 'Input', ([], {'shape': '(self.batch_size,)', 'dtype': '"""float32"""'}), "(shape=(self.batch_size,), dtype='float32')\n", (3599, 3642), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((3892, 3954), 'keras.backend.function', 'Theano.function', (['[self.state_in]', 'self.Qs[:, :self.action_dim]'], {}), '([self.state_in], self.Qs[:, :self.action_dim])\n', (3907, 3954), True, 'from keras import backend as Theano\n'), ((4018, 4044), 'numpy.arange', 'np.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (4027, 4044), True, 'import numpy as np\n'), ((5116, 5171), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'self.learning_rate', 'rho': '(0.95)', 'epsilon': '(1e-06)'}), '(lr=self.learning_rate, rho=0.95, epsilon=1e-06)\n', (5123, 5171), False, 'from keras.optimizers import Adadelta, RMSprop, Adam, SGD\n'), ((5405, 5572), 'keras.backend.function', 'Theano.function', (['[self.target, self.state_in, self.actions, self.weights]', '[self.cost, self.error, self.q_output, self.Qs, self.error_]'], {'updates': 'self.updates'}), '([self.target, 
self.state_in, self.actions, self.weights], [\n self.cost, self.error, self.q_output, self.Qs, self.error_], updates=\n self.updates)\n', (5420, 5572), True, 'from keras import backend as Theano\n'), ((730, 828), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(8)', '(8)'], {'activation': '"""relu"""', 'init': 'my_init', 'subsample': '(4, 4)', 'border_mode': '"""same"""'}), "(32, 8, 8, activation='relu', init=my_init, subsample=(4, 4),\n border_mode='same')\n", (743, 828), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((923, 1021), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(4)', '(4)'], {'activation': '"""relu"""', 'init': 'my_init', 'subsample': '(2, 2)', 'border_mode': '"""same"""'}), "(64, 4, 4, activation='relu', init=my_init, subsample=(2, 2),\n border_mode='same')\n", (936, 1021), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((1110, 1208), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'activation': '"""relu"""', 'init': 'my_init', 'subsample': '(1, 1)', 'border_mode': '"""same"""'}), "(64, 3, 3, activation='relu', init=my_init, subsample=(1, 1),\n border_mode='same')\n", (1123, 1208), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((1296, 1305), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1303, 1305), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((1623, 1661), 'keras.layers.merge', 'merge', (['[self.a, self.v]'], {'mode': '"""concat"""'}), "([self.a, self.v], mode='concat')\n", (1628, 1661), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((4593, 4635), 'keras.backend.T.sum', 'Theano.T.sum', (['(self.Qs * actionmask)'], {'axis': '(1)'}), '(self.Qs * actionmask, axis=1)\n', (4605, 4635), True, 'from keras import backend as Theano\n'), ((5028, 5046), 'keras.backend.T.sum', 'Theano.T.sum', (['loss'], {}), '(loss)\n', (5040, 
5046), True, 'from keras import backend as Theano\n'), ((6294, 6325), 'numpy.ones', 'np.ones', (['(1, state_in.shape[0])'], {}), '((1, state_in.shape[0]))\n', (6301, 6325), True, 'import numpy as np\n'), ((1378, 1421), 'keras.layers.Dense', 'Dense', (['(256)'], {'init': 'my_init', 'activation': '"""relu"""'}), "(256, init=my_init, activation='relu')\n", (1383, 1421), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((1454, 1497), 'keras.layers.Dense', 'Dense', (['(256)'], {'init': 'my_init', 'activation': '"""relu"""'}), "(256, init=my_init, activation='relu')\n", (1459, 1497), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((1527, 1535), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (1532, 1535), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((1568, 1590), 'keras.layers.Dense', 'Dense', (['self.action_dim'], {}), '(self.action_dim)\n', (1573, 1590), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((1699, 1742), 'keras.layers.Dense', 'Dense', (['(512)'], {'init': 'my_init', 'activation': '"""relu"""'}), "(512, init=my_init, activation='relu')\n", (1704, 1742), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((1772, 1808), 'keras.layers.Dense', 'Dense', (['self.action_dim'], {'init': 'my_init'}), '(self.action_dim, init=my_init)\n', (1777, 1808), False, 'from keras.layers import Dense, Input, Convolution2D, Flatten, merge\n'), ((4079, 4111), 'keras.backend.T.arange', 'Theano.T.arange', (['self.action_dim'], {}), '(self.action_dim)\n', (4094, 4111), True, 'from keras import backend as Theano\n')]
|
import os
import numpy as np
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import np_tif
from stack_registration import bucket
def _load_phase_and_ste(path, crop_left, crop_right, flip_updown,
                        avg_laser_brightness=None, less_rows=3,
                        num_delays=3, sigma=9, bucket_width=8):
    """Load one single-shot dataset and return its display-ready stacks.

    The same pipeline is applied to all three refractive-index datasets:
      1. load the tif, drop `less_rows` overexposed rows at the top and
         bottom (optionally flipping the frames up/down),
      2. reshape to a (phase-plate angle, red/green delay, y, x) hyperstack,
      3. normalize every image to a common background brightness to cancel
         laser-intensity fluctuations (a correction of roughly 1% or less),
      4. build the STE signal (zero-delay minus the average of the max and
         min delay images) and the phase-contrast signal (min-delay image
         minus the nearest minimum-contrast phase-plate image),
      5. low-pass filter to reject striping artifacts that lie outside the
         microscope's spatial pass-band,
      6. crop columns [crop_left:crop_right] (rows are always [0:122]) and
         bin pixels into `bucket_width`-wide buckets for display.

    Returns (phase_cropped, STE_cropped, avg_laser_brightness).  When
    `avg_laser_brightness` is None it is computed from this dataset, so the
    first dataset processed defines the common normalization target.
    """
    data = np_tif.tif_to_array(path).astype(np.float64)
    # get rid of overexposed rows at top and bottom of images
    data = data[:, less_rows:data.shape[1] - less_rows, :]
    if flip_updown:
        data = data[:, ::-1, :]  # flip up down
    # reshape to hyperstack; '//' (not '/') so the shape stays an int
    # under Python 3 as well
    data = data.reshape((
        data.shape[0] // num_delays,  # phase plate angle number
        num_delays,
        data.shape[1],
        data.shape[2],
        ))
    # Scale all images to have the same background brightness, accounting
    # for laser intensity fluctuations.
    if avg_laser_brightness is None:
        avg_laser_brightness = get_bg_level(data.mean(axis=(0, 1)))
    local_laser_brightness = get_bg_level(data)
    data = data * (avg_laser_brightness / local_laser_brightness).reshape(
        data.shape[0], data.shape[1], 1, 1)
    # get zero delay images, max delay images and phase contrast images
    zero_delay_images = data[:, 1, :, :]  # zero red/green delay
    max_delay_images = data[
        :, 0:3:2, :, :].mean(axis=1)  # average max and min delay
    phase_contrast_images = data[:, 0, :, :]  # red before green (min delay)
    # from the image where red/green are simultaneous, subtract the
    # average of the max and min delay images
    STE_stack = zero_delay_images - max_delay_images
    # The phase contrast stack carries a large sample-independent
    # background from multiple reflections in the microscope.  Subtract
    # the stationary component: for each image, use the minimum-contrast
    # image with the closest phase plate angle (index 5 for the first 8
    # angles, index 11 for the rest).
    phase_stack = phase_contrast_images
    phase_stack[0:8, ...] = phase_stack[0:8, ...] - phase_contrast_images[
        5:6, :, :]
    phase_stack[8:15, ...] = phase_stack[8:15, ...] - phase_contrast_images[
        11:12, :, :]
    # The non-stationary background component is striping completely
    # outside the microscope's spatial pass-band; smoothing strongly
    # attenuates it with almost no effect on sample spatial frequencies.
    STE_stack = gaussian_filter(STE_stack, sigma=(0, sigma, sigma))
    phase_stack = gaussian_filter(phase_stack, sigma=(0, sigma, sigma))
    # crop images to center the bead and fit into the figure
    top, bot = 0, 122
    phase_cropped = phase_stack[:, top:bot, crop_left:crop_right]
    STE_cropped = STE_stack[:, top:bot, crop_left:crop_right]
    # Our pixels are tiny (8.7 nm/pixel) to give large dynamic range;
    # bin them into a more typical size for viewing.
    phase_cropped = bucket(
        phase_cropped, (1, bucket_width, bucket_width)) / bucket_width**2
    STE_cropped = bucket(
        STE_cropped, (1, bucket_width, bucket_width)) / bucket_width**2
    return phase_cropped, STE_cropped, avg_laser_brightness


def _montage(images, fill_value, between_pics):
    """Place equally-sized 2-D images side by side, separated by
    `between_pics` columns of `fill_value` (white, which also pins the
    upper limit of the shared colorbar)."""
    height, width = images[0].shape
    big_width = width * len(images) + between_pics * (len(images) - 1)
    wide = np.zeros((height, big_width)) + fill_value
    for i, img in enumerate(images):
        left = i * (width + between_pics)
        wide[:, left:left + width] = img
    return wide


def _save_figure(bead_image, STE_image, text_color, bead_imshow_kwargs,
                 filename):
    """Plot one (phase contrast, STE) montage pair with colorbars and the
    three refractive-index-contrast labels, save it as svg, and show it."""
    labels = ((12, r'$\Delta n\approx +0.05$'),
              (53, r'$\Delta n\approx 0$'),
              (79, r'$\Delta n\approx -0.01$'))
    fig, (ax0, ax1) = plt.subplots(nrows=2, ncols=1, figsize=(20, 7))
    cax0 = ax0.imshow(bead_image, cmap=plt.cm.gray,
                      interpolation='nearest', **bead_imshow_kwargs)
    ax0.axis('off')
    divider = make_axes_locatable(ax0)
    cax = divider.append_axes("right", size="1%", pad=0.25)
    plt.colorbar(cax0, cax=cax)
    ax0.set_title('Phase contrast image of scattered light from bead',
                  fontsize=30)
    for x, label in labels:
        ax0.text(x, 14, label,
                 fontsize=38, color=text_color, fontweight='bold')
    cax1 = ax1.imshow(STE_image, cmap=plt.cm.gray, interpolation='nearest')
    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("right", size="1%", pad=0.25)
    plt.colorbar(cax1, cax=cax)
    for x, label in labels:
        ax1.text(x, 14, label,
                 fontsize=38, color=text_color, fontweight='bold')
    ax1.set_title('Change due to excitation', fontsize=30)
    ax1.axis('off')
    plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
    plt.show()


def main():
    """Build figure 3: phase contrast and stimulated emission (STE) images
    of a crimson bead in three mounting media (refractive index contrast
    roughly +0.05, 0 and -0.01), shown at the two phase-plate angles that
    maximize positive and negative bead contrast."""
    assert os.path.isdir('./../images')
    if not os.path.isdir('./../images/figure_3'):
        os.mkdir('./../images/figure_3')
    data_dir = './../../stimulated_emission_imaging-data'
    # meltmount mix (n = 1.58/1.61) data; this dataset also defines the
    # common background-brightness normalization target
    mix_phase, mix_STE, avg_laser_brightness = _load_phase_and_ste(
        data_dir + '/2018_02_23_STE_phase_cr_bead_4' +
        '/dataset_green_1010mW_single_shot.tif',
        crop_left=109, crop_right=361, flip_updown=True)
    # meltmount n = 1.54 data
    n_1_53_phase, n_1_53_STE, _ = _load_phase_and_ste(
        data_dir + '/2018_02_27_STE_phase_n_1_54_cr_bead_0' +
        '/dataset_green_970mW_single_shot.tif',
        crop_left=44, crop_right=296, flip_updown=False,
        avg_laser_brightness=avg_laser_brightness)
    # meltmount n = 1.61 data
    n_1_61_phase, n_1_61_STE, _ = _load_phase_and_ste(
        data_dir + '/2018_02_26_STE_phase_n_1_61_cr_bead_0' +
        '/dataset_green_1060mW_single_shot.tif',
        crop_left=59, crop_right=311, flip_updown=True,
        avg_laser_brightness=avg_laser_brightness)
    # Per dataset, pick the two phase plate angles that maximize bead
    # contrast (+/- contrast).  Montage order is n=1.54, mix, n=1.61
    # (left to right), matching the +0.05 / ~0 / -0.01 labels.
    zero_phase_beads = [n_1_53_phase[8], mix_phase[8], n_1_61_phase[8]]
    pi_phase_beads = [n_1_53_phase[13], mix_phase[0], n_1_61_phase[0]]
    zero_phase_STEs = [n_1_53_STE[8], mix_STE[8], n_1_61_STE[8]]
    pi_phase_STEs = [n_1_53_STE[13], mix_STE[0], n_1_61_STE[0]]
    # get max and min values to unify the colorbars
    all_phase = np.concatenate(zero_phase_beads + pi_phase_beads, axis=0)
    all_STE = np.concatenate(zero_phase_STEs + pi_phase_STEs, axis=0)
    max_phase = int(np.amax(all_phase)) + 1
    min_phase = int(np.amin(all_phase)) - 1
    max_ste = int(np.amax(all_STE)) + 1
    min_ste = int(np.amin(all_STE)) - 1
    # make the scale bar black to give the lower limit of each colorbar
    bar_vert, bar_left, bar_right = -2, 1, 6
    for img in zero_phase_beads + pi_phase_beads:
        img[bar_vert, bar_left:bar_right] = min_phase
    for img in zero_phase_STEs + pi_phase_STEs:
        img[bar_vert, bar_left:bar_right] = min_ste
    # assemble wide side-by-side montages; the gap color is white and
    # gives the upper limit of each colorbar
    bucket_width = 8  # must match _load_phase_and_ste's binning
    between_pics = int(16 / bucket_width)
    zero_phase_bead_image = _montage(zero_phase_beads, max_phase, between_pics)
    pi_phase_bead_image = _montage(pi_phase_beads, max_phase, between_pics)
    zero_phase_STE_image = _montage(zero_phase_STEs, max_ste, between_pics)
    pi_phase_STE_image = _montage(pi_phase_STEs, max_ste, between_pics)
    # generate and save the two figures
    _save_figure(
        pi_phase_bead_image, pi_phase_STE_image, text_color='black',
        bead_imshow_kwargs={'vmax': 2500, 'vmin': -4200},
        filename='./../images/figure_3/STE_crimson_bead_pi_phase.svg')
    _save_figure(
        zero_phase_bead_image, zero_phase_STE_image, text_color='white',
        bead_imshow_kwargs={'vmin': -2300},
        filename='./../images/figure_3/STE_crimson_bead_zero_phase.svg')
    return None
def get_bg_level(data):
    """Mean brightness over two fixed background rectangles.

    Averages the pixel values of *data* (last two axes = y, x) inside two
    hard-coded background regions of the camera frame and returns the mean
    of the two region averages.  Any leading axes of *data* are preserved,
    so a stack of images yields one background level per image.
    """
    regions = (
        (slice(2, 120), slice(285, 379)),  # right-hand background strip
        (slice(2, 120), slice(1, 81)),     # left-hand background strip
        )
    region_means = [data[..., rows, cols].mean(axis=(-2, -1))
                    for rows, cols in regions]
    return sum(region_means) / len(regions)
if __name__ == '__main__':
    # Run only when executed as a script, not when imported as a module.
    main()
|
[
"mpl_toolkits.axes_grid1.make_axes_locatable",
"os.mkdir",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"numpy.amin",
"os.path.isdir",
"scipy.ndimage.gaussian_filter",
"numpy.zeros",
"matplotlib.pyplot.colorbar",
"numpy.amax",
"stack_registration.bucket",
"matplotlib.pyplot.subplots",
"np_tif.tif_to_array",
"numpy.concatenate"
] |
[((244, 272), 'os.path.isdir', 'os.path.isdir', (['"""./../images"""'], {}), "('./../images')\n", (257, 272), False, 'import os\n'), ((3460, 3511), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['STE_stack'], {'sigma': '(0, sigma, sigma)'}), '(STE_stack, sigma=(0, sigma, sigma))\n', (3475, 3511), False, 'from scipy.ndimage import gaussian_filter\n'), ((3531, 3584), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['phase_stack'], {'sigma': '(0, sigma, sigma)'}), '(phase_stack, sigma=(0, sigma, sigma))\n', (3546, 3584), False, 'from scipy.ndimage import gaussian_filter\n'), ((7509, 7560), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['STE_stack'], {'sigma': '(0, sigma, sigma)'}), '(STE_stack, sigma=(0, sigma, sigma))\n', (7524, 7560), False, 'from scipy.ndimage import gaussian_filter\n'), ((7580, 7633), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['phase_stack'], {'sigma': '(0, sigma, sigma)'}), '(phase_stack, sigma=(0, sigma, sigma))\n', (7595, 7633), False, 'from scipy.ndimage import gaussian_filter\n'), ((11556, 11607), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['STE_stack'], {'sigma': '(0, sigma, sigma)'}), '(STE_stack, sigma=(0, sigma, sigma))\n', (11571, 11607), False, 'from scipy.ndimage import gaussian_filter\n'), ((11627, 11680), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['phase_stack'], {'sigma': '(0, sigma, sigma)'}), '(phase_stack, sigma=(0, sigma, sigma))\n', (11642, 11680), False, 'from scipy.ndimage import gaussian_filter\n'), ((13022, 13226), 'numpy.concatenate', 'np.concatenate', (['(n_mix_zero_phase_bead_image, n_1_53_zero_phase_bead_image,\n n_1_61_zero_phase_bead_image, n_mix_pi_phase_bead_image,\n n_1_53_pi_phase_bead_image, n_1_61_pi_phase_bead_image)'], {'axis': '(0)'}), '((n_mix_zero_phase_bead_image, n_1_53_zero_phase_bead_image,\n n_1_61_zero_phase_bead_image, n_mix_pi_phase_bead_image,\n n_1_53_pi_phase_bead_image, n_1_61_pi_phase_bead_image), axis=0)\n', (13036, 13226), True, 'import numpy 
as np\n'), ((13289, 13487), 'numpy.concatenate', 'np.concatenate', (['(n_mix_zero_phase_STE_image, n_1_53_zero_phase_STE_image,\n n_1_61_zero_phase_STE_image, n_mix_pi_phase_STE_image,\n n_1_53_pi_phase_STE_image, n_1_61_pi_phase_STE_image)'], {'axis': '(0)'}), '((n_mix_zero_phase_STE_image, n_1_53_zero_phase_STE_image,\n n_1_61_zero_phase_STE_image, n_mix_pi_phase_STE_image,\n n_1_53_pi_phase_STE_image, n_1_61_pi_phase_STE_image), axis=0)\n', (13303, 13487), True, 'import numpy as np\n'), ((16724, 16771), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)', 'figsize': '(20, 7)'}), '(nrows=2, ncols=1, figsize=(20, 7))\n', (16736, 16771), True, 'import matplotlib.pyplot as plt\n'), ((16940, 16964), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax0'], {}), '(ax0)\n', (16959, 16964), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((17029, 17056), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cax0'], {'cax': 'cax'}), '(cax0, cax=cax)\n', (17041, 17056), True, 'import matplotlib.pyplot as plt\n'), ((17621, 17645), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax1'], {}), '(ax1)\n', (17640, 17645), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((17710, 17737), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cax1'], {'cax': 'cax'}), '(cax1, cax=cax)\n', (17722, 17737), True, 'import matplotlib.pyplot as plt\n'), ((18170, 18276), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./../images/figure_3/STE_crimson_bead_pi_phase.svg"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "('./../images/figure_3/STE_crimson_bead_pi_phase.svg',\n bbox_inches='tight', pad_inches=0.1)\n", (18181, 18276), True, 'import matplotlib.pyplot as plt\n'), ((18295, 18305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18303, 18305), True, 'import matplotlib.pyplot as plt\n'), ((18331, 18378), 'matplotlib.pyplot.subplots', 'plt.subplots', 
([], {'nrows': '(2)', 'ncols': '(1)', 'figsize': '(20, 7)'}), '(nrows=2, ncols=1, figsize=(20, 7))\n', (18343, 18378), True, 'import matplotlib.pyplot as plt\n'), ((18538, 18562), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax0'], {}), '(ax0)\n', (18557, 18562), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((18627, 18654), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cax0'], {'cax': 'cax'}), '(cax0, cax=cax)\n', (18639, 18654), True, 'import matplotlib.pyplot as plt\n'), ((19221, 19245), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax1'], {}), '(ax1)\n', (19240, 19245), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((19310, 19337), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cax1'], {'cax': 'cax'}), '(cax1, cax=cax)\n', (19322, 19337), True, 'import matplotlib.pyplot as plt\n'), ((19769, 19877), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./../images/figure_3/STE_crimson_bead_zero_phase.svg"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "('./../images/figure_3/STE_crimson_bead_zero_phase.svg',\n bbox_inches='tight', pad_inches=0.1)\n", (19780, 19877), True, 'import matplotlib.pyplot as plt\n'), ((19896, 19906), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19904, 19906), True, 'import matplotlib.pyplot as plt\n'), ((285, 322), 'os.path.isdir', 'os.path.isdir', (['"""./../images/figure_3"""'], {}), "('./../images/figure_3')\n", (298, 322), False, 'import os\n'), ((333, 365), 'os.mkdir', 'os.mkdir', (['"""./../images/figure_3"""'], {}), "('./../images/figure_3')\n", (341, 365), False, 'import os\n'), ((4093, 4147), 'stack_registration.bucket', 'bucket', (['phase_cropped', '(1, bucket_width, bucket_width)'], {}), '(phase_cropped, (1, bucket_width, bucket_width))\n', (4099, 4147), False, 'from stack_registration import bucket\n'), ((4195, 4247), 'stack_registration.bucket', 'bucket', (['STE_cropped', '(1, bucket_width, 
bucket_width)'], {}), '(STE_cropped, (1, bucket_width, bucket_width))\n', (4201, 4247), False, 'from stack_registration import bucket\n'), ((8090, 8144), 'stack_registration.bucket', 'bucket', (['phase_cropped', '(1, bucket_width, bucket_width)'], {}), '(phase_cropped, (1, bucket_width, bucket_width))\n', (8096, 8144), False, 'from stack_registration import bucket\n'), ((8192, 8244), 'stack_registration.bucket', 'bucket', (['STE_cropped', '(1, bucket_width, bucket_width)'], {}), '(STE_cropped, (1, bucket_width, bucket_width))\n', (8198, 8244), False, 'from stack_registration import bucket\n'), ((12137, 12191), 'stack_registration.bucket', 'bucket', (['phase_cropped', '(1, bucket_width, bucket_width)'], {}), '(phase_cropped, (1, bucket_width, bucket_width))\n', (12143, 12191), False, 'from stack_registration import bucket\n'), ((12239, 12291), 'stack_registration.bucket', 'bucket', (['STE_cropped', '(1, bucket_width, bucket_width)'], {}), '(STE_cropped, (1, bucket_width, bucket_width))\n', (12245, 12291), False, 'from stack_registration import bucket\n'), ((15124, 15153), 'numpy.zeros', 'np.zeros', (['(height, big_width)'], {}), '((height, big_width))\n', (15132, 15153), True, 'import numpy as np\n'), ((15196, 15225), 'numpy.zeros', 'np.zeros', (['(height, big_width)'], {}), '((height, big_width))\n', (15204, 15225), True, 'import numpy as np\n'), ((15413, 15442), 'numpy.zeros', 'np.zeros', (['(height, big_width)'], {}), '((height, big_width))\n', (15421, 15442), True, 'import numpy as np\n'), ((15484, 15513), 'numpy.zeros', 'np.zeros', (['(height, big_width)'], {}), '((height, big_width))\n', (15492, 15513), True, 'import numpy as np\n'), ((481, 631), 'np_tif.tif_to_array', 'np_tif.tif_to_array', (["('./../../stimulated_emission_imaging-data' +\n '/2018_02_23_STE_phase_cr_bead_4' + '/dataset_green_1010mW_single_shot.tif'\n )"], {}), "('./../../stimulated_emission_imaging-data' +\n '/2018_02_23_STE_phase_cr_bead_4' + '/dataset_green_1010mW_single_shot.tif'\n )\n", 
(500, 631), False, 'import np_tif\n'), ((4909, 5064), 'np_tif.tif_to_array', 'np_tif.tif_to_array', (["('./../../stimulated_emission_imaging-data' +\n '/2018_02_27_STE_phase_n_1_54_cr_bead_0' +\n '/dataset_green_970mW_single_shot.tif')"], {}), "('./../../stimulated_emission_imaging-data' +\n '/2018_02_27_STE_phase_n_1_54_cr_bead_0' +\n '/dataset_green_970mW_single_shot.tif')\n", (4928, 5064), False, 'import np_tif\n'), ((8911, 9067), 'np_tif.tif_to_array', 'np_tif.tif_to_array', (["('./../../stimulated_emission_imaging-data' +\n '/2018_02_26_STE_phase_n_1_61_cr_bead_0' +\n '/dataset_green_1060mW_single_shot.tif')"], {}), "('./../../stimulated_emission_imaging-data' +\n '/2018_02_26_STE_phase_n_1_61_cr_bead_0' +\n '/dataset_green_1060mW_single_shot.tif')\n", (8930, 9067), False, 'import np_tif\n'), ((13556, 13574), 'numpy.amax', 'np.amax', (['all_phase'], {}), '(all_phase)\n', (13563, 13574), True, 'import numpy as np\n'), ((13601, 13619), 'numpy.amin', 'np.amin', (['all_phase'], {}), '(all_phase)\n', (13608, 13619), True, 'import numpy as np\n'), ((13644, 13660), 'numpy.amax', 'np.amax', (['all_STE'], {}), '(all_STE)\n', (13651, 13660), True, 'import numpy as np\n'), ((13685, 13701), 'numpy.amin', 'np.amin', (['all_STE'], {}), '(all_STE)\n', (13692, 13701), True, 'import numpy as np\n')]
|
import copy
import numpy as np
import time
import matplotlib.pyplot as plt
import memory_profiler
from floris.simulation import Floris
from conftest import SampleInputs
def time_profile(input_dict):
floris = Floris.from_dict(input_dict.floris)
start = time.perf_counter()
floris.steady_state_atmospheric_condition()
end = time.perf_counter()
return end - start
def internal_probe(input_dict):
floris = Floris(input_dict=input_dict.floris)
internal_quantity = floris.steady_state_atmospheric_condition()
return internal_quantity
def memory_profile(input_dict):
floris = Floris(input_dict=input_dict.floris)
mem_usage = memory_profiler.memory_usage(
(floris.steady_state_atmospheric_condition, (), {}),
max_usage=True
)
return mem_usage
if __name__=="__main__":
sample_inputs = SampleInputs()
TURBINE_DIAMETER = sample_inputs.floris["turbine"]["rotor_diameter"]
# Use Gauss models
sample_inputs.floris["wake"]["model_strings"] = {
"velocity_model": "gauss",
"deflection_model": "gauss",
"combination_model": None,
"turbulence_model": None,
}
### Time scaling
# N = 30
# wd_calc_time = np.zeros(N)
# wd_size = np.zeros(N)
# wind_direction_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# wind_direction_scaling_inputs.floris["flow_field"]["wind_directions"] = factor * [270.0]
# wind_direction_scaling_inputs.floris["flow_field"]["wind_speeds"] = [8.0]
# wd_calc_time[i] = time_profile(copy.deepcopy(wind_direction_scaling_inputs))
# wd_size[i] = factor
# print("wind direction", i, wd_calc_time[i])
# ws_calc_time = np.zeros(N)
# ws_size = np.zeros(N)
# wind_speed_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# wind_speed_scaling_inputs.floris["flow_field"]["wind_directions"] = [270.0]
# wind_speed_scaling_inputs.floris["flow_field"]["wind_speeds"] = factor * [8.0]
# ws_calc_time[i] = time_profile(copy.deepcopy(wind_speed_scaling_inputs))
# ws_size[i] = factor
# print("wind speed", i, ws_calc_time[i])
# turb_calc_time = np.zeros(N)
# turb_size = np.zeros(N)
# turbine_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 3
# turbine_scaling_inputs.floris["farm"]["layout_x"] = [5 * TURBINE_DIAMETER * j for j in range(factor)]
# turbine_scaling_inputs.floris["farm"]["layout_y"] = factor * [0.0]
# turb_calc_time[i] = time_profile(copy.deepcopy(turbine_scaling_inputs))
# turb_size[i] = factor
# print("n turbine", i, turb_calc_time[i])
# internal_quantity = np.zeros(N)
# scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(5):
# factor = (i+1) * 2
# scaling_inputs.floris["farm"]["layout_x"] = [5 * TURBINE_DIAMETER * j for j in range(factor)]
# scaling_inputs.floris["farm"]["layout_y"] = factor * [0.0]
# factor = (i+1) * 20
# scaling_inputs.floris["flow_field"]["wind_directions"] = factor * [270.0]
# scaling_inputs.floris["flow_field"]["wind_speeds"] = factor * [8.0]
# internal_quantity[i] = time_profile(scaling_inputs)
# print("n turbine", i, internal_quantity[i])
# plt.figure()
# plt.plot(wd_size, wd_calc_time, 'b+-', label='wind direction')
# plt.plot(ws_size, ws_calc_time, 'g+-', label='wind speed')
# plt.plot(turb_size, turb_calc_time, 'r+-', label='n turbine')
# # plt.plot(simulation_size, internal_quantity, 'b+-', label='internal quantity')
# plt.legend(loc="upper left")
# plt.grid(True)
### Timing larger sizes in each dimension
n_wind_directions = 1
n_wind_speeds = 1
n_turbines = 3
sample_inputs.floris["wake"]["model_strings"] = {
# "velocity_model": "jensen",
# "deflection_model": "jimenez",
"velocity_model": "cc",
"deflection_model": "gauss",
"combination_model": None,
"turbulence_model": None,
}
sample_inputs.floris["solver"] = {
"type": "turbine_grid",
"turbine_grid_points": 5
}
# sample_inputs.floris["wake"]["enable_transverse_velocities"] = False
# sample_inputs.floris["wake"]["enable_secondary_steering"] = False
# sample_inputs.floris["wake"]["enable_yaw_added_recovery"] = False
sample_inputs.floris["flow_field"]["wind_directions"] = n_wind_directions * [270.0]
sample_inputs.floris["flow_field"]["wind_speeds"] = n_wind_speeds * [8.0]
sample_inputs.floris["farm"]["layout_x"] = [5 * TURBINE_DIAMETER * j for j in range(n_turbines)]
sample_inputs.floris["farm"]["layout_y"] = n_turbines * [0.0]
N = 1
times = np.zeros(N)
for i in range(N):
print(f"Iteration {i}")
times[i] = time_profile(copy.deepcopy(sample_inputs))
print(f" {times[i]}")
print(f"Total time: {np.sum(times)}")
print(f"Average per iteration: { np.sum(times) / N }")
### Memory scaling
# N = 6
# simulation_size = np.arange(N)
# wd_space = np.zeros(N)
# wind_direction_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# wind_direction_scaling_inputs.floris["farm"]["wind_directions"] = factor * [270.0]
# wind_direction_scaling_inputs.floris["farm"]["wind_speeds"] = [8.0]
# wd_space[i] = memory_profile(wind_direction_scaling_inputs)
# print("wind direction", i, wd_space[i])
# ws_space = np.zeros(N)
# wind_speed_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# wind_speed_scaling_inputs.floris["farm"]["wind_directions"] = [270.0]
# wind_speed_scaling_inputs.floris["farm"]["wind_speeds"] = factor * [8.0]
# ws_space[i] = memory_profile(wind_speed_scaling_inputs)
# print("wind speed", i, ws_space[i])
# turb_space = np.zeros(N)
# turbine_scaling_inputs = copy.deepcopy(sample_inputs)
# for i in range(N):
# factor = (i+1) * 50
# turbine_scaling_inputs.floris["farm"]["layout_x"] = [5 * TURBINE_DIAMETER * j for j in range(factor)]
# turbine_scaling_inputs.floris["farm"]["layout_y"] = factor * [0.0]
# turb_space[i] = memory_profile(turbine_scaling_inputs)
# print("n turbine", turb_space[i])
# # Remove the min from each test so that each starts at 0
# wd_space = wd_space - min(wd_space)
# ws_space = ws_space - min(ws_space)
# turb_space = turb_space - min(turb_space)
# plt.figure()
# plt.plot(simulation_size, wd_space, 'b+-', label='wind direction')
# plt.plot(simulation_size, ws_space, 'g+-', label='wind speed')
# plt.plot(simulation_size, turb_space, 'r+-', label='n turbine')
# plt.legend(loc="upper left")
# plt.grid(True)
### Show plots
# plt.show()
|
[
"copy.deepcopy",
"floris.simulation.Floris",
"floris.simulation.Floris.from_dict",
"numpy.sum",
"numpy.zeros",
"time.perf_counter",
"memory_profiler.memory_usage",
"conftest.SampleInputs"
] |
[((215, 250), 'floris.simulation.Floris.from_dict', 'Floris.from_dict', (['input_dict.floris'], {}), '(input_dict.floris)\n', (231, 250), False, 'from floris.simulation import Floris\n'), ((263, 282), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (280, 282), False, 'import time\n'), ((341, 360), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (358, 360), False, 'import time\n'), ((430, 466), 'floris.simulation.Floris', 'Floris', ([], {'input_dict': 'input_dict.floris'}), '(input_dict=input_dict.floris)\n', (436, 466), False, 'from floris.simulation import Floris\n'), ((610, 646), 'floris.simulation.Floris', 'Floris', ([], {'input_dict': 'input_dict.floris'}), '(input_dict=input_dict.floris)\n', (616, 646), False, 'from floris.simulation import Floris\n'), ((663, 764), 'memory_profiler.memory_usage', 'memory_profiler.memory_usage', (['(floris.steady_state_atmospheric_condition, (), {})'], {'max_usage': '(True)'}), '((floris.steady_state_atmospheric_condition, (),\n {}), max_usage=True)\n', (691, 764), False, 'import memory_profiler\n'), ((850, 864), 'conftest.SampleInputs', 'SampleInputs', ([], {}), '()\n', (862, 864), False, 'from conftest import SampleInputs\n'), ((4866, 4877), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4874, 4877), True, 'import numpy as np\n'), ((4965, 4993), 'copy.deepcopy', 'copy.deepcopy', (['sample_inputs'], {}), '(sample_inputs)\n', (4978, 4993), False, 'import copy\n'), ((5054, 5067), 'numpy.sum', 'np.sum', (['times'], {}), '(times)\n', (5060, 5067), True, 'import numpy as np\n'), ((5108, 5121), 'numpy.sum', 'np.sum', (['times'], {}), '(times)\n', (5114, 5121), True, 'import numpy as np\n')]
|
from math import floor, atan2, sqrt, pi
import numpy as np
from numba import cuda, void, float64, float32, complex128, complex64, int32
from ._spherical_harmonics import gen_sph
from ..plists import nlist
class ql:
def __init__(self, frame, ls=np.asarray([4, 6]), cell_guess=15, n_guess=10):
self.frame = frame
self.gpu = frame.gpu
self.ls = ls
self._n_m = int(2 * np.max(ls) + 1) # -l ~ 0 ~ l
self._n_l = ls.shape[0] # number of l
self.nlist = nlist(self.frame, contain_self=1,
cell_guess=cell_guess, n_guess=n_guess)
self.r_cut = frame.r_cut
self.dtype = self.frame.x.dtype
self.ql_local = None
self.ql_avg = None
global sphHar
sphHar = gen_sph(self.frame.x.dtype)
if self.dtype == np.dtype(np.float64):
self.float = float64
self.complex = complex128
else:
self.float = float32
self.complex = complex64
self.cu_ql_local = self._ql_local_func()
self.cu_ql_avg = self._ql_avg_func()
self.n_bonds = None
def update(self, x=None, box=None, rc=None, mode='all'):
if x is not None:
self.frame.x = x
if box is not None:
self.frame.box = box
if rc is not None:
self.frame.r_cut = rc
self.r_cut = rc
self.frame.update()
self.nlist.update()
self.calculate(mode)
def calculate(self, mode='all'):
with cuda.gpus[self.gpu]:
d_ls = cuda.to_device(self.ls)
device = cuda.get_current_device()
tpb = device.WARP_SIZE
bpg = int(np.ceil(self.frame.x.shape[0] / tpb))
if mode == 'all' or mode == 'local':
self.ql_local = np.zeros((self.frame.x.shape[0], self._n_l),
dtype=self.frame.x.dtype)
d_ql_local = cuda.to_device(self.ql_local)
self.cu_ql_local[bpg, tpb](
self.frame.d_x,
self.frame.d_box,
self.frame.r_cut,
self.nlist.d_nl,
self.nlist.d_nc,
d_ls,
d_ql_local
)
d_ql_local.copy_to_host(self.ql_local)
cuda.synchronize()
if mode == 'all' or mode == 'avg':
self.ql_avg = np.zeros(self.ls.shape[0])
q_vec_real = np.zeros((self.ls.shape[0], self._n_m),
dtype=self.frame.x.dtype)
q_vec_imag = np.zeros((self.ls.shape[0], self._n_m),
dtype=self.frame.x.dtype)
d_qvec_real = cuda.to_device(q_vec_real)
d_qvec_imag = cuda.to_device(q_vec_imag)
n_bonds = np.zeros(1, dtype=np.int32)
d_n_bonds = cuda.to_device(n_bonds)
self.cu_ql_avg[bpg, tpb](
self.frame.d_x,
self.frame.d_box,
self.frame.r_cut,
self.nlist.d_nl,
self.nlist.d_nc,
self.ls,
d_qvec_real,
d_qvec_imag,
d_n_bonds
)
d_n_bonds.copy_to_host(n_bonds)
d_qvec_real.copy_to_host(q_vec_real)
d_qvec_imag.copy_to_host(q_vec_imag)
cuda.synchronize()
q_vec = q_vec_real + 1j * q_vec_imag
self.n_bonds = n_bonds[0]
if self.n_bonds < 1.0:
self.n_bonds = 1.0
for i in range(q_vec.shape[0]):
tmp = 0
for j in range(q_vec.shape[1]):
tmp += abs(q_vec[i, j] / self.n_bonds) ** 2
self.ql_avg[i] = sqrt(tmp * 4 * np.pi / (2 * self.ls[i] + 1))
def _ql_local_func(self):
_qvi = (self._n_l, self._n_m)
_rei = (self._n_l,)
nb_complex = self.complex
nb_float = self.float
@cuda.jit(void(self.float[:, :], self.float[:], self.float,
int32[:, :], int32[:], int32[:], self.float[:, :]))
def _ql_local(x, box, rc, nl, nc, ls, ret):
i = cuda.grid(1)
if i >= x.shape[0]:
return
q_vec_i = cuda.local.array(_qvi, nb_complex)
res_i = cuda.local.array(_rei, nb_float)
for _ in range(q_vec_i.shape[0]):
res_i[_] = 0
for __ in range(q_vec_i.shape[1]):
q_vec_i[_, __] = 0 + 0j
nn = 0.0
for j in range(nc[i] - 1):
pj = nl[i, j]
for k in range(j + 1, nc[i]):
pk = nl[i, k]
dx = x[pk, 0] - x[pj, 0]
dy = x[pk, 1] - x[pj, 1]
dz = x[pk, 2] - x[pj, 2]
dx = dx - box[0] * floor(dx / box[0] + 0.5)
dy = dy - box[1] * floor(dy / box[1] + 0.5)
dz = dz - box[2] * floor(dz / box[2] + 0.5)
dr = sqrt(dx ** 2 + dy ** 2 + dz ** 2)
if dr >= rc:
continue
nn += 1.0
phi = atan2(dy, dx)
if phi < 0:
phi = phi + 2 * pi
cosTheta = dz / dr
for _l in range(ls.shape[0]):
l = ls[_l]
for m in range(-l, l + 1):
q_vec_i[_l, m + l] += sphHar(l, m, cosTheta, phi)
# print(i, nn)
if nn < 1.0:
nn = 1.0
for _ in range(q_vec_i.shape[0]):
for __ in range(q_vec_i.shape[1]):
res_i[_] += abs(q_vec_i[_, __] / nn) ** 2
for _ in range(q_vec_i.shape[0]):
ret[i, _] = sqrt(res_i[_] * 4 * pi / (2 * ls[_] + 1))
return _ql_local
def _ql_avg_func(self):
@cuda.jit(
void(self.float[:, :], self.float[:], self.float, int32[:, :], int32[:],
int32[:], self.float[:, :], self.float[:, :], int32[:]))
def _ql_avg(x, box, rc, nl, nc, ls, q_vec_real, q_vec_imag, n_bonds):
i = cuda.grid(1)
if i >= x.shape[0]:
return
nn = 0
for j in range(nc[i]):
pj = nl[i, j]
if pj <= i:
continue
dx = x[pj, 0] - x[i, 0]
dy = x[pj, 1] - x[i, 1]
dz = x[pj, 2] - x[i, 2]
dx = dx - box[0] * floor(dx / box[0] + 0.5)
dy = dy - box[1] * floor(dy / box[1] + 0.5)
dz = dz - box[2] * floor(dz / box[2] + 0.5)
dr = sqrt(dx ** 2 + dy ** 2 + dz ** 2)
if dr >= rc:
continue
nn += 1
phi = atan2(dy, dx)
if phi < 0:
phi = phi + 2 * pi
cosTheta = dz / dr
for _l in range(ls.shape[0]):
l = ls[_l]
for m in range(-l, l + 1):
tmp = sphHar(l, m, cosTheta, phi)
cuda.atomic.add(q_vec_real[_l], m + l, tmp.real)
cuda.atomic.add(q_vec_imag[_l], m + l, tmp.imag)
# use very small arrays.
# qvec[i, _l, m + l] += sphHar(l, m, cosTheta, phi) # thread-safe
cuda.atomic.add(n_bonds, 0, nn)
return _ql_avg
|
[
"numba.void",
"numpy.ceil",
"math.sqrt",
"math.atan2",
"numpy.asarray",
"numpy.dtype",
"numba.cuda.get_current_device",
"numba.cuda.to_device",
"numba.cuda.atomic.add",
"numpy.zeros",
"math.floor",
"numba.cuda.local.array",
"numpy.max",
"numba.cuda.grid",
"numba.cuda.synchronize"
] |
[((252, 270), 'numpy.asarray', 'np.asarray', (['[4, 6]'], {}), '([4, 6])\n', (262, 270), True, 'import numpy as np\n'), ((825, 845), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (833, 845), True, 'import numpy as np\n'), ((1567, 1590), 'numba.cuda.to_device', 'cuda.to_device', (['self.ls'], {}), '(self.ls)\n', (1581, 1590), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((1612, 1637), 'numba.cuda.get_current_device', 'cuda.get_current_device', ([], {}), '()\n', (1635, 1637), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((4354, 4366), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (4363, 4366), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((4444, 4478), 'numba.cuda.local.array', 'cuda.local.array', (['_qvi', 'nb_complex'], {}), '(_qvi, nb_complex)\n', (4460, 4478), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((4499, 4531), 'numba.cuda.local.array', 'cuda.local.array', (['_rei', 'nb_float'], {}), '(_rei, nb_float)\n', (4515, 4531), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((4161, 4265), 'numba.void', 'void', (['self.float[:, :]', 'self.float[:]', 'self.float', 'int32[:, :]', 'int32[:]', 'int32[:]', 'self.float[:, :]'], {}), '(self.float[:, :], self.float[:], self.float, int32[:, :], int32[:],\n int32[:], self.float[:, :])\n', (4165, 4265), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((6402, 6414), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (6411, 6414), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((7666, 7697), 'numba.cuda.atomic.add', 'cuda.atomic.add', (['n_bonds', '(0)', 'nn'], {}), '(n_bonds, 0, nn)\n', (7681, 7697), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), 
((6161, 6293), 'numba.void', 'void', (['self.float[:, :]', 'self.float[:]', 'self.float', 'int32[:, :]', 'int32[:]', 'int32[:]', 'self.float[:, :]', 'self.float[:, :]', 'int32[:]'], {}), '(self.float[:, :], self.float[:], self.float, int32[:, :], int32[:],\n int32[:], self.float[:, :], self.float[:, :], int32[:])\n', (6165, 6293), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((1695, 1731), 'numpy.ceil', 'np.ceil', (['(self.frame.x.shape[0] / tpb)'], {}), '(self.frame.x.shape[0] / tpb)\n', (1702, 1731), True, 'import numpy as np\n'), ((1814, 1884), 'numpy.zeros', 'np.zeros', (['(self.frame.x.shape[0], self._n_l)'], {'dtype': 'self.frame.x.dtype'}), '((self.frame.x.shape[0], self._n_l), dtype=self.frame.x.dtype)\n', (1822, 1884), True, 'import numpy as np\n'), ((1955, 1984), 'numba.cuda.to_device', 'cuda.to_device', (['self.ql_local'], {}), '(self.ql_local)\n', (1969, 1984), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((2361, 2379), 'numba.cuda.synchronize', 'cuda.synchronize', ([], {}), '()\n', (2377, 2379), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((2457, 2483), 'numpy.zeros', 'np.zeros', (['self.ls.shape[0]'], {}), '(self.ls.shape[0])\n', (2465, 2483), True, 'import numpy as np\n'), ((2513, 2578), 'numpy.zeros', 'np.zeros', (['(self.ls.shape[0], self._n_m)'], {'dtype': 'self.frame.x.dtype'}), '((self.ls.shape[0], self._n_m), dtype=self.frame.x.dtype)\n', (2521, 2578), True, 'import numpy as np\n'), ((2646, 2711), 'numpy.zeros', 'np.zeros', (['(self.ls.shape[0], self._n_m)'], {'dtype': 'self.frame.x.dtype'}), '((self.ls.shape[0], self._n_m), dtype=self.frame.x.dtype)\n', (2654, 2711), True, 'import numpy as np\n'), ((2780, 2806), 'numba.cuda.to_device', 'cuda.to_device', (['q_vec_real'], {}), '(q_vec_real)\n', (2794, 2806), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((2837, 2863), 
'numba.cuda.to_device', 'cuda.to_device', (['q_vec_imag'], {}), '(q_vec_imag)\n', (2851, 2863), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((2890, 2917), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.int32'}), '(1, dtype=np.int32)\n', (2898, 2917), True, 'import numpy as np\n'), ((2946, 2969), 'numba.cuda.to_device', 'cuda.to_device', (['n_bonds'], {}), '(n_bonds)\n', (2960, 2969), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((3511, 3529), 'numba.cuda.synchronize', 'cuda.synchronize', ([], {}), '()\n', (3527, 3529), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((6032, 6073), 'math.sqrt', 'sqrt', (['(res_i[_] * 4 * pi / (2 * ls[_] + 1))'], {}), '(res_i[_] * 4 * pi / (2 * ls[_] + 1))\n', (6036, 6073), False, 'from math import floor, atan2, sqrt, pi\n'), ((6932, 6965), 'math.sqrt', 'sqrt', (['(dx ** 2 + dy ** 2 + dz ** 2)'], {}), '(dx ** 2 + dy ** 2 + dz ** 2)\n', (6936, 6965), False, 'from math import floor, atan2, sqrt, pi\n'), ((7070, 7083), 'math.atan2', 'atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (7075, 7083), False, 'from math import floor, atan2, sqrt, pi\n'), ((405, 415), 'numpy.max', 'np.max', (['ls'], {}), '(ls)\n', (411, 415), True, 'import numpy as np\n'), ((3936, 3980), 'math.sqrt', 'sqrt', (['(tmp * 4 * np.pi / (2 * self.ls[i] + 1))'], {}), '(tmp * 4 * np.pi / (2 * self.ls[i] + 1))\n', (3940, 3980), False, 'from math import floor, atan2, sqrt, pi\n'), ((5224, 5257), 'math.sqrt', 'sqrt', (['(dx ** 2 + dy ** 2 + dz ** 2)'], {}), '(dx ** 2 + dy ** 2 + dz ** 2)\n', (5228, 5257), False, 'from math import floor, atan2, sqrt, pi\n'), ((5380, 5393), 'math.atan2', 'atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (5385, 5393), False, 'from math import floor, atan2, sqrt, pi\n'), ((6766, 6790), 'math.floor', 'floor', (['(dx / box[0] + 0.5)'], {}), '(dx / box[0] + 0.5)\n', (6771, 6790), False, 'from math import floor, atan2, sqrt, 
pi\n'), ((6826, 6850), 'math.floor', 'floor', (['(dy / box[1] + 0.5)'], {}), '(dy / box[1] + 0.5)\n', (6831, 6850), False, 'from math import floor, atan2, sqrt, pi\n'), ((6886, 6910), 'math.floor', 'floor', (['(dz / box[2] + 0.5)'], {}), '(dz / box[2] + 0.5)\n', (6891, 6910), False, 'from math import floor, atan2, sqrt, pi\n'), ((7392, 7440), 'numba.cuda.atomic.add', 'cuda.atomic.add', (['q_vec_real[_l]', '(m + l)', 'tmp.real'], {}), '(q_vec_real[_l], m + l, tmp.real)\n', (7407, 7440), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((7465, 7513), 'numba.cuda.atomic.add', 'cuda.atomic.add', (['q_vec_imag[_l]', '(m + l)', 'tmp.imag'], {}), '(q_vec_imag[_l], m + l, tmp.imag)\n', (7480, 7513), False, 'from numba import cuda, void, float64, float32, complex128, complex64, int32\n'), ((5046, 5070), 'math.floor', 'floor', (['(dx / box[0] + 0.5)'], {}), '(dx / box[0] + 0.5)\n', (5051, 5070), False, 'from math import floor, atan2, sqrt, pi\n'), ((5110, 5134), 'math.floor', 'floor', (['(dy / box[1] + 0.5)'], {}), '(dy / box[1] + 0.5)\n', (5115, 5134), False, 'from math import floor, atan2, sqrt, pi\n'), ((5174, 5198), 'math.floor', 'floor', (['(dz / box[2] + 0.5)'], {}), '(dz / box[2] + 0.5)\n', (5179, 5198), False, 'from math import floor, atan2, sqrt, pi\n')]
|
import numpy as np
import numba
from src.data import Problem, Case, Matter
from src.operator.solver.common.shape import is_same
@numba.jit('i8(i8[:, :], i8)', nopython=True)
def find_periodicity_row(x_arr, background):
"""
:param x_arr: np.array(int)
:param background: int
:return: int, minimum period
"""
for p in range(1, x_arr.shape[0]):
res = True
i = 0
while i < p and res:
for j in range(x_arr.shape[1]):
colors = np.unique(x_arr[i::p, j])
if len(colors) >= 3:
res = False
break
elif colors.shape[0] == 2 and background not in list(colors):
res = False
break
i += 1
if res:
return p
return x_arr.shape[0]
@numba.jit('i8[:, :](i8[:, :], i8, i8)', nopython=True)
def fill_periodicity_row(x_arr, p, background):
"""
:param x_arr: np.array(int)
:param p: period
:param background: int
:return: np.array(int), filled array
"""
# assertion
assert x_arr.shape[0] > 0 and x_arr.shape[1] > 0
# trivial case
if p == x_arr.shape[0]:
return x_arr.copy()
y_arr = x_arr.copy()
for i in range(p):
for j in range(x_arr.shape[1]):
v = background
for a in x_arr[i::p, j]:
if a != background:
v = a
y_arr[i::p, j] = v
return y_arr
class AutoFillRowColPeriodicity:
def __init__(self):
pass
@classmethod
def find_periodicity_col(cls, x_arr, background=0):
"""
:param x_arr: np.array(int)
:param background: int
:return: int, minimum period
"""
return find_periodicity_row(x_arr.transpose(), background)
@classmethod
def auto_fill_row(cls, x_arr, background=0):
"""
:param x_arr: np.array(int), must be >= 0 otherwise returns x_arr.copy()
:param background: int
:return: np.array(int), filled array in row_wise
"""
p_row = find_periodicity_row(x_arr, background)
return fill_periodicity_row(x_arr, p_row, background)
@classmethod
def auto_fill_col(cls, x_arr, background=0):
"""
:param x_arr: np.array(int), must be >= 0 otherwise returns x_arr.copy()
:param background: int
:return: np.array(int), filled array in col_wise
"""
return cls.auto_fill_row(x_arr.transpose(), background).transpose()
@classmethod
def auto_fill_row_col(cls, x_arr, background=0):
"""
:param x_arr: np.array(int), must be >= 0 otherwise returns x_arr.copy()
:param background: int
:return: np.array(int), filled array in row_wise and col_wise, row first
"""
y_arr = x_arr.copy()
iter_times = 0
while iter_times < 10000:
z_arr, y_arr = y_arr, cls.auto_fill_row(y_arr, background)
z_arr, y_arr = y_arr, cls.auto_fill_col(y_arr, background)
if np.abs(z_arr - y_arr).sum() == 0:
return z_arr
iter_times += 1
assert iter_times == -1 # break by assertion error
return None
@classmethod
def auto_fill_row_col_background(cls, x_arr):
cost = 10000
background_res = 0
for background in range(10):
z_arr = cls.auto_fill_row_col(x_arr, background)
cost_temp = find_periodicity_row(z_arr, background) * cls.find_periodicity_col(z_arr, background)
if cost_temp < cost:
cost = cost_temp
background_res = background
return cls.auto_fill_row_col(x_arr, background_res)
@classmethod
def case_row(cls, c: Case) -> Case:
new_case = c.copy()
new_values = cls.auto_fill_row(c.repr_values(), c.background_color)
new_case.matter_list = [Matter(new_values, background_color=c.background_color, new=True)]
return new_case
@classmethod
def case_col(cls, c: Case) -> Case:
new_case = c.copy()
new_values = cls.auto_fill_col(c.repr_values(), c.background_color)
new_case.matter_list = [Matter(new_values, background_color=c.background_color, new=True)]
return new_case
@classmethod
def case_row_col(cls, c: Case) -> Case:
new_case = c.copy()
new_values = cls.auto_fill_row_col(c.repr_values(), c.background_color)
new_case.matter_list = [Matter(new_values, background_color=c.background_color, new=True)]
return new_case
@classmethod
def problem(cls, p: Problem) -> Problem:
assert is_same(p)
if p.is_periodic_row and p.is_periodic_col:
q: Problem = p.copy()
q.train_x_list = [cls.case_row_col(c) for c in p.train_x_list]
q.test_x_list = [cls.case_row_col(c) for c in p.test_x_list]
elif p.is_periodic_row:
q: Problem = p.copy()
q.train_x_list = [cls.case_row(c) for c in p.train_x_list]
q.test_x_list = [cls.case_row(c) for c in p.test_x_list]
elif p.is_periodic_col:
q: Problem = p.copy()
q.train_x_list = [cls.case_col(c) for c in p.train_x_list]
q.test_x_list = [cls.case_col(c) for c in p.test_x_list]
else:
raise AssertionError
return q
if __name__ == "__main__":
x = np.ones((5, 3), dtype=np.int)
print(find_periodicity_row(x, 0))
x[1, :] = 2
print(find_periodicity_row(x, 0))
x[3, :] = 2
print(find_periodicity_row(x, 0))
print(AutoFillRowColPeriodicity.find_periodicity_col(x))
x = np.zeros((5, 3), dtype=np.int)
print(find_periodicity_row(x, 0))
print(find_periodicity_row(x, -1))
x[1, :] = 2
print(find_periodicity_row(x, 0))
print(find_periodicity_row(x, -1))
x[3, :] = 2
print(find_periodicity_row(x, 0))
print(find_periodicity_row(x, -1))
print(AutoFillRowColPeriodicity.find_periodicity_col(x))
print(AutoFillRowColPeriodicity.find_periodicity_col(x, -1))
x = np.zeros((5, 3), dtype=np.int)
x[1, :] = 2
print(fill_periodicity_row(x, 2, 0))
print(fill_periodicity_row(x, 3, 0))
x = np.zeros((5, 3), dtype=np.int)
x[:2, :2] = 1
x[1, 1] = 2
print(find_periodicity_row(x, 0))
print(fill_periodicity_row(x, 2, 0))
print(AutoFillRowColPeriodicity.auto_fill_row(x))
print(AutoFillRowColPeriodicity.auto_fill_row_col(x))
x = np.ones((5, 3), dtype=np.int)
x[1, :] = 3
x[3:, :] = 5
print(x)
print(AutoFillRowColPeriodicity.auto_fill_row_col_background(x))
|
[
"numpy.abs",
"numpy.zeros",
"numpy.ones",
"src.operator.solver.common.shape.is_same",
"src.data.Matter",
"numba.jit",
"numpy.unique"
] |
[((131, 175), 'numba.jit', 'numba.jit', (['"""i8(i8[:, :], i8)"""'], {'nopython': '(True)'}), "('i8(i8[:, :], i8)', nopython=True)\n", (140, 175), False, 'import numba\n'), ((843, 897), 'numba.jit', 'numba.jit', (['"""i8[:, :](i8[:, :], i8, i8)"""'], {'nopython': '(True)'}), "('i8[:, :](i8[:, :], i8, i8)', nopython=True)\n", (852, 897), False, 'import numba\n'), ((5445, 5474), 'numpy.ones', 'np.ones', (['(5, 3)'], {'dtype': 'np.int'}), '((5, 3), dtype=np.int)\n', (5452, 5474), True, 'import numpy as np\n'), ((5690, 5720), 'numpy.zeros', 'np.zeros', (['(5, 3)'], {'dtype': 'np.int'}), '((5, 3), dtype=np.int)\n', (5698, 5720), True, 'import numpy as np\n'), ((6119, 6149), 'numpy.zeros', 'np.zeros', (['(5, 3)'], {'dtype': 'np.int'}), '((5, 3), dtype=np.int)\n', (6127, 6149), True, 'import numpy as np\n'), ((6256, 6286), 'numpy.zeros', 'np.zeros', (['(5, 3)'], {'dtype': 'np.int'}), '((5, 3), dtype=np.int)\n', (6264, 6286), True, 'import numpy as np\n'), ((6521, 6550), 'numpy.ones', 'np.ones', (['(5, 3)'], {'dtype': 'np.int'}), '((5, 3), dtype=np.int)\n', (6528, 6550), True, 'import numpy as np\n'), ((4687, 4697), 'src.operator.solver.common.shape.is_same', 'is_same', (['p'], {}), '(p)\n', (4694, 4697), False, 'from src.operator.solver.common.shape import is_same\n'), ((3940, 4005), 'src.data.Matter', 'Matter', (['new_values'], {'background_color': 'c.background_color', 'new': '(True)'}), '(new_values, background_color=c.background_color, new=True)\n', (3946, 4005), False, 'from src.data import Problem, Case, Matter\n'), ((4225, 4290), 'src.data.Matter', 'Matter', (['new_values'], {'background_color': 'c.background_color', 'new': '(True)'}), '(new_values, background_color=c.background_color, new=True)\n', (4231, 4290), False, 'from src.data import Problem, Case, Matter\n'), ((4518, 4583), 'src.data.Matter', 'Matter', (['new_values'], {'background_color': 'c.background_color', 'new': '(True)'}), '(new_values, background_color=c.background_color, new=True)\n', (4524, 
4583), False, 'from src.data import Problem, Case, Matter\n'), ((501, 526), 'numpy.unique', 'np.unique', (['x_arr[i::p, j]'], {}), '(x_arr[i::p, j])\n', (510, 526), True, 'import numpy as np\n'), ((3081, 3102), 'numpy.abs', 'np.abs', (['(z_arr - y_arr)'], {}), '(z_arr - y_arr)\n', (3087, 3102), True, 'import numpy as np\n')]
|
# Code is from OpenAI Baseline and Tensor2Tensor
import itertools
import math
import multiprocessing as mp

import numpy as np
from gym.envs.box2d import CarRacing
def printstar(string, num_stars=50):
    """Print *string* framed above and below by a row of asterisks.

    Args:
        string: text to display.
        num_stars: width of the asterisk border rows (default 50).
    """
    border = "*" * num_stars
    for line in (border, string, border):
        print(line)
def make_env():
    """Return a zero-argument factory (thunk) that builds a CarRacing env.

    The thunk form is what SubprocVecEnv expects: the environment is only
    constructed inside the worker subprocess, not in the parent.
    """
    def _thunk():
        return CarRacing(grayscale=0, show_info_panel=0,
                         discretize_actions="hard", frames_per_state=1,
                         num_lanes=1, num_tracks=1)
    return _thunk
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess entry point: serve environment commands over a pipe.

    Builds one environment from *env_fn_wrapper* and then loops, answering
    ('step', action), ('reset', _), ('reset_task', _), ('get_spaces', _) and
    ('close', _) messages until told to close.
    """
    # The parent's end of the pipe is useless in the child; close it.
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            # Auto-reset finished episodes so the caller always gets a
            # valid next observation.
            if done:
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'reset_task':
            remote.send(env.reset_task())
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError
class VecEnv(object):
    """Abstract base for an asynchronous, vectorized batch of environments.

    Concrete subclasses run ``num_envs`` environments and expose batched
    reset/step operations across all of them at once.
    """

    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space

    def reset(self):
        """Reset every environment and return the batched observations.

        Any in-flight step_async work is cancelled; step_wait() should not
        be called until step_async() is invoked again.
        """
        pass

    def step_async(self, actions):
        """Begin stepping every environment with the given actions.

        Results are collected later via step_wait(); do not call this while
        a previous async step is still pending.
        """
        pass

    def step_wait(self):
        """Collect the results of the last step_async().

        Returns (obs, rews, dones, infos): batched observations, rewards,
        episode-done booleans, and a sequence of per-env info objects.
        """
        pass

    def close(self):
        """Release all resources held by the environments."""
        pass

    def step(self, actions):
        """Convenience synchronous step: step_async followed by step_wait."""
        self.step_async(actions)
        return self.step_wait()
class CloudpickleWrapper(object):
    """Serialize the wrapped object with cloudpickle instead of pickle.

    multiprocessing pickles everything it sends to workers; cloudpickle can
    handle closures/lambdas (like env thunks) that the standard pickler
    rejects.  Deserialization on the receiving side uses plain pickle.
    """

    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, ob):
        import pickle
        self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
    """VecEnv that runs each environment in its own worker subprocess."""
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        self.nenvs = nenvs
        # One duplex pipe per environment; the parent keeps `remotes`,
        # each worker process gets the matching entry of `work_remotes`.
        self.remotes, self.work_remotes = zip(*[mp.Pipe() for _ in range(nenvs)])
        self.ps = [mp.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        # The worker ends of the pipes now live in the subprocesses; close
        # the parent's copies so EOF propagates correctly.
        for remote in self.work_remotes:
            remote.close()
        # Ask one worker for the (shared) observation/action spaces.
        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    def step_async(self, actions):
        # A single int action is broadcast to every environment; otherwise
        # actions are matched to environments positionally.
        if(type(actions) == int):
            for remote in self.remotes:
                remote.send(('step', actions))
        else:
            for remote, action in zip(self.remotes, actions):
                remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        # Blocking receive, in the same order the commands were sent.
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        if self.closed:
            return
        if self.waiting:
            # Drain any pending step results before asking workers to exit,
            # otherwise the workers would block on their send().
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
    def __len__(self):
        return self.nenvs
def shape_list(x):
  """Return list of dims, statically where possible."""
  x = tf.convert_to_tensor(x)
  # With unknown rank only the fully dynamic shape can be returned.
  if x.get_shape().dims is None:
    return tf.shape(x)
  static_dims = x.get_shape().as_list()
  dynamic_shape = tf.shape(x)
  # Prefer the compile-time size; fall back to the runtime size per axis.
  return [static_dims[i] if static_dims[i] is not None else dynamic_shape[i]
          for i in range(len(static_dims))]
def to_float(x):
  """Cast x to float32; created because tf.to_float is deprecated."""
  return tf.cast(x, dtype=tf.float32)
def cast_like(x, y):
  """Cast x to y's dtype, if necessary."""
  x = tf.convert_to_tensor(x)
  y = tf.convert_to_tensor(y)
  # Nothing to do when the base dtypes already agree.
  if x.dtype.base_dtype == y.dtype.base_dtype:
    return x
  result = tf.cast(x, y.dtype)
  if result.device != x.device:
    # A cross-device cast implies a copy; warn so users can spot it.
    try:
      tensor_name = x.name
    except AttributeError:
      tensor_name = "(eager Tensor)"
    tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'",
                       tensor_name, x.device, result.device)
  return result
def layer_norm_vars(filters):
  """Return the (scale, bias) trainable Variables for layer norm."""
  # scale starts at 1 and bias at 0, i.e. initially an identity transform.
  norm_scale = tf.get_variable("layer_norm_scale", [filters],
                               initializer=tf.ones_initializer())
  norm_bias = tf.get_variable("layer_norm_bias", [filters],
                              initializer=tf.zeros_initializer())
  return norm_scale, norm_bias
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None):
  """Layer norm raw computation.

  Args:
    x: input tensor, normalized over its last dimension.
    epsilon: small constant added to the variance for numeric stability.
    scale: per-channel multiplicative parameter.
    bias: per-channel additive parameter.
    layer_collection: unused in this implementation; accepted so callers
      (e.g. layer_norm) can pass it without breaking.

  Returns:
    The layer-normalized tensor, same shape as x.
  """
  # Cast parameters to x's dtype so mixed-precision inputs work.
  # (The previously saved `params = (scale, bias)` tuple was dead code
  # — never read after assignment — and has been removed.)
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(
      tf.squared_difference(x, mean), axis=[-1], keepdims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  return norm_x * scale + bias
def layer_norm(x,
               filters=None,
               epsilon=1e-6,
               name=None,
               reuse=None,
               layer_collection=None):
  """Apply layer normalization over the last dimension of x.

  Args:
    x: input tensor.
    filters: size of the last dimension; inferred from x when None.
    epsilon: small constant added to the variance for numeric stability.
    name: optional variable-scope name.
    reuse: variable-scope reuse flag.
    layer_collection: forwarded to layer_norm_compute.

  Returns:
    A tensor with the same shape as x.
  """
  num_filters = shape_list(x)[-1] if filters is None else filters
  with tf.variable_scope(
      name, default_name="layer_norm", values=[x], reuse=reuse):
    scale, bias = layer_norm_vars(num_filters)
    return layer_norm_compute(x, epsilon, scale, bias,
                              layer_collection=layer_collection)
def standardize_images(x):
  """Image standardization on batches and videos."""
  with tf.name_scope("standardize_images", values=[x]):
    orig_shape = shape_list(x)
    # Collapse any leading batch/time axes so each image is handled alone.
    flat = to_float(tf.reshape(x, [-1] + orig_shape[-3:]))
    mean = tf.reduce_mean(flat, axis=[1, 2], keepdims=True)
    variance = tf.reduce_mean(
        tf.squared_difference(flat, mean), axis=[1, 2], keepdims=True)
    num_pixels = to_float(orig_shape[-2] * orig_shape[-3])
    # Lower-bound the divisor by 1/sqrt(pixel count) so a near-constant
    # image does not blow up from a tiny variance.
    normalized = (flat - mean) / tf.maximum(
        tf.sqrt(variance), tf.rsqrt(num_pixels))
    return tf.reshape(normalized, orig_shape)
def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1):
  """Pad tensors x and y on `axis` so that they have the same length.

  Optionally rounds the common length up to a multiple of
  `final_length_divisible_by`.  Only axis 1 and axis 2 are supported.
  """
  if axis not in [1, 2]:
    raise ValueError("Only axis=1 and axis=2 supported for now.")
  with tf.name_scope("pad_to_same_length", values=[x, y]):
    x_length = shape_list(x)[axis]
    y_length = shape_list(y)[axis]
    # Fast path: both lengths are static, equal, and no rounding requested.
    if (isinstance(x_length, int) and isinstance(y_length, int) and
        x_length == y_length and final_length_divisible_by == 1):
      return x, y
    max_length = tf.maximum(x_length, y_length)
    if final_length_divisible_by > 1:
      # Find the nearest larger-or-equal integer divisible by given number.
      max_length += final_length_divisible_by - 1
      max_length //= final_length_divisible_by
      max_length *= final_length_divisible_by
    length_diff1 = max_length - x_length
    length_diff2 = max_length - y_length
    # Build a full tf.pad padding spec: explicit trailing padding on `axis`,
    # zeros (no padding) for every remaining dimension of `arg`.
    def padding_list(length_diff, arg):
      if axis == 1:
        return [[[0, 0], [0, length_diff]],
                tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)]
      return [[[0, 0], [0, 0], [0, length_diff]],
              tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)]
    paddings1 = tf.concat(padding_list(length_diff1, x), axis=0)
    paddings2 = tf.concat(padding_list(length_diff2, y), axis=0)
    res_x = tf.pad(x, paddings1)
    res_y = tf.pad(y, paddings2)
    # Static shapes are the same except for axis=1.
    x_shape = x.shape.as_list()
    x_shape[axis] = None
    res_x.set_shape(x_shape)
    y_shape = y.shape.as_list()
    y_shape[axis] = None
    res_y.set_shape(y_shape)
    return res_x, res_y
def make_even_size(x):
  """Pad x to be even-sized on axis 1 and 2, but only if necessary."""
  x_shape = x.get_shape().as_list()
  assert len(x_shape) > 2, "Only 3+-dimensional tensors supported."
  # Substitute -1 for unknown dims so the parity checks below treat an
  # unknown size as "odd" (-1 % 2 != 0) and force padding.
  shape = [dim if dim is not None else -1 for dim in x_shape]
  new_shape = x_shape  # To make sure constant shapes remain constant.
  if x_shape[1] is not None:
    new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5))
  if x_shape[2] is not None:
    new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5))
  # Both axes already even: nothing to do.
  if shape[1] % 2 == 0 and shape[2] % 2 == 0:
    return x
  # Pad only the axis (or axes) whose size is odd/unknown, then restore
  # the statically known even shape.
  if shape[1] % 2 == 0:
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
    x.set_shape(new_shape)
    return x
  if shape[2] % 2 == 0:
    x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
    x.set_shape(new_shape)
    return x
  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1)
  x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2)
  x.set_shape(new_shape)
  return x
def add_timing_signal_nd(x, min_timescale=1.0, max_timescale=1.0e4):
  """Adds a bunch of sinusoids of different frequencies to a Tensor.
  Each channel of the input Tensor is incremented by a sinusoid of a different
  frequency and phase in one of the positional dimensions.
  This allows attention to learn to use absolute and relative positions.
  Timing signals should be added to some precursors of both the query and the
  memory inputs to attention.
  The use of relative position is possible because sin(a+b) and cos(a+b) can be
  expressed in terms of b, sin(a) and cos(a).
  x is a Tensor with n "positional" dimensions, e.g. one dimension for a
  sequence or two dimensions for an image
  We use a geometric sequence of timescales starting with
  min_timescale and ending with max_timescale. The number of different
  timescales is equal to channels // (n * 2). For each timescale, we
  generate the two sinusoidal signals sin(timestep/timescale) and
  cos(timestep/timescale). All of these sinusoids are concatenated in
  the channels dimension.
  Args:
    x: a Tensor with shape [batch, d1 ... dn, channels]
    min_timescale: a float
    max_timescale: a float
  Returns:
    a Tensor the same shape as x.
  """
  # First and last axes are batch and channels; the rest are positional.
  num_dims = len(x.get_shape().as_list()) - 2
  channels = shape_list(x)[-1]
  # Each positional dim gets channels // (num_dims * 2) timescales,
  # producing one sin and one cos signal per timescale.
  num_timescales = channels // (num_dims * 2)
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  # Geometric progression of inverse timescales.
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  for dim in range(num_dims):
    length = shape_list(x)[dim + 1]
    position = tf.to_float(tf.range(length))
    # Outer product: [length, num_timescales] phase matrix for this dim.
    scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(
        inv_timescales, 0)
    signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
    # Place this dim's signal in its own slice of the channel axis.
    prepad = dim * 2 * num_timescales
    postpad = channels - (dim + 1) * 2 * num_timescales
    signal = tf.pad(signal, [[0, 0], [prepad, postpad]])
    # Insert singleton axes so the signal broadcasts against x.
    for _ in range(1 + dim):
      signal = tf.expand_dims(signal, 0)
    for _ in range(num_dims - 1 - dim):
      signal = tf.expand_dims(signal, -2)
    x += signal
  return x
|
[
"pickle.loads",
"numpy.stack",
"gym.envs.box2d.CarRacing",
"cloudpickle.dumps",
"multiprocessing.Pipe"
] |
[((312, 431), 'gym.envs.box2d.CarRacing', 'CarRacing', ([], {'grayscale': '(0)', 'show_info_panel': '(0)', 'discretize_actions': '"""hard"""', 'frames_per_state': '(1)', 'num_lanes': '(1)', 'num_tracks': '(1)'}), "(grayscale=0, show_info_panel=0, discretize_actions='hard',\n frames_per_state=1, num_lanes=1, num_tracks=1)\n", (321, 431), False, 'from gym.envs.box2d import CarRacing\n'), ((2980, 3005), 'cloudpickle.dumps', 'cloudpickle.dumps', (['self.x'], {}), '(self.x)\n', (2997, 3005), False, 'import cloudpickle\n'), ((3077, 3093), 'pickle.loads', 'pickle.loads', (['ob'], {}), '(ob)\n', (3089, 3093), False, 'import pickle\n'), ((4546, 4559), 'numpy.stack', 'np.stack', (['obs'], {}), '(obs)\n', (4554, 4559), True, 'import numpy as np\n'), ((4561, 4575), 'numpy.stack', 'np.stack', (['rews'], {}), '(rews)\n', (4569, 4575), True, 'import numpy as np\n'), ((4577, 4592), 'numpy.stack', 'np.stack', (['dones'], {}), '(dones)\n', (4585, 4592), True, 'import numpy as np\n'), ((3425, 3434), 'multiprocessing.Pipe', 'mp.Pipe', ([], {}), '()\n', (3432, 3434), True, 'import multiprocessing as mp\n')]
|
# Tensorflow and numpy to create the neural network
import tensorflow as tf
import numpy as np
# Matplotlib to plot info to show our results
import matplotlib.pyplot as plt
# OS to load files and save checkpoints
import os
# Load MNIST data from tf examples
# MNIST images are 28x28 single-channel (grayscale) digits.
image_height = 28
image_width = 28
color_channels = 1
model_name = "mnist"
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
train_data = mnist.train.images
train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
eval_data = mnist.test.images
eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
# Class names are simply the digits 0-9.
category_names = list(map(str, range(10)))
# TODO: Process mnist data
print(train_data.shape)
# Reshape the flat 784-vector images into NHWC image tensors.
train_data = np.reshape(train_data, (-1, image_height, image_width, color_channels))
print(train_data.shape)
eval_data = np.reshape(eval_data, (-1, image_height, image_width, color_channels))
# Load cifar data from file
# CIFAR-10 images are 32x32 RGB.  NOTE: these globals overwrite the MNIST
# values above, so whichever dataset section runs last wins.
image_height = 32
image_width = 32
color_channels = 3
model_name = "cifar"
def unpickle(file):
    """Load one CIFAR-10 batch file.

    Args:
        file: path to a CIFAR-10 batch file (pickled in binary mode).

    Returns:
        The unpickled dict; CIFAR batches are keyed by bytes, e.g.
        b'data' and b'labels'.
    """
    import pickle
    # NOTE: pickle.load executes arbitrary code — only load trusted files.
    with open(file, 'rb') as fo:
        # Fixed: the local was previously named `dict`, shadowing the builtin.
        batch = pickle.load(fo, encoding='bytes')
    return batch
cifar_path = './cifar-10-data/'
# Accumulate all five training batches into flat arrays.
train_data = np.array([])
train_labels = np.array([])
# Load all the data batches.
for i in range(1, 6):
    data_batch = unpickle(cifar_path + 'data_batch_' + str(i))
    train_data = np.append(train_data, data_batch[b'data'])
    train_labels = np.append(train_labels, data_batch[b'labels'])
# Load the eval batch.
eval_batch = unpickle(cifar_path + 'test_batch')
eval_data = eval_batch[b'data']
eval_labels = eval_batch[b'labels']
# Load the english category names.
category_names_bytes = unpickle(cifar_path + 'batches.meta')[b'label_names']
category_names = list(map(lambda x: x.decode("utf-8"), category_names_bytes))
# TODO: Process Cifar data
def process_data(data):
    """Scale raw pixel bytes to [0, 1] and convert NCHW rows to NHWC.

    Relies on the module-level globals color_channels, image_height and
    image_width set by the dataset section above.
    """
    pixels = np.array(data, dtype=float) / 255.0
    nchw = np.reshape(pixels, (-1, color_channels, image_height, image_width))
    # Move the channel axis last so conv2d sees NHWC input.
    return np.transpose(nchw, [0, 2, 3, 1])
# Convert both splits from raw byte rows to float NHWC arrays.
train_data = process_data(train_data)
eval_data = process_data(eval_data)
# TODO: The neural network
class ConvNet:
    """Two-conv-layer CNN classifier built with the TF1 graph API.

    Exposes the graph endpoints as attributes: input_layer, labels,
    choice (argmax predictions), probability, accuracy/accuracy_op,
    loss, and train_operation.
    """
    def __init__(self, image_height, image_width, channels, num_classes):
        self.input_layer = tf.placeholder(dtype=tf.float32, shape=[None, image_height, image_width, channels],
                                          name="inputs")
        print(self.input_layer.shape)
        # conv(5x5, 32) -> maxpool(2x2) -> conv(5x5, 64) -> maxpool(2x2)
        conv_layer_1 = tf.layers.conv2d(self.input_layer, filters=32, kernel_size=[5, 5], padding="same",
                                         activation=tf.nn.relu)
        print(conv_layer_1.shape)
        pooling_layer_1 = tf.layers.max_pooling2d(conv_layer_1, pool_size=[2, 2], strides=2)
        print(pooling_layer_1.shape)
        conv_layer_2 = tf.layers.conv2d(pooling_layer_1, filters=64, kernel_size=[5, 5], padding="same",
                                         activation=tf.nn.relu)
        print(conv_layer_2.shape)
        pooling_layer_2 = tf.layers.max_pooling2d(conv_layer_2, pool_size=[2, 2], strides=2)
        print(pooling_layer_2.shape)
        # Dense head: flatten -> 1024-unit ReLU -> dropout -> class logits.
        flattened_pooling = tf.layers.flatten(pooling_layer_2)
        dense_layer = tf.layers.dense(flattened_pooling, 1024, activation=tf.nn.relu)
        print(dense_layer.shape)
        # NOTE(review): training=True is hard-coded, so dropout also fires
        # at evaluation time — confirm this is intended.
        dropout = tf.layers.dropout(dense_layer, rate=0.4, training=True)
        outputs = tf.layers.dense(dropout, num_classes)
        print(outputs.shape)
        self.choice = tf.argmax(outputs, axis=1)
        self.probability = tf.nn.softmax(outputs)
        self.labels = tf.placeholder(dtype=tf.float32, name="labels")
        # Streaming accuracy: accuracy_op updates, accuracy reads the metric.
        self.accuracy, self.accuracy_op = tf.metrics.accuracy(self.labels, self.choice)
        one_hot_labels = tf.one_hot(indices=tf.cast(self.labels, dtype=tf.int32), depth=num_classes)
        self.loss = tf.losses.softmax_cross_entropy(onehot_labels=one_hot_labels, logits=outputs)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-2)
        self.train_operation = optimizer.minimize(loss=self.loss, global_step=tf.train.get_global_step())
# TODO: initialize variables
training_steps = 20000
batch_size = 64
path = "./" + model_name + "-cnn/"
load_checkpoint = True
# Accuracy samples collected every 10 steps for plotting later.
performance_graph = np.array([])
# TODO: implement the training loop
tf.reset_default_graph()
# Shuffled, batched, endlessly-repeating input pipeline over the training set.
dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels))
dataset = dataset.shuffle(buffer_size=train_labels.shape[0])
dataset = dataset.batch(batch_size)
dataset = dataset.repeat()
dataset_iterator = dataset.make_initializable_iterator()
next_element = dataset_iterator.get_next()
cnn = ConvNet(image_height,image_width,color_channels,10)
saver = tf.train.Saver(max_to_keep=2)
if not os.path.exists(path):
    os.makedirs(path)
with tf.Session() as sess:
    # Either resume from the latest checkpoint or start from scratch.
    if load_checkpoint:
        checkpoint = tf.train.get_checkpoint_state(path)
        saver.restore(sess, checkpoint.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())
    # Local variables back the streaming-accuracy metric.
    sess.run(tf.local_variables_initializer())
    sess.run(dataset_iterator.initializer)
    for step in range(training_steps):
        current_batch = sess.run(next_element)
        batch_inputs = current_batch[0]
        batch_labels = current_batch[1]
        sess.run((cnn.train_operation, cnn.accuracy_op), feed_dict={cnn.input_layer: batch_inputs, cnn.labels: batch_labels})
        if step % 10 == 0:
            performance_graph = np.append(performance_graph, sess.run(cnn.accuracy))
        # Periodic progress report and checkpoint.
        if step % 1000 == 0 and step > 0:
            current_acc = sess.run(cnn.accuracy)
            print("Accuracy at step " + str(step) + ": " + str(current_acc))
            print("Saving checkpoint")
            saver.save(sess, path + model_name, step)
    print("Saving final checkpoint for training session.")
    saver.save(sess, path + model_name, step)
|
[
"tensorflow.reset_default_graph",
"tensorflow.local_variables_initializer",
"pickle.load",
"tensorflow.layers.max_pooling2d",
"tensorflow.nn.softmax",
"tensorflow.metrics.accuracy",
"numpy.transpose",
"os.path.exists",
"numpy.append",
"tensorflow.placeholder",
"tensorflow.cast",
"numpy.reshape",
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.get_global_step",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"tensorflow.layers.dropout",
"tensorflow.layers.flatten",
"tensorflow.Session",
"tensorflow.layers.conv2d",
"tensorflow.train.GradientDescentOptimizer",
"os.makedirs",
"tensorflow.argmax",
"tensorflow.layers.dense",
"tensorflow.contrib.learn.datasets.load_dataset",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.array",
"tensorflow.losses.softmax_cross_entropy"
] |
[((348, 395), 'tensorflow.contrib.learn.datasets.load_dataset', 'tf.contrib.learn.datasets.load_dataset', (['"""mnist"""'], {}), "('mnist')\n", (386, 395), True, 'import tensorflow as tf\n'), ((444, 490), 'numpy.asarray', 'np.asarray', (['mnist.train.labels'], {'dtype': 'np.int32'}), '(mnist.train.labels, dtype=np.int32)\n', (454, 490), True, 'import numpy as np\n'), ((536, 581), 'numpy.asarray', 'np.asarray', (['mnist.test.labels'], {'dtype': 'np.int32'}), '(mnist.test.labels, dtype=np.int32)\n', (546, 581), True, 'import numpy as np\n'), ((692, 763), 'numpy.reshape', 'np.reshape', (['train_data', '(-1, image_height, image_width, color_channels)'], {}), '(train_data, (-1, image_height, image_width, color_channels))\n', (702, 763), True, 'import numpy as np\n'), ((802, 872), 'numpy.reshape', 'np.reshape', (['eval_data', '(-1, image_height, image_width, color_channels)'], {}), '(eval_data, (-1, image_height, image_width, color_channels))\n', (812, 872), True, 'import numpy as np\n'), ((1166, 1178), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1174, 1178), True, 'import numpy as np\n'), ((1194, 1206), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1202, 1206), True, 'import numpy as np\n'), ((4273, 4285), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4281, 4285), True, 'import numpy as np\n'), ((4323, 4347), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4345, 4347), True, 'import tensorflow as tf\n'), ((4359, 4421), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_data, train_labels)'], {}), '((train_data, train_labels))\n', (4393, 4421), True, 'import tensorflow as tf\n'), ((4715, 4744), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(2)'}), '(max_to_keep=2)\n', (4729, 4744), True, 'import tensorflow as tf\n'), ((1339, 1381), 'numpy.append', 'np.append', (['train_data', "data_batch[b'data']"], {}), "(train_data, data_batch[b'data'])\n", (1348, 1381), 
True, 'import numpy as np\n'), ((1401, 1447), 'numpy.append', 'np.append', (['train_labels', "data_batch[b'labels']"], {}), "(train_labels, data_batch[b'labels'])\n", (1410, 1447), True, 'import numpy as np\n'), ((1908, 1979), 'numpy.reshape', 'np.reshape', (['float_data', '(-1, color_channels, image_height, image_width)'], {}), '(float_data, (-1, color_channels, image_height, image_width))\n', (1918, 1979), True, 'import numpy as np\n'), ((2003, 2044), 'numpy.transpose', 'np.transpose', (['reshaped_data', '[0, 2, 3, 1]'], {}), '(reshaped_data, [0, 2, 3, 1])\n', (2015, 2044), True, 'import numpy as np\n'), ((4753, 4773), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4767, 4773), False, 'import os\n'), ((4779, 4796), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (4790, 4796), False, 'import os\n'), ((4803, 4815), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4813, 4815), True, 'import tensorflow as tf\n'), ((1068, 1101), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""bytes"""'}), "(fo, encoding='bytes')\n", (1079, 1101), False, 'import pickle\n'), ((1852, 1879), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (1860, 1879), True, 'import numpy as np\n'), ((2293, 2395), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, image_height, image_width, channels]', 'name': '"""inputs"""'}), "(dtype=tf.float32, shape=[None, image_height, image_width,\n channels], name='inputs')\n", (2307, 2395), True, 'import tensorflow as tf\n'), ((2496, 2606), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['self.input_layer'], {'filters': '(32)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(self.input_layer, filters=32, kernel_size=[5, 5], padding=\n 'same', activation=tf.nn.relu)\n", (2512, 2606), True, 'import tensorflow as tf\n'), ((2703, 2769), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['conv_layer_1'], 
{'pool_size': '[2, 2]', 'strides': '(2)'}), '(conv_layer_1, pool_size=[2, 2], strides=2)\n', (2726, 2769), True, 'import tensorflow as tf\n'), ((2831, 2940), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['pooling_layer_1'], {'filters': '(64)', 'kernel_size': '[5, 5]', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(pooling_layer_1, filters=64, kernel_size=[5, 5], padding=\n 'same', activation=tf.nn.relu)\n", (2847, 2940), True, 'import tensorflow as tf\n'), ((3037, 3103), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['conv_layer_2'], {'pool_size': '[2, 2]', 'strides': '(2)'}), '(conv_layer_2, pool_size=[2, 2], strides=2)\n', (3060, 3103), True, 'import tensorflow as tf\n'), ((3170, 3204), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['pooling_layer_2'], {}), '(pooling_layer_2)\n', (3187, 3204), True, 'import tensorflow as tf\n'), ((3227, 3290), 'tensorflow.layers.dense', 'tf.layers.dense', (['flattened_pooling', '(1024)'], {'activation': 'tf.nn.relu'}), '(flattened_pooling, 1024, activation=tf.nn.relu)\n', (3242, 3290), True, 'import tensorflow as tf\n'), ((3342, 3397), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['dense_layer'], {'rate': '(0.4)', 'training': '(True)'}), '(dense_layer, rate=0.4, training=True)\n', (3359, 3397), True, 'import tensorflow as tf\n'), ((3416, 3453), 'tensorflow.layers.dense', 'tf.layers.dense', (['dropout', 'num_classes'], {}), '(dropout, num_classes)\n', (3431, 3453), True, 'import tensorflow as tf\n'), ((3506, 3532), 'tensorflow.argmax', 'tf.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (3515, 3532), True, 'import tensorflow as tf\n'), ((3560, 3582), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['outputs'], {}), '(outputs)\n', (3573, 3582), True, 'import tensorflow as tf\n'), ((3606, 3653), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'name': '"""labels"""'}), "(dtype=tf.float32, name='labels')\n", (3620, 3653), True, 'import tensorflow as tf\n'), 
((3696, 3741), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', (['self.labels', 'self.choice'], {}), '(self.labels, self.choice)\n', (3715, 3741), True, 'import tensorflow as tf\n'), ((3864, 3941), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', ([], {'onehot_labels': 'one_hot_labels', 'logits': 'outputs'}), '(onehot_labels=one_hot_labels, logits=outputs)\n', (3895, 3941), True, 'import tensorflow as tf\n'), ((3963, 4016), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (3996, 4016), True, 'import tensorflow as tf\n'), ((4870, 4905), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['path'], {}), '(path)\n', (4899, 4905), True, 'import tensorflow as tf\n'), ((5044, 5076), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (5074, 5076), True, 'import tensorflow as tf\n'), ((4995, 5028), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5026, 5028), True, 'import tensorflow as tf\n'), ((3787, 3823), 'tensorflow.cast', 'tf.cast', (['self.labels'], {'dtype': 'tf.int32'}), '(self.labels, dtype=tf.int32)\n', (3794, 3823), True, 'import tensorflow as tf\n'), ((4095, 4121), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (4119, 4121), True, 'import tensorflow as tf\n')]
|
import os
import sys
import copy
import pickle
import numpy as np
import pandas as pd
from inspect import signature
from tensorflow import keras
from skorch.net import NeuralNet
from abc import ABCMeta, abstractmethod
from commonmodels2.utils.utils import *
from commonmodels2.log.logger import Logger
class ModelBase(metaclass=ABCMeta):
    """Common scaffolding for framework-specific model wrappers.

    Tracks the underlying model object, the callable that builds it, a
    parameter dict, and whether the model has been finalized (built) yet.
    Subclasses add framework specifics (sklearn, TensorFlow, PyTorch).
    """

    def __init__(self):
        self._model = None
        self._model_create_fn = None
        self._params = {}
        self._finalized = False

    def get_model(self):
        """Return the built model; only valid after finalize()."""
        if not self._finalized:
            raise RuntimeError("Models must be finalized prior to getting")
        return self._model

    def set_model_create_func(self, func):
        """Register *func*, a one-positional-argument callable that builds
        the model from a params dict."""
        sig_params = list(signature(func).parameters.values())
        if len(sig_params) != 1:
            raise ValueError("model_create_fn must accept a single argument")
        arg = sig_params[0]
        if arg.kind not in (arg.POSITIONAL_ONLY, arg.POSITIONAL_OR_KEYWORD):
            raise ValueError("model_create_fn must have similar prototype to `def func(params):`")
        if arg.default is not arg.empty:
            raise ValueError("model_create_fn argument cannot have default value")
        self._model_create_fn = func

    def get_params(self):
        """Return a defensive deep copy of the parameter dict."""
        return copy.deepcopy(self._params)

    def set_params(self, params):
        """Store a deep copy of *params* so later caller edits don't leak in."""
        self._params = copy.deepcopy(params)

    def load(self, file_path):
        """Validate that *file_path*'s directory exists; subclasses load."""
        if not os.path.isdir(os.path.dirname(file_path)):
            raise ValueError("Cannot load model from '%s' because file does not exist"%(file_path))

    def save(self, out_folder, file_name):
        """Ensure *out_folder* exists before a subclass writes into it."""
        if not os.path.isdir(out_folder):
            os.makedirs(out_folder)

    def finalize(self):
        """Build the model by invoking the registered create function."""
        self._model = self._model_create_fn(self._params)
        self._finalized = True

    def fit(self, X, y):
        """Guard: fitting requires a finalized model."""
        if not self._finalized:
            raise RuntimeError("Models must be finalized prior to fitting")

    def predict(self, X):
        """Guard: prediction requires a finalized model."""
        if not self._finalized:
            raise RuntimeError("Models must be finalized prior to prediction")

    params = property(get_params, set_params)
class SklearnModel(ModelBase):
    """ModelBase wrapper around scikit-learn style estimators.

    The underlying estimator is persisted with pickle (.pkl files).
    """

    def __init__(self):
        super().__init__()
        self._model_params = None

    def set_params(self, params):
        """Accept a nested params dict; only the 'model' section is used."""
        if params.get('model'):
            self.set_model_params(params['model'])

    def get_model_params(self):
        return self._model_params

    def set_model_params(self, model_params):
        self._model_params = model_params

    def load(self, file_path):
        """Unpickle a previously saved estimator from *file_path*."""
        super().load(file_path)
        # NOTE: pickle.load executes arbitrary code — only load trusted files.
        with open(file_path, "rb") as in_file:
            # BUG FIX: this previously read pickle.load(out_file), a NameError
            # — the opened handle is named in_file.
            self._model = pickle.load(in_file)
        self._finalized = True

    def save(self, out_folder, file_name):
        """Pickle the estimator to out_folder/file_name, forcing a .pkl suffix."""
        super().save(out_folder, file_name)
        if not file_name.endswith('.pkl'):
            if '.' in file_name:
                file_name = file_name.split('.')[0]+'.pkl'
            else:
                file_name = file_name+'.pkl'
        file_path = os.path.join(out_folder, file_name)
        with open(file_path, "wb") as out_file:
            pickle.dump(self._model, out_file)

    def finalize(self):
        """Copy the model params into the base params dict, then build."""
        self.params = self.model_params
        super().finalize()

    def fit(self, X, y):
        """Fit the estimator on (X, y) and return it."""
        super().fit(X, y)
        self._model.fit(X, y)
        return self._model

    def predict(self, X):
        """Return the estimator's predictions for X."""
        super().predict(X)
        preds = self._model.predict(X)
        return preds

    model_params = property(get_model_params, set_model_params)
class TensorFlowModel(ModelBase):
    """ModelBase wrapper around a compiled Keras/TensorFlow model.

    Hyperparameters are grouped into four dicts — model construction,
    compile(), fit(), and predict() keyword arguments — and optional
    transformer callables may reshape data before fitting/prediction and
    post-process raw predictions.
    """

    def __init__(self):
        super().__init__()
        self._model_params = {}
        self._compile_params = {}
        self._predict_params = {}
        self._fit_params = {}
        self._fit_transformer_fn = None
        self._pred_transformer_fn = None

    def set_params(self, params):
        """Distribute a nested params dict into the per-stage param dicts.

        Recognized keys: 'model', 'fit', 'compile', 'predict'.
        """
        if params.get('model'):
            self.set_model_params(params['model'])
        if params.get('fit'):
            self.set_fit_params(params['fit'])
        if params.get('compile'):
            # BUG FIX: this previously called self.set_optimizer_params,
            # which is not defined on this class (AttributeError at runtime).
            self.set_compile_params(params['compile'])
        if params.get('predict'):
            self.set_predict_params(params['predict'])

    def get_model_params(self):
        return self._model_params

    def set_model_params(self, model_params):
        self._model_params = model_params

    def get_fit_params(self):
        return self._fit_params

    def set_fit_params(self, fit_params):
        self._fit_params = fit_params

    def get_compile_params(self):
        return self._compile_params

    def set_compile_params(self, compile_params):
        self._compile_params = compile_params

    def get_predict_params(self):
        return self._predict_params

    def set_predict_params(self, predict_params):
        self._predict_params = predict_params

    def set_fit_transformer(self, data_trans_func):
        """Register a callable `func(X, y)` applied to data before fit/predict."""
        sig = signature(data_trans_func)
        if len(sig.parameters) != 2:
            raise ValueError("data_transformer_fn must accept two arguments: X (data) and y (labels)")
        for param in list(sig.parameters.values())[:2]:
            if not (param.kind == param.POSITIONAL_ONLY or param.kind == param.POSITIONAL_OR_KEYWORD):
                raise ValueError("data_transformer_fn must have similar prototype to `def func(X, y):`")
            if not (param.default is param.empty):
                raise ValueError("data_transformer_fn argument cannot have default value")
        self._fit_transformer_fn = data_trans_func

    def set_prediction_transformer(self, pred_trans_func):
        """Register a callable `func(y_pred)` applied to raw predictions."""
        sig = signature(pred_trans_func)
        if len(sig.parameters) != 1:
            raise ValueError("pred_transformer_fn must accept one argument: y_pred (labels)")
        for param in list(sig.parameters.values())[:1]:
            if not (param.kind == param.POSITIONAL_ONLY or param.kind == param.POSITIONAL_OR_KEYWORD):
                raise ValueError("pred_transformer_fn must have similar prototype to `def func(y_pred):`")
            if not (param.default is param.empty):
                raise ValueError("pred_transformer_fn argument cannot have default value")
        self._pred_transformer_fn = pred_trans_func

    def load(self, file_path):
        """Load a saved Keras model from *file_path* and mark it finalized."""
        super().load(file_path)
        self._model = keras.models.load_model(file_path)
        self._finalized = True

    def save(self, out_folder, file_name):
        """Save the Keras model under out_folder/file_name."""
        super().save(out_folder, file_name)
        file_path = os.path.join(out_folder, file_name)
        self._model.save(file_path)

    def finalize(self):
        """Build the model from the model params, then compile it."""
        self.params = self.model_params
        super().finalize()
        try:
            self._model.compile(**self.compile_params)
        except Exception as e:
            raise RuntimeError("Failed to compile TensorFlowModel with exception: {}".format(str(e)))

    def fit(self, X, y):
        """Fit on (X, y), applying the fit transformer first when present."""
        super().fit(X, y)
        if self._fit_transformer_fn is not None:
            trans_X, trans_y = self._fit_transformer_fn(X, y)
            self._model.fit(trans_X, trans_y, **self._fit_params)
        else:
            self._model.fit(X, y, **self._fit_params)
        return self._model

    def predict(self, X):
        """Predict on X.

        The fit transformer (called with y=None) and the prediction
        transformer are applied when registered.
        """
        super().predict(X)
        if self._fit_transformer_fn is not None:
            trans_X, trans_y = self._fit_transformer_fn(X, None)
            preds = self._model.predict(trans_X, **self._predict_params)
        else:
            preds = self._model.predict(X, **self._predict_params)
        if self._pred_transformer_fn is not None:
            preds = self._pred_transformer_fn(preds)
        return preds

    model_params = property(get_model_params, set_model_params)
    fit_params = property(get_fit_params, set_fit_params)
    compile_params = property(get_compile_params, set_compile_params)
    predict_params = property(get_predict_params, set_predict_params)
class PyTorchModel(ModelBase):
    """Wrapper that trains a PyTorch module through skorch's ``NeuralNet``.

    Parameter dictionaries for the model, optimizer, criterion and fit call
    are stored separately; :meth:`compile` turns them into the
    ``criterion__*`` / ``optimizer__*`` keyword arguments skorch expects.
    """

    def __init__(self):
        super().__init__()
        self._model_params = None
        self._optimizer_params = None
        self._criterion_params = None
        self._fit_params = None
        # Default transformer coerces pandas/numpy inputs to dtypes PyTorch
        # accepts (see _default_fit_transformer).
        self._fit_transformer_fn = PyTorchModel._default_fit_transformer
        self._pred_transformer_fn = None

    def set_params(self, params):
        """Set any of the 'model', 'fit', 'optimizer', 'criterion' sub-dicts."""
        if params.get('model'):
            self.set_model_params(params['model'])
        if params.get('fit'):
            self.set_fit_params(params['fit'])
        if params.get('optimizer'):
            self.set_optimizer_params(params['optimizer'])
        if params.get('criterion'):
            self.set_criterion_params(params['criterion'])

    def get_criterion_params(self, fn_key="criterion"):
        """Return ``(criterion_fn, params)``, params using skorch's
        ``criterion__`` key prefix.

        Raises:
            ValueError: when no criterion params are set or ``fn_key`` is
                missing/empty.
        """
        criterion_params = copy.deepcopy(self._criterion_params)
        if criterion_params:
            criterion_fn = criterion_params.pop(fn_key, None)
            if not criterion_fn:
                raise ValueError(f"Requires a {fn_key} parameter")
            criterion_fn = get_torch_loss_func(criterion_fn)
            criterion_params = {f"criterion__{k}": v for k, v in criterion_params.items()}
        else:
            raise ValueError(f"Requires a {fn_key} parameter")
        return criterion_fn, criterion_params

    def set_criterion_params(self, criterion_params):
        self._criterion_params = criterion_params

    def get_model_params(self):
        return self._model_params

    def set_model_params(self, model_params):
        self._model_params = model_params

    def get_optimizer_params(self, fn_key="optimizer", default_fn="sgd"):
        """Return ``(optimizer_fn, params)``, params using skorch's
        ``optimizer__`` key prefix; falls back to ``default_fn`` when no
        optimizer params are set.
        """
        optim_params = copy.deepcopy(self._optimizer_params)
        if optim_params:
            optim_fn = optim_params.pop(fn_key, default_fn)
            optim_fn = get_torch_optimizer(optim_fn)
            optim_params = {f"optimizer__{k}": v for k, v in optim_params.items()}
        else:
            optim_fn = get_torch_optimizer(default_fn)
            optim_params = {}
        return optim_fn, optim_params

    def set_optimizer_params(self, optimizer_params):
        self._optimizer_params = optimizer_params

    def get_fit_params(self):
        return self._fit_params

    def set_fit_params(self, fit_params):
        self._fit_params = fit_params

    def compile(self):
        """Build the skorch ``NeuralNet`` from the stored parameter dicts."""
        loss_fn, loss_params = self.get_criterion_params()
        optim_fn, optim_params = self.get_optimizer_params()
        fit_params = self.get_fit_params()
        user_params = {}
        if loss_params:
            user_params.update(loss_params)
        if optim_params:
            user_params.update(optim_params)
        if fit_params:
            user_params.update(fit_params)
        self._model = NeuralNet(self._model, loss_fn, optimizer=optim_fn, **user_params)

    @classmethod
    def _default_fit_transformer(cls, X, y):
        """Coerce X / y (DataFrame, Series, ndarray or list) into arrays
        with dtypes PyTorch accepts; float64 labels are downcast to float32.
        """
        new_X = X
        if isinstance(X, pd.DataFrame):
            # BB - pytorch doesn't like using float64 tensors (X) with float64 labels (y), so
            # stick to float32 for now
            #best_type = np.float64 if np.any(X.dtypes == np.float64) else np.float32
            best_type = np.float32
            new_X = X.to_numpy(dtype=best_type)
        elif isinstance(X, np.ndarray):
            best_type = X.dtype
            new_X = X.astype(best_type)
        new_y = y
        if isinstance(y, pd.DataFrame):
            best_type = np.float64 if np.any(y.dtypes == np.float64) else np.float32
            if best_type == np.float64:  # BB - pytorch doesn't seem to like float64 labels ever?
                best_type = np.float32
            new_y = y.to_numpy(dtype=best_type)
        elif isinstance(y, pd.Series):
            best_type = y.dtype
            if best_type == np.float64:  # See above
                best_type = np.float32
            new_y = y.to_numpy(dtype=best_type)
        elif isinstance(y, np.ndarray):
            best_type = y.dtype
            if best_type == np.float64:  # See above
                best_type = np.float32
            new_y = y.astype(best_type)
        elif isinstance(y, list):
            best_type = np.float64 if np.any([type(elem) == np.float64 for elem in y]) else np.float32
            if best_type == np.float64:  # See above
                best_type = np.float32
            new_y = np.array(y).astype(best_type)
        return new_X, new_y

    def set_fit_transformer(self, data_trans_func):
        """Register a callable ``func(X, y)`` used to preprocess fit inputs.

        The callable must accept exactly two positional parameters with no
        default values; otherwise ValueError is raised.
        """
        sig = signature(data_trans_func)
        if len(sig.parameters) != 2:
            raise ValueError("data_transformer_fn must accept two arguments: X (data) and y (labels)")
        for param in list(sig.parameters.values())[:2]:
            if not (param.kind == param.POSITIONAL_ONLY or param.kind == param.POSITIONAL_OR_KEYWORD):
                raise ValueError("data_transformer_fn must have similar prototype to `def func(X, y):`")
            if not (param.default is param.empty):
                raise ValueError("data_transformer_fn argument cannot have default value")
        self._fit_transformer_fn = data_trans_func

    def set_prediction_transformer(self, pred_trans_func):
        """Register a callable ``func(y_pred)`` applied to raw predictions.

        The callable must accept exactly one positional parameter with no
        default value; otherwise ValueError is raised.
        """
        sig = signature(pred_trans_func)
        if len(sig.parameters) != 1:
            raise ValueError("pred_transformer_fn must accept one argument: y_pred (labels)")
        for param in list(sig.parameters.values())[:1]:
            if not (param.kind == param.POSITIONAL_ONLY or param.kind == param.POSITIONAL_OR_KEYWORD):
                raise ValueError("pred_transformer_fn must have similar prototype to `def func(y_pred):`")
            if not (param.default is param.empty):
                raise ValueError("pred_transformer_fn argument cannot have default value")
        self._pred_transformer_fn = pred_trans_func

    def load(self, file_path):
        """Load a pickled skorch model from ``file_path``."""
        super().load(file_path)
        with open(file_path, "rb") as in_file:
            # BUG FIX: previously read from the undefined name ``out_file``,
            # which raised NameError on every load; the context manager
            # binds ``in_file``.
            self._model = pickle.load(in_file)
        self._finalized = True

    def save(self, out_folder, file_name):
        """Pickle the model into ``out_folder``, forcing a ``.pkl`` suffix."""
        super().save(out_folder, file_name)
        if not file_name.endswith('.pkl'):
            if '.' in file_name:
                file_name = file_name.split('.')[0] + '.pkl'
            else:
                file_name = file_name + '.pkl'
        file_path = os.path.join(out_folder, file_name)
        with open(file_path, "wb") as out_file:
            pickle.dump(self._model, out_file)

    def finalize(self):
        """Finalize the model and build the skorch net via :meth:`compile`.

        Raises:
            RuntimeError: if compilation fails for any reason.
        """
        self.params = self.model_params
        super().finalize()
        try:
            self.compile()
        except Exception as e:
            raise RuntimeError("Failed to compile PyTorchModel with exception: {}".format(str(e)))

    def fit(self, X, y):
        """Fit the skorch model; inputs pass through the fit transformer if set.

        Returns the fitted model object.
        """
        super().fit(X, y)
        if self._fit_transformer_fn is not None:
            trans_X, trans_y = self._fit_transformer_fn(X, y)
            self._model.fit(trans_X, trans_y)
        else:
            self._model.fit(X, y)
        return self._model

    def predict(self, X):
        """Predict with the skorch model.

        The fit transformer (if set) is reused with ``y=None`` so the input
        gets the same preprocessing as during training; the prediction
        transformer (if set) post-processes the raw output.
        """
        super().predict(X)
        if self._fit_transformer_fn is not None:
            trans_X, trans_y = self._fit_transformer_fn(X, None)
            preds = self._model.predict(trans_X)
        else:
            preds = self._model.predict(X)
        if self._pred_transformer_fn is not None:
            preds = self._pred_transformer_fn(preds)
        return preds

    # Expose the parameter dictionaries through attribute access.
    model_params = property(get_model_params, set_model_params)
    fit_params = property(get_fit_params, set_fit_params)
    optimizer_params = property(get_optimizer_params, set_optimizer_params)
    criterion_params = property(get_criterion_params, set_criterion_params)
|
[
"copy.deepcopy",
"pickle.dump",
"tensorflow.keras.models.load_model",
"os.makedirs",
"os.path.isdir",
"os.path.dirname",
"numpy.any",
"pickle.load",
"inspect.signature",
"numpy.array",
"os.path.join",
"skorch.net.NeuralNet"
] |
[((718, 733), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (727, 733), False, 'from inspect import signature\n'), ((1312, 1339), 'copy.deepcopy', 'copy.deepcopy', (['self._params'], {}), '(self._params)\n', (1325, 1339), False, 'import copy\n'), ((1398, 1419), 'copy.deepcopy', 'copy.deepcopy', (['params'], {}), '(params)\n', (1411, 1419), False, 'import copy\n'), ((3057, 3092), 'os.path.join', 'os.path.join', (['out_folder', 'file_name'], {}), '(out_folder, file_name)\n', (3069, 3092), False, 'import os\n'), ((4947, 4973), 'inspect.signature', 'signature', (['data_trans_func'], {}), '(data_trans_func)\n', (4956, 4973), False, 'from inspect import signature\n'), ((5684, 5710), 'inspect.signature', 'signature', (['pred_trans_func'], {}), '(pred_trans_func)\n', (5693, 5710), False, 'from inspect import signature\n'), ((6427, 6461), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['file_path'], {}), '(file_path)\n', (6450, 6461), False, 'from tensorflow import keras\n'), ((6601, 6636), 'os.path.join', 'os.path.join', (['out_folder', 'file_name'], {}), '(out_folder, file_name)\n', (6613, 6636), False, 'import os\n'), ((8822, 8859), 'copy.deepcopy', 'copy.deepcopy', (['self._criterion_params'], {}), '(self._criterion_params)\n', (8835, 8859), False, 'import copy\n'), ((9683, 9720), 'copy.deepcopy', 'copy.deepcopy', (['self._optimizer_params'], {}), '(self._optimizer_params)\n', (9696, 9720), False, 'import copy\n'), ((10768, 10834), 'skorch.net.NeuralNet', 'NeuralNet', (['self._model', 'loss_fn'], {'optimizer': 'optim_fn'}), '(self._model, loss_fn, optimizer=optim_fn, **user_params)\n', (10777, 10834), False, 'from skorch.net import NeuralNet\n'), ((12484, 12510), 'inspect.signature', 'signature', (['data_trans_func'], {}), '(data_trans_func)\n', (12493, 12510), False, 'from inspect import signature\n'), ((13221, 13247), 'inspect.signature', 'signature', (['pred_trans_func'], {}), '(pred_trans_func)\n', (13230, 13247), False, 'from 
inspect import signature\n'), ((14374, 14409), 'os.path.join', 'os.path.join', (['out_folder', 'file_name'], {}), '(out_folder, file_name)\n', (14386, 14409), False, 'import os\n'), ((1669, 1694), 'os.path.isdir', 'os.path.isdir', (['out_folder'], {}), '(out_folder)\n', (1682, 1694), False, 'import os\n'), ((1708, 1731), 'os.makedirs', 'os.makedirs', (['out_folder'], {}), '(out_folder)\n', (1719, 1731), False, 'import os\n'), ((2698, 2719), 'pickle.load', 'pickle.load', (['out_file'], {}), '(out_file)\n', (2709, 2719), False, 'import pickle\n'), ((3153, 3187), 'pickle.dump', 'pickle.dump', (['self._model', 'out_file'], {}), '(self._model, out_file)\n', (3164, 3187), False, 'import pickle\n'), ((14015, 14036), 'pickle.load', 'pickle.load', (['out_file'], {}), '(out_file)\n', (14026, 14036), False, 'import pickle\n'), ((14470, 14504), 'pickle.dump', 'pickle.dump', (['self._model', 'out_file'], {}), '(self._model, out_file)\n', (14481, 14504), False, 'import pickle\n'), ((1481, 1507), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (1496, 1507), False, 'import os\n'), ((11466, 11496), 'numpy.any', 'np.any', (['(y.dtypes == np.float64)'], {}), '(y.dtypes == np.float64)\n', (11472, 11496), True, 'import numpy as np\n'), ((12358, 12369), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (12366, 12369), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf8
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from terrainbento import BasicHySa, NotCoreNodeBaselevelHandler, PrecipChanger
@pytest.mark.parametrize("m_sp,n_sp", [(1.0 / 3, 2.0 / 3.0), (0.5, 1.0)])
@pytest.mark.parametrize(
    "depression_finder", [None, "DepressionFinderAndRouter"]
)
@pytest.mark.parametrize("solver", ["basic"])
def test_channel_erosion(
    clock_simple, grid_1, m_sp, n_sp, depression_finder, U, solver
):
    """Steady-state channel slopes must match the analytical prediction."""
    baselevel_handler = NotCoreNodeBaselevelHandler(
        grid_1, modify_core_nodes=True, lowering_rate=-U
    )
    K_rock_sp = 0.001
    K_sed_sp = 0.005
    v_sc = 0.001
    # Regolith transport (D) and soil production are switched off so water
    # erosion is the only active process.
    params = {
        "grid": grid_1,
        "clock": clock_simple,
        "regolith_transport_parameter": 0.0,
        "water_erodibility_rock": K_rock_sp,
        "water_erodibility_sediment": K_sed_sp,
        "sp_crit_br": 0,
        "sp_crit_sed": 0,
        "m_sp": m_sp,
        "n_sp": n_sp,
        "settling_velocity": v_sc,
        "sediment_porosity": 0.1,
        "fraction_fines": 0.0,
        "roughness__length_scale": 0.1,
        "solver": solver,
        "soil_transport_decay_depth": 1,
        "soil_production__maximum_rate": 0,
        "soil_production__decay_depth": 0.5,
        "depression_finder": depression_finder,
        "boundary_handlers": {"NotCoreNodeBaselevelHandler": baselevel_handler},
    }
    # Run the model to (approximate) steady state.
    model = BasicHySa(**params)
    for _ in range(2000):
        model.run_one_step(10)
    # Compare the modelled slopes against the analytical steady-state slope.
    slopes = model.grid.at_node["topographic__steepest_slope"]
    areas = model.grid.at_node["surface_water__discharge"]
    area_term = np.power(areas, m_sp)
    expected_slopes = np.power(
        (U * v_sc) / (K_sed_sp * area_term) + U / (K_rock_sp * area_term),
        1.0 / n_sp,
    )
    core = model.grid.core_nodes[1:-1]
    assert_array_almost_equal(slopes[core], expected_slopes[core], decimal=4)
    # Very large time steps are expected to terminate the run with SystemExit.
    with pytest.raises(SystemExit):
        for _ in range(800):
            model.run_one_step(100000)
def test_with_precip_changer(
    clock_simple, grid_1, precip_defaults, precip_testing_factor
):
    """Erodibility coefficients must scale with the precipitation factor."""
    changer = PrecipChanger(grid_1, **precip_defaults)
    model = BasicHySa(
        grid=grid_1,
        clock=clock_simple,
        regolith_transport_parameter=0.0,
        water_erodibility_rock=0.001,
        water_erodibility_sediment=0.01,
        boundary_handlers={"PrecipChanger": changer},
    )
    # Initial erodibilities match the constructor arguments.
    assert model.eroder.K_sed[0] == 0.01
    assert model.eroder.K_br[0] == 0.001
    assert "PrecipChanger" in model.boundary_handlers
    for _ in range(2):
        model.run_one_step(1.0)
    # After two steps both coefficients are scaled by the precip factor.
    assert_array_almost_equal(
        model.eroder.K_sed, 0.01 * precip_testing_factor
    )
    assert_array_almost_equal(
        model.eroder.K_br, 0.001 * precip_testing_factor
    )
|
[
"terrainbento.NotCoreNodeBaselevelHandler",
"numpy.power",
"terrainbento.BasicHySa",
"terrainbento.PrecipChanger",
"pytest.raises",
"pytest.mark.parametrize",
"numpy.testing.assert_array_almost_equal"
] |
[((237, 309), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m_sp,n_sp"""', '[(1.0 / 3, 2.0 / 3.0), (0.5, 1.0)]'], {}), "('m_sp,n_sp', [(1.0 / 3, 2.0 / 3.0), (0.5, 1.0)])\n", (260, 309), False, 'import pytest\n'), ((311, 396), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""depression_finder"""', "[None, 'DepressionFinderAndRouter']"], {}), "('depression_finder', [None,\n 'DepressionFinderAndRouter'])\n", (334, 396), False, 'import pytest\n'), ((400, 444), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""solver"""', "['basic']"], {}), "('solver', ['basic'])\n", (423, 444), False, 'import pytest\n'), ((554, 631), 'terrainbento.NotCoreNodeBaselevelHandler', 'NotCoreNodeBaselevelHandler', (['grid_1'], {'modify_core_nodes': '(True)', 'lowering_rate': '(-U)'}), '(grid_1, modify_core_nodes=True, lowering_rate=-U)\n', (581, 631), False, 'from terrainbento import BasicHySa, NotCoreNodeBaselevelHandler, PrecipChanger\n'), ((1827, 1846), 'terrainbento.BasicHySa', 'BasicHySa', ([], {}), '(**params)\n', (1836, 1846), False, 'from terrainbento import BasicHySa, NotCoreNodeBaselevelHandler, PrecipChanger\n'), ((2329, 2460), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['actual_slopes[model.grid.core_nodes[1:-1]]', 'predicted_slopes[model.grid.core_nodes[1:-1]]'], {'decimal': '(4)'}), '(actual_slopes[model.grid.core_nodes[1:-1]],\n predicted_slopes[model.grid.core_nodes[1:-1]], decimal=4)\n', (2354, 2460), False, 'from numpy.testing import assert_array_almost_equal\n'), ((2714, 2754), 'terrainbento.PrecipChanger', 'PrecipChanger', (['grid_1'], {}), '(grid_1, **precip_defaults)\n', (2727, 2754), False, 'from terrainbento import BasicHySa, NotCoreNodeBaselevelHandler, PrecipChanger\n'), ((3037, 3056), 'terrainbento.BasicHySa', 'BasicHySa', ([], {}), '(**params)\n', (3046, 3056), False, 'from terrainbento import BasicHySa, NotCoreNodeBaselevelHandler, PrecipChanger\n'), ((3313, 3425), 
'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.eroder.K_sed', "(params['water_erodibility_sediment'] * precip_testing_factor)"], {}), "(model.eroder.K_sed, params[\n 'water_erodibility_sediment'] * precip_testing_factor)\n", (3338, 3425), False, 'from numpy.testing import assert_array_almost_equal\n'), ((3448, 3555), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.eroder.K_br', "(params['water_erodibility_rock'] * precip_testing_factor)"], {}), "(model.eroder.K_br, params[\n 'water_erodibility_rock'] * precip_testing_factor)\n", (3473, 3555), False, 'from numpy.testing import assert_array_almost_equal\n'), ((2498, 2523), 'pytest.raises', 'pytest.raises', (['SystemExit'], {}), '(SystemExit)\n', (2511, 2523), False, 'import pytest\n'), ((2152, 2180), 'numpy.power', 'np.power', (['actual_areas', 'm_sp'], {}), '(actual_areas, m_sp)\n', (2160, 2180), True, 'import numpy as np\n'), ((2211, 2239), 'numpy.power', 'np.power', (['actual_areas', 'm_sp'], {}), '(actual_areas, m_sp)\n', (2219, 2239), True, 'import numpy as np\n')]
|
from PIL import Image
import numpy as np
from blend_modes import soft_light, multiply
from core_def import CoreModeKey, BlendKey
class Blend(object):
    """Apply a blend-mode effect (soft light / multiply) to an image.

    The image is blended with itself via the ``blend_modes`` package, which
    requires float array inputs.
    """

    @staticmethod
    def run(image, config, show):
        """Blend ``image`` per ``config`` and return the uint8 result.

        Args:
            image: numpy array image (format expected by blend_modes).
            config: mapping holding the blend mode and opacity.
            show: when True, display the blended result via PIL.
        """
        return Blend.__do_blend(image, config[CoreModeKey.MODE], config[BlendKey.OPACITY], show)

    @staticmethod
    def __do_blend(image, mode, opacity, show=False):
        # Inputs to blend_modes need to be floats; the same image is used as
        # both background and foreground layer (self-blend).
        background_img_float = image.astype(float)
        foreground_img_float = image.astype(float)
        blended_img_float = Blend.EffectFunc[mode](background_img_float, foreground_img_float, opacity)
        # Image needs to be converted back to uint8 type for PIL handling.
        blended_img = np.uint8(blended_img_float)
        if show:
            # Note that alpha channels are displayed in black by PIL by
            # default; try OpenCV if an alpha-aware preview is needed.
            image_o = Image.fromarray(blended_img)
            # BUG FIX: PIL's Image.show() accepts only an optional title;
            # the old call show(mode, opacity) passed two positional
            # arguments, which modern Pillow rejects.
            image_o.show(title="{} (opacity={})".format(mode, opacity))
        return blended_img

    # Dispatch table mapping blend keys to blend_modes functions.
    EffectFunc = {
        BlendKey.SOFT_LIGHT: soft_light,
        BlendKey.MULTIPLY: multiply
    }
|
[
"PIL.Image.fromarray",
"numpy.uint8"
] |
[((776, 803), 'numpy.uint8', 'np.uint8', (['blended_img_float'], {}), '(blended_img_float)\n', (784, 803), True, 'import numpy as np\n'), ((912, 940), 'PIL.Image.fromarray', 'Image.fromarray', (['blended_img'], {}), '(blended_img)\n', (927, 940), False, 'from PIL import Image\n')]
|
from typing import Dict, List
import numpy as np
from matplotlib import pyplot as plt
from src.bounding_box import BoundingBox
from src.evaluators.pascal_voc_evaluator import calculate_ap_every_point
from doors_detector.evaluators.model_evaluator import ModelEvaluator
class MyEvaluator(ModelEvaluator):
    def get_metrics(self, iou_threshold: float = 0.5, confidence_threshold: float = 0.5, plot_curves: bool = False, colors = None) -> Dict:
        """
        This method calculates metrics to evaluate a object detection model.
        This metric is similar to the Pascal VOC metric but it is developed specifically for a robotic context.
        In fact, if the model is used by a robot, it has to process a lot of negative images (images without any object to detect).
        To correctly evaluate the model's performance in a robotic context, it is mandatory to consider also the negative images.
        The final goal is to count the TP, FP, FN, calculate precision and recall and calculate AP.
        This metric works as follow:
        1) Bounding boxes are assigned to the image they belong to.
        2) Predicted bounding boxes are divided according to the positiveness of their image.
            The bounding boxes of positive and negative images are processed separately.
        3) Positive images (images with object to detect) are treated similarly to the Pascal VOC metric.
            a) Predicted bounding boxes are filtered by their confidence using confidence_threshold parameter.
                Each bbox with confidence < confidence_threshold is discarded.
            b) For each positive image, the ground truth bboxes are divided according to their class (label)
            c) Now, all predicted bounding boxes (of all positive images) are ordered according to their confidence, in descending mode.
            d) Each predicted bbox is matched with a single ground truth bounding of the same class belonging to the same image.
                A match is composed by a ground truth bbox and a predicted bbox:
                they must have the iou grater than a threshold and this value must be the maximum among all ground truth bbox.
                A True Positive (TP) is a matched predicted bbox, while False Positives (FP) are not matched predicted bboxes.
                A match fails when the iou area is less then the threshold or
                the ground truth bounding box with the grater iou are has already been matched.
                Each ground truth bbox not matched are considered as False Negative (FN)
        4) The bounding boxes belong to the negative images are processed differently:
            a) A new label is introduced: -1. It indicates all negative images' bounding boxes
            b) The negative images are ordered according to the confidence sum of their predicted bboxes
            c) Each negative image is processed to find  TP and FP bboxes:
                - a TP is a bounding box with confidence < confidence_threshold (the confidence is too low to be considered a good prediction)
                - a FP is a bounding box with a confidence >= confidence threshold.
        The metric described below refers to bounding box but it can be useful obtain result related to the images.
        In this case, the images are divided in positives and negatives.
        The positive images are processed as follow:
        - the predicted bounding boxes are ordered according to their confidence, in descending mode
        - predicted bounding boxes are matched with the ground truth bboxes
        - a TP is a positive image in which all doors are found (so the are a number of matches >= of the number of ground truth bboxes)
        - a FP is a positive image in which 0 < number of matcher < ground truth bboxes
        - a FN is a positive image with no matches (no doors are found)
        :param iou_threshold:
        :param confidence_threshold:
        :param plot_curves:
        :return:
        """
        gt_bboxes = self.get_gt_bboxes()
        predicted_bboxes = self.get_predicted_bboxes()
        # Predictions from positive images that pass the confidence filter.
        predicted_bboxes_positives = []
        # Labels
        labels = {'-1'}
        # A dictionary containing all bboxes divided by image. DETR produces a fixed number of prediction for every image.
        # A positive images have at least one ground truth bbox, while negatives don't have ground truth
        bboxes_images = {
            box.get_image_name(): {
                'is_positive': False,
                'gt_bboxes': [],
                'predicted_bboxes': [],
                'TP': 0,
                'FP': 0,
            }
            for box in predicted_bboxes}
        # Add ground truth bboxes to each image
        for box in gt_bboxes:
            img = bboxes_images[box.get_image_name()]
            img['is_positive'] = True
            # Assign bounding box to its image
            img['gt_bboxes'].append(box)
            labels.add(box.get_class_id())
        # Add predicted bboxes to each image.
        # Divide predicted bounding boxes for the image's type (positive or negative) they belong to.
        # For positive images, bounding boxes with confidence < confidence_threshold are discarded.
        for box in predicted_bboxes:
            img = bboxes_images[box.get_image_name()]
            if img['is_positive'] and box.get_confidence() >= confidence_threshold:
                img['predicted_bboxes'].append(box)
                predicted_bboxes_positives.append(box)
            elif not img['is_positive']:
                img['predicted_bboxes'].append(box)
        # Create dictionary to divide TP and FP by label
        result_by_labels = {
            label: {
                'total_positives': sum(1 for box in gt_bboxes if box.get_class_id() == label),
                'TP': [],
                'FP': [],
            } for label in labels
        }
        # Process bounding boxes of positive images
        # For each positive image, ground truth bboxes are divided according their label
        for img in [img for img in bboxes_images.values() if img['is_positive']]:
            d = {}
            for label in labels:
                boxes = [box for box in img['gt_bboxes'] if box.get_class_id() == label]
                d[label] = {
                    'bboxes': boxes,
                    # mask[i] == 1 marks gt box i as already matched.
                    'mask': np.zeros(len(boxes))
                }
            img['gt_bboxes'] = d
        # Order bboxes according to confidence
        predicted_bboxes_positives.sort(key=lambda box: box.get_confidence(), reverse=True)
        # Greedy matching: each prediction claims at most one unmatched gt box.
        for p_box in predicted_bboxes_positives:
            label = p_box.get_class_id()
            img = bboxes_images[p_box.get_image_name()]
            iou_max = float('-inf')
            match_index = -1
            # Find the grater iou area with gt bboxes
            for gt_index, gt_box in enumerate(img['gt_bboxes'][label]['bboxes']):
                iou = BoundingBox.iou(p_box, gt_box)
                if iou > iou_max:
                    iou_max = iou
                    match_index = gt_index
            # If the iou >= threshold_iou and the label is the same, the match is valid
            if iou_max >= iou_threshold and img['gt_bboxes'][label]['mask'][match_index] == 0:
                # Set gt bbox as matched
                img['gt_bboxes'][label]['mask'][match_index] = 1
                # Update image information
                img['TP'] += 1
                # Update label information
                result_by_labels[label]['TP'].append(1)
                result_by_labels[label]['FP'].append(0)
            # False Positive (if the iou area is less than threshold or the gt box has already been matched)
            else:
                # Update image information
                img['FP'] += 1
                # Update label information
                result_by_labels[label]['TP'].append(0)
                result_by_labels[label]['FP'].append(1)
        # Process negative images
        negative_images = sorted(
            [img for img in bboxes_images.values() if not img['is_positive']],
            key=lambda img: sum(box.get_confidence() for box in img['predicted_bboxes']),
            reverse=False
        )
        for img in negative_images:
            for box in img['predicted_bboxes']:
                result_by_labels['-1']['total_positives'] += 1
                if box.get_confidence() < confidence_threshold:
                    img['TP'] += 1
                    result_by_labels['-1']['TP'].append(1)
                    result_by_labels['-1']['FP'].append(0)
                else:
                    img['FP'] += 1
                    result_by_labels['-1']['TP'].append(0)
                    result_by_labels['-1']['FP'].append(1)
        # Prepare return value
        bboxes_information = {}
        for label, values in result_by_labels.items():
            # Running TP/FP counts over predictions ordered by confidence.
            accumulate_tp = np.cumsum(np.array(values['TP'], dtype=int))
            accumulate_fp = np.cumsum(np.array(values['FP'], dtype=int))
            recall = accumulate_tp / values['total_positives']
            precision = np.divide(accumulate_tp, (accumulate_tp + accumulate_fp))
            [ap, mpre, mrec, _] = calculate_ap_every_point(recall, precision)
            ret = {
                'total_positives': values['total_positives'],
                'TP': np.count_nonzero(values['TP']),
                'FP': np.count_nonzero(values['FP']),
                'precision': precision,
                'recall': recall,
                'AP': ap,
            }
            # The result of labels not presents in the examples are discarded
            if ret['total_positives'] > 0:
                bboxes_information[label] = ret
        if plot_curves:
            plt.close()
            for label, values in sorted(bboxes_information.items(), key=lambda v: v[0]):
                precision = values['precision']
                recall = values['recall']
                p = plt.plot(recall, precision, label=f'{label}')
                if colors is not None:
                    p[0].set_color(colors[int(label)])
            plt.xlabel('Recall')
            plt.ylabel('Precision')
            plt.xlim([-0.1, 1.1])
            plt.ylim([-0.1, 1.1])
            plt.title('Precision/Recall Curve')
            plt.legend(shadow=True)
            plt.grid()
            plt.show()
        # Image-level metrics: key '1' collects positive images, '0' negatives.
        images_information = {
            label: {
                'total_positives': 0,
                'TP': 0,
                'FP': 0,
                'FN': 0
            } for label in ['0', '1']
        }
        for img in bboxes_images.values():
            # Positive images
            if img['is_positive']:
                images_information['1']['total_positives'] += 1
                count_gt_bboxes = sum(len(v['bboxes']) for label, v in img['gt_bboxes'].items())
                if img['TP'] >= count_gt_bboxes:
                    images_information['1']['TP'] += 1
                elif img['TP'] == 0:
                    images_information['1']['FN'] += 1
                else:
                    images_information['1']['FP'] += 1
            # Negative images
            else:
                images_information['0']['total_positives'] += 1
                if img['TP'] == len(img['predicted_bboxes']):
                    images_information['0']['TP'] += 1
                else:
                    images_information['0']['FP'] += 1
        return {'per_bbox': bboxes_information, 'per_image': images_information}
|
[
"matplotlib.pyplot.title",
"numpy.divide",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.count_nonzero",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"src.bounding_box.BoundingBox.iou",
"numpy.array",
"src.evaluators.pascal_voc_evaluator.calculate_ap_every_point",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((9198, 9253), 'numpy.divide', 'np.divide', (['accumulate_tp', '(accumulate_tp + accumulate_fp)'], {}), '(accumulate_tp, accumulate_tp + accumulate_fp)\n', (9207, 9253), True, 'import numpy as np\n'), ((9291, 9334), 'src.evaluators.pascal_voc_evaluator.calculate_ap_every_point', 'calculate_ap_every_point', (['recall', 'precision'], {}), '(recall, precision)\n', (9315, 9334), False, 'from src.evaluators.pascal_voc_evaluator import calculate_ap_every_point\n'), ((9847, 9858), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9856, 9858), True, 'from matplotlib import pyplot as plt\n'), ((10213, 10233), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (10223, 10233), True, 'from matplotlib import pyplot as plt\n'), ((10246, 10269), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (10256, 10269), True, 'from matplotlib import pyplot as plt\n'), ((10282, 10303), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.1, 1.1]'], {}), '([-0.1, 1.1])\n', (10290, 10303), True, 'from matplotlib import pyplot as plt\n'), ((10316, 10337), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.1, 1.1]'], {}), '([-0.1, 1.1])\n', (10324, 10337), True, 'from matplotlib import pyplot as plt\n'), ((10350, 10385), 'matplotlib.pyplot.title', 'plt.title', (['"""Precision/Recall Curve"""'], {}), "('Precision/Recall Curve')\n", (10359, 10385), True, 'from matplotlib import pyplot as plt\n'), ((10398, 10421), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'shadow': '(True)'}), '(shadow=True)\n', (10408, 10421), True, 'from matplotlib import pyplot as plt\n'), ((10434, 10444), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10442, 10444), True, 'from matplotlib import pyplot as plt\n'), ((10457, 10467), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10465, 10467), True, 'from matplotlib import pyplot as plt\n'), ((6978, 7008), 'src.bounding_box.BoundingBox.iou', 'BoundingBox.iou', (['p_box', 'gt_box'], {}), 
'(p_box, gt_box)\n', (6993, 7008), False, 'from src.bounding_box import BoundingBox\n'), ((9003, 9036), 'numpy.array', 'np.array', (["values['TP']"], {'dtype': 'int'}), "(values['TP'], dtype=int)\n", (9011, 9036), True, 'import numpy as np\n'), ((9076, 9109), 'numpy.array', 'np.array', (["values['FP']"], {'dtype': 'int'}), "(values['FP'], dtype=int)\n", (9084, 9109), True, 'import numpy as np\n'), ((9440, 9470), 'numpy.count_nonzero', 'np.count_nonzero', (["values['TP']"], {}), "(values['TP'])\n", (9456, 9470), True, 'import numpy as np\n'), ((9494, 9524), 'numpy.count_nonzero', 'np.count_nonzero', (["values['FP']"], {}), "(values['FP'])\n", (9510, 9524), True, 'import numpy as np\n'), ((10058, 10103), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision'], {'label': 'f"""{label}"""'}), "(recall, precision, label=f'{label}')\n", (10066, 10103), True, 'from matplotlib import pyplot as plt\n')]
|
import random
import numpy as np
from albumentations import DualTransform
from skimage.transform import PiecewiseAffineTransform, warp
class CustomPiecewiseAffineTransform(DualTransform):
    """Albumentations transform that warps the image along a sine wave.

    A piecewise-affine warp is estimated between a regular control grid and
    the same grid with a sinusoidal offset added to the row coordinates.

    Args:
        phase_shift_limit, amplitude_limit, w_limit: sampling ranges for the
            phase shift, amplitude and frequency of the sine wave.
        value: padding value.
        p: probability of applying the transform. Default: 0.5.
    """

    def __init__(
            self,
            phase_shift_limit=(0, 50),
            amplitude_limit=(3, 5),
            w_limit=(2, 4),
            value=255,
            always_apply=False,
            p=0.2,
    ):
        super().__init__(always_apply, p)
        self.phase_shift_limit = phase_shift_limit
        self.amplitude_limit = amplitude_limit
        self.w_limit = w_limit
        self.value = value
        self._tform = PiecewiseAffineTransform()

    def apply_to_bbox(self, bbox, **params):
        raise NotImplementedError("Method apply_to_bbox is not implemented in class " + self.__class__.__name__)

    def apply_to_keypoint(self, keypoint, **params):
        raise NotImplementedError("Method apply_to_keypoint is not implemented in class " + self.__class__.__name__)

    def get_params_dependent_on_targets(self, params):
        raise NotImplementedError(
            "Method get_params_dependent_on_targets is not implemented in class " + self.__class__.__name__)

    def get_transform_init_args_names(self):
        raise NotImplementedError(
            "Method get_transform_init_args_names is not implemented in class " + self.__class__.__name__)

    def _piecewise_affine_transform(self, img, phase_shift, amplitude, w):
        """Warp ``img`` with a sine wave of the given parameters."""
        rows, cols = img.shape[0], img.shape[1]
        # Regular grid of control points covering the whole image.
        grid_c = np.linspace(0, cols, 20)
        grid_r = np.linspace(0, rows, 10)
        grid_r, grid_c = np.meshgrid(grid_r, grid_c)
        src = np.dstack([grid_c.flat, grid_r.flat])[0]
        # Add a sinusoidal oscillation to the row coordinates only.
        wave = np.cos(np.linspace(0, w * np.pi, src.shape[0]) + phase_shift) * amplitude
        dst_rows = (src[:, 1] + wave) * 1.2
        dst = np.vstack([src[:, 0], dst_rows]).T
        self._tform.estimate(src, dst)
        return warp(img, self._tform, output_shape=(rows, cols))

    def apply(self, img, **params):
        """Sample random sine-wave parameters and warp the image."""
        draw = random.uniform
        return self._piecewise_affine_transform(
            img,
            draw(*self.phase_shift_limit),
            draw(*self.amplitude_limit),
            draw(*self.w_limit),
        )
|
[
"numpy.dstack",
"skimage.transform.PiecewiseAffineTransform",
"numpy.meshgrid",
"random.uniform",
"skimage.transform.warp",
"numpy.linspace",
"numpy.vstack"
] |
[((770, 796), 'skimage.transform.PiecewiseAffineTransform', 'PiecewiseAffineTransform', ([], {}), '()\n', (794, 796), False, 'from skimage.transform import PiecewiseAffineTransform, warp\n'), ((1816, 1840), 'numpy.linspace', 'np.linspace', (['(0)', 'cols', '(20)'], {}), '(0, cols, 20)\n', (1827, 1840), True, 'import numpy as np\n'), ((1860, 1884), 'numpy.linspace', 'np.linspace', (['(0)', 'rows', '(10)'], {}), '(0, rows, 10)\n', (1871, 1884), True, 'import numpy as np\n'), ((1914, 1945), 'numpy.meshgrid', 'np.meshgrid', (['src_rows', 'src_cols'], {}), '(src_rows, src_cols)\n', (1925, 1945), True, 'import numpy as np\n'), ((2371, 2428), 'skimage.transform.warp', 'warp', (['img', 'self._tform'], {'output_shape': '(out_rows, out_cols)'}), '(img, self._tform, output_shape=(out_rows, out_cols))\n', (2375, 2428), False, 'from skimage.transform import PiecewiseAffineTransform, warp\n'), ((2508, 2547), 'random.uniform', 'random.uniform', (['*self.phase_shift_limit'], {}), '(*self.phase_shift_limit)\n', (2522, 2547), False, 'import random\n'), ((2568, 2605), 'random.uniform', 'random.uniform', (['*self.amplitude_limit'], {}), '(*self.amplitude_limit)\n', (2582, 2605), False, 'import random\n'), ((2618, 2647), 'random.uniform', 'random.uniform', (['*self.w_limit'], {}), '(*self.w_limit)\n', (2632, 2647), False, 'import random\n'), ((1960, 2001), 'numpy.dstack', 'np.dstack', (['[src_cols.flat, src_rows.flat]'], {}), '([src_cols.flat, src_rows.flat])\n', (1969, 2001), True, 'import numpy as np\n'), ((2234, 2265), 'numpy.vstack', 'np.vstack', (['[dst_cols, dst_rows]'], {}), '([dst_cols, dst_rows])\n', (2243, 2265), True, 'import numpy as np\n'), ((2100, 2139), 'numpy.linspace', 'np.linspace', (['(0)', '(w * np.pi)', 'src.shape[0]'], {}), '(0, w * np.pi, src.shape[0])\n', (2111, 2139), True, 'import numpy as np\n')]
|
import pandas as pd
import pylab
import numpy as np
import tensorflow as tf
import os
import gc
import librosa
import librosa.display
import matplotlib
import matplotlib.pyplot as plt
from keras.preprocessing.image import img_to_array, load_img
from keras.models import Model, Sequential
from keras.optimizers import SGD, RMSprop, Adam
from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit
from keras.utils.np_utils import to_categorical
from PIL import Image
def resize_image(image, image_length = 512):
# Find deflation_rate for narrow the image
deflation_rate = image_length / float(image.size[max((0, 1), key=lambda i: image.size[i])])
image.resize((int(image.size[0] * deflation_rate), int(image.size[1] * deflation_rate)))
return image.resize((int(image.size[0] * deflation_rate), int(image.size[1] * deflation_rate)))
def Load_image(ID, image_length = 512):
output_image = np.empty((len(ID), image_length, image_length, 1))
# i -> the i th of the ID set, image_ID -> the name of the image
for i, image_ID in enumerate(ID):
# Turn the image into an array
image = img_to_array(resize_image(load_img('spectrogram/' + str(image_ID) + '.png', color_mode = "grayscale"),
image_length=image_length))
# Get image height and width
# Place the image at the center of the image
h1 = int((image_length - image.shape[0] ) / 2)
h2 = h1 + image.shape[0]
w1 = int((image_length - image.shape[1]) / 2)
w2 = w1 + image.shape[1]
# Insert into image matrix
output_image[i, h1:h2, w1:w2, 0:1] = image
# Scale the array values so they are between 0 and 1
return np.around(output_image / 255.0)
def Read_train_data():
# Read train.csv and pop out the id
train_data = pd.read_csv('train.csv')
ID = train_data.pop('ID')
# Pop out the species and make species name correspond to number
species = train_data.pop('Class')
species = LabelEncoder().fit(species).transform(species)
# Standardize the data by setting the mean to 0 and std to 1
return ID, species
def load_train_data():
# Load the train data
ID, label = Read_train_data()
# Load the image data
train_image = Load_image(ID)
# Split them into validation and cross-validation
sss = StratifiedShuffleSplit(n_splits=1, train_size=0.8, test_size=0.2)
train_id, test_id = next(sss.split(train_image, label))
tra_image, tra_label = train_image[train_id], label[train_id]
val_image, val_label = train_image[test_id], label[test_id]
return (tra_image, tra_label), (val_image, val_label)
if __name__ == '__main__':
(tra_image, tra_label), (val_image, val_label) = load_train_data()
onehot_tra_label = to_categorical(tra_label)
onehot_val_label = to_categorical(val_label)
KerasCNNmodel = Sequential()
KerasCNNmodel.add(Convolution2D(input_shape=(512, 512, 1), filters=10, kernel_size=5, activation='relu'))
KerasCNNmodel.add(MaxPooling2D(pool_size=(4,4)))
KerasCNNmodel.add(Convolution2D(filters=20, kernel_size=3, activation='relu'))
KerasCNNmodel.add(MaxPooling2D(pool_size=(2,2)))
KerasCNNmodel.add(Convolution2D(filters=40, kernel_size=3, activation='relu'))
KerasCNNmodel.add(MaxPooling2D(pool_size=(1,1)))
KerasCNNmodel.add(Flatten())
KerasCNNmodel.add(Dense(256, activation='relu'))
KerasCNNmodel.add(Dropout(0.5))
KerasCNNmodel.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, nesterov=True, decay=1e-6, momentum=0.9)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
#Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
#rms = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
KerasCNNmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
history=KerasCNNmodel.fit(tra_image, onehot_tra_label, epochs=10, validation_data=(val_image, onehot_val_label), batch_size=20)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
KerasCNNmodel.save('urban_sound.h5')
del KerasCNNmodel
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"numpy.around",
"keras.optimizers.SGD",
"keras.layers.Flatten",
"sklearn.preprocessing.LabelEncoder",
"keras.utils.np_utils.to_categorical",
"keras.layers.MaxPooling2D",
"matplotlib.pyplot.show",
"keras.layers.Convolution2D",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"keras.optimizers.Adam",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"sklearn.model_selection.StratifiedShuffleSplit",
"keras.layers.Dense",
"keras.models.Sequential",
"matplotlib.pyplot.xlabel"
] |
[((1962, 1993), 'numpy.around', 'np.around', (['(output_image / 255.0)'], {}), '(output_image / 255.0)\n', (1971, 1993), True, 'import numpy as np\n'), ((2079, 2103), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (2090, 2103), True, 'import pandas as pd\n'), ((2642, 2707), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'train_size': '(0.8)', 'test_size': '(0.2)'}), '(n_splits=1, train_size=0.8, test_size=0.2)\n', (2664, 2707), False, 'from sklearn.model_selection import train_test_split, StratifiedShuffleSplit\n'), ((3086, 3111), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['tra_label'], {}), '(tra_label)\n', (3100, 3111), False, 'from keras.utils.np_utils import to_categorical\n'), ((3136, 3161), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['val_label'], {}), '(val_label)\n', (3150, 3161), False, 'from keras.utils.np_utils import to_categorical\n'), ((3185, 3197), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3195, 3197), False, 'from keras.models import Model, Sequential\n'), ((3839, 3893), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'nesterov': '(True)', 'decay': '(1e-06)', 'momentum': '(0.9)'}), '(lr=0.01, nesterov=True, decay=1e-06, momentum=0.9)\n', (3842, 3893), False, 'from keras.optimizers import SGD, RMSprop, Adam\n'), ((3905, 3990), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': 'None', 'decay': '(0.0)', 'amsgrad': '(False)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False\n )\n', (3909, 3990), False, 'from keras.optimizers import SGD, RMSprop, Adam\n'), ((4363, 4395), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {}), "(history.history['acc'])\n", (4371, 4395), True, 'import matplotlib.pyplot as plt\n'), ((4401, 4437), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']"], {}), 
"(history.history['val_acc'])\n", (4409, 4437), True, 'import matplotlib.pyplot as plt\n'), ((4443, 4464), 'matplotlib.pyplot.title', 'plt.title', (['"""accuracy"""'], {}), "('accuracy')\n", (4452, 4464), True, 'import matplotlib.pyplot as plt\n'), ((4470, 4492), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (4480, 4492), True, 'import matplotlib.pyplot as plt\n'), ((4498, 4517), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4508, 4517), True, 'import matplotlib.pyplot as plt\n'), ((4523, 4570), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Test']"], {'loc': '"""upper left"""'}), "(['Train', 'Test'], loc='upper left')\n", (4533, 4570), True, 'import matplotlib.pyplot as plt\n'), ((4576, 4586), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4584, 4586), True, 'import matplotlib.pyplot as plt\n'), ((4594, 4627), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (4602, 4627), True, 'import matplotlib.pyplot as plt\n'), ((4633, 4670), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (4641, 4670), True, 'import matplotlib.pyplot as plt\n'), ((4676, 4693), 'matplotlib.pyplot.title', 'plt.title', (['"""loss"""'], {}), "('loss')\n", (4685, 4693), True, 'import matplotlib.pyplot as plt\n'), ((4699, 4717), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (4709, 4717), True, 'import matplotlib.pyplot as plt\n'), ((4723, 4742), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4733, 4742), True, 'import matplotlib.pyplot as plt\n'), ((4748, 4795), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Test']"], {'loc': '"""upper left"""'}), "(['Train', 'Test'], loc='upper left')\n", (4758, 4795), True, 'import matplotlib.pyplot as plt\n'), ((4801, 4811), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(4809, 4811), True, 'import matplotlib.pyplot as plt\n'), ((3221, 3311), 'keras.layers.Convolution2D', 'Convolution2D', ([], {'input_shape': '(512, 512, 1)', 'filters': '(10)', 'kernel_size': '(5)', 'activation': '"""relu"""'}), "(input_shape=(512, 512, 1), filters=10, kernel_size=5,\n activation='relu')\n", (3234, 3311), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge\n'), ((3332, 3362), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(4, 4)'}), '(pool_size=(4, 4))\n', (3344, 3362), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge\n'), ((3388, 3447), 'keras.layers.Convolution2D', 'Convolution2D', ([], {'filters': '(20)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(filters=20, kernel_size=3, activation='relu')\n", (3401, 3447), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge\n'), ((3472, 3502), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3484, 3502), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge\n'), ((3526, 3585), 'keras.layers.Convolution2D', 'Convolution2D', ([], {'filters': '(40)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), "(filters=40, kernel_size=3, activation='relu')\n", (3539, 3585), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge\n'), ((3610, 3640), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(1, 1)'}), '(pool_size=(1, 1))\n', (3622, 3640), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge\n'), ((3666, 3675), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3673, 3675), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, 
Flatten, Input, merge\n'), ((3702, 3731), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (3707, 3731), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge\n'), ((3756, 3768), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3763, 3768), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge\n'), ((3793, 3824), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (3798, 3824), False, 'from keras.layers import Dense, Dropout, Activation, Convolution2D, MaxPooling2D, Flatten, Input, merge\n'), ((2260, 2274), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2272, 2274), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler\n')]
|
# Copyright 2021 The Kubric Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2021 The Kubric Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
import itertools
import json
import logging
import numpy as np
import png
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
from typing import List
_DESCRIPTION = """
A dataset of each ShapeNet object rendered from 25 random perspectives on transparent background.
Images are rendered at 512x512 and then cropped to fit the object, so they vary in size.
The dataset contains the following information:
- "image_id": str
- "asset_id": str
The id of the ShapeNet object. E.g. "02691156/1021a0914a7207aff927ed529ad90a11".
- "label": tfds.features.ClassLabel
One of the 55 Shapenet classes:
["airplane", "ashcan", "bag", "basket", "bathtub", "bed", "bench", "birdhouse",
"bookshelf", "bottle", "bowl", "bus", "cabinet", "camera", "can", "cap", "car",
"cellular telephone", "chair", "clock", "computer keyboard", "dishwasher",
"display", "earphone", "faucet", "file", "guitar", "helmet", "jar", "knife",
"lamp", "laptop", "loudspeaker", "mailbox", "microphone", "microwave",
"motorcycle", "mug", "piano", "pillow", "pistol", "pot", "printer",
"remote control", "rifle", "rocket", "skateboard", "sofa", "stove",
"table", "telephone", "tower", "train", "vessel", "washer"]
- "camera_position": (3,) [float32]
position of the camera in a half-sphere shell with inner radius 9 and outer radius 10.
The object sits at the origin.
- "image": (None, None, 4) [uint8]
The rendered image in RGBA format, cropped to fit the object.
"""
_CITATION = """\
@inproceedings{greff2022kubric,
title = {Kubric: a scalable dataset generator},
author = {<NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME> and Hsueh-Ti (Dere<NAME> and <NAME> and
<NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME> and <NAME> and <NAME>},
booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition, {CVPR}},
year = {2022},
publisher = {Computer Vision Foundation / {IEEE}},
}"""
class ShapenetPretraining(tfds.core.BeamBasedBuilder):
"""TFDS definition for ShapenetPretraining dataset"""
VERSION = tfds.core.Version("1.0.0")
RELEASE_NOTES = {
"1.0.0": "initial release",
}
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"image_id": tfds.features.Text(),
"asset_id": tfds.features.Text(),
"label": tfds.features.ClassLabel(names=[
"airplane", "ashcan", "bag", "basket", "bathtub", "bed", "bench", "birdhouse",
"bookshelf", "bottle", "bowl", "bus", "cabinet", "camera", "can", "cap", "car",
"cellular telephone", "chair", "clock", "computer keyboard", "dishwasher",
"display", "earphone", "faucet", "file", "guitar", "helmet", "jar", "knife",
"lamp", "laptop", "loudspeaker", "mailbox", "microphone", "microwave",
"motorcycle", "mug", "piano", "pillow", "pistol", "pot", "printer",
"remote control", "rifle", "rocket", "skateboard", "sofa", "stove",
"table", "telephone", "tower", "train", "vessel", "washer"]),
"camera_position": tfds.features.Tensor(shape=(3,), dtype=tf.float32),
"image": tfds.features.Image(shape=(None, None, 4)),
}),
supervised_keys=("image", "label"),
homepage="https://github.com/google-research/kubric",
citation=_CITATION,
)
def _split_generators(self, unused_dl_manager: tfds.download.DownloadManager):
"""Returns SplitGenerators."""
del unused_dl_manager
# find all available directories
path = tfds.core.as_path("gs://research-brain-kubric-xgcp/jobs/shapenet_demo_7/")
all_subdirs = list(path.glob("*")) # if (d / "metadata.json").exists()]
# figure out how many images per directory exist
nr_images_per_dir = len(list(all_subdirs[0].glob("rgba_*.png")))
logging.info("Found %d sub-folders with %d images each in master path: %s",
len(all_subdirs), nr_images_per_dir, path)
# we pick one view for each object for validation and the others for train
# views are random so we can just pick the first one for validation
val_all_image_paths = [str(d / "rgba_00000.png") for d in all_subdirs]
train_all_image_paths = [str(d / "rgba_{:05d}.png".format(i))
for d, i in itertools.product(all_subdirs,
range(1, nr_images_per_dir))]
# directories are sorted by categories, so we shuffle
np.random.shuffle(train_all_image_paths)
logging.info("Using 1 image per object for validation for a total of %d images",
len(val_all_image_paths))
logging.info("Using the other %d images for train", len(train_all_image_paths))
return {
tfds.Split.TRAIN: self._generate_examples(train_all_image_paths),
tfds.Split.VALIDATION: self._generate_examples(val_all_image_paths),
}
def _generate_examples(self, directories: List[str]):
"""Yields examples."""
def _process_example(image_path):
image_path = tfds.core.as_path(image_path)
image_dir = image_path.parent
image_index = int(image_path.name[-9:-4]) # int("rgba_00008.png"[-9:-4]) -> 8
key = f"{image_dir.name}_{image_index:05d}"
with tf.io.gfile.GFile(str(image_dir / "metadata.json"), "r") as fp:
metadata = json.load(fp)
bbox = metadata["instances"][0]["bboxes"][image_index]
y_min, x_min, y_max, x_max = [int(v*512) for v in bbox]
img = read_png(image_path)
return key, {
"image_id": key,
"asset_id": metadata["instances"][0]["asset_id"].replace("_", "/"),
"label": metadata["instances"][0]["category"],
"camera_position": np.array(metadata["camera"]["positions"][image_index], np.float32),
"image": img[y_min:y_max+1, x_min:x_max+1],
}
beam = tfds.core.lazy_imports.apache_beam
return beam.Create(directories) | beam.Map(_process_example)
def read_png(filename) -> np.ndarray:
filename = tfds.core.as_path(filename)
png_reader = png.Reader(bytes=filename.read_bytes())
width, height, pngdata, info = png_reader.read()
del png_reader
bitdepth = info["bitdepth"]
if bitdepth == 8:
dtype = np.uint8
elif bitdepth == 16:
dtype = np.uint16
else:
raise NotImplementedError(f"Unsupported bitdepth: {bitdepth}")
plane_count = info["planes"]
pngdata = np.vstack(list(map(dtype, pngdata)))
return pngdata.reshape((height, width, plane_count))
|
[
"json.load",
"tensorflow_datasets.public_api.features.ClassLabel",
"tensorflow_datasets.public_api.features.Text",
"tensorflow_datasets.public_api.features.Image",
"tensorflow_datasets.public_api.features.Tensor",
"tensorflow_datasets.public_api.core.Version",
"numpy.array",
"tensorflow_datasets.public_api.core.as_path",
"numpy.random.shuffle"
] |
[((3601, 3627), 'tensorflow_datasets.public_api.core.Version', 'tfds.core.Version', (['"""1.0.0"""'], {}), "('1.0.0')\n", (3618, 3627), True, 'import tensorflow_datasets.public_api as tfds\n'), ((7727, 7754), 'tensorflow_datasets.public_api.core.as_path', 'tfds.core.as_path', (['filename'], {}), '(filename)\n', (7744, 7754), True, 'import tensorflow_datasets.public_api as tfds\n'), ((5255, 5329), 'tensorflow_datasets.public_api.core.as_path', 'tfds.core.as_path', (['"""gs://research-brain-kubric-xgcp/jobs/shapenet_demo_7/"""'], {}), "('gs://research-brain-kubric-xgcp/jobs/shapenet_demo_7/')\n", (5272, 5329), True, 'import tensorflow_datasets.public_api as tfds\n'), ((6185, 6225), 'numpy.random.shuffle', 'np.random.shuffle', (['train_all_image_paths'], {}), '(train_all_image_paths)\n', (6202, 6225), True, 'import numpy as np\n'), ((6752, 6781), 'tensorflow_datasets.public_api.core.as_path', 'tfds.core.as_path', (['image_path'], {}), '(image_path)\n', (6769, 6781), True, 'import tensorflow_datasets.public_api as tfds\n'), ((7048, 7061), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (7057, 7061), False, 'import json\n'), ((7431, 7497), 'numpy.array', 'np.array', (["metadata['camera']['positions'][image_index]", 'np.float32'], {}), "(metadata['camera']['positions'][image_index], np.float32)\n", (7439, 7497), True, 'import numpy as np\n'), ((3932, 3952), 'tensorflow_datasets.public_api.features.Text', 'tfds.features.Text', ([], {}), '()\n', (3950, 3952), True, 'import tensorflow_datasets.public_api as tfds\n'), ((3978, 3998), 'tensorflow_datasets.public_api.features.Text', 'tfds.features.Text', ([], {}), '()\n', (3996, 3998), True, 'import tensorflow_datasets.public_api as tfds\n'), ((4021, 4663), 'tensorflow_datasets.public_api.features.ClassLabel', 'tfds.features.ClassLabel', ([], {'names': "['airplane', 'ashcan', 'bag', 'basket', 'bathtub', 'bed', 'bench',\n 'birdhouse', 'bookshelf', 'bottle', 'bowl', 'bus', 'cabinet', 'camera',\n 'can', 'cap', 'car', 'cellular 
telephone', 'chair', 'clock',\n 'computer keyboard', 'dishwasher', 'display', 'earphone', 'faucet',\n 'file', 'guitar', 'helmet', 'jar', 'knife', 'lamp', 'laptop',\n 'loudspeaker', 'mailbox', 'microphone', 'microwave', 'motorcycle',\n 'mug', 'piano', 'pillow', 'pistol', 'pot', 'printer', 'remote control',\n 'rifle', 'rocket', 'skateboard', 'sofa', 'stove', 'table', 'telephone',\n 'tower', 'train', 'vessel', 'washer']"}), "(names=['airplane', 'ashcan', 'bag', 'basket',\n 'bathtub', 'bed', 'bench', 'birdhouse', 'bookshelf', 'bottle', 'bowl',\n 'bus', 'cabinet', 'camera', 'can', 'cap', 'car', 'cellular telephone',\n 'chair', 'clock', 'computer keyboard', 'dishwasher', 'display',\n 'earphone', 'faucet', 'file', 'guitar', 'helmet', 'jar', 'knife',\n 'lamp', 'laptop', 'loudspeaker', 'mailbox', 'microphone', 'microwave',\n 'motorcycle', 'mug', 'piano', 'pillow', 'pistol', 'pot', 'printer',\n 'remote control', 'rifle', 'rocket', 'skateboard', 'sofa', 'stove',\n 'table', 'telephone', 'tower', 'train', 'vessel', 'washer'])\n", (4045, 4663), True, 'import tensorflow_datasets.public_api as tfds\n'), ((4793, 4843), 'tensorflow_datasets.public_api.features.Tensor', 'tfds.features.Tensor', ([], {'shape': '(3,)', 'dtype': 'tf.float32'}), '(shape=(3,), dtype=tf.float32)\n', (4813, 4843), True, 'import tensorflow_datasets.public_api as tfds\n'), ((4867, 4909), 'tensorflow_datasets.public_api.features.Image', 'tfds.features.Image', ([], {'shape': '(None, None, 4)'}), '(shape=(None, None, 4))\n', (4886, 4909), True, 'import tensorflow_datasets.public_api as tfds\n')]
|
import cv2
import os.path
import numpy as np
from os.path import dirname,exists
from tensorflow.keras.models import load_model
# make prediction on image saved on disk
def prediction_path(path_img,path_res,model_name="model.h5"):
if not exists("models/"+model_name):
print('Model '+model_name+' not found !!')
model_name="model.h5"
# load keras model
model = load_model("models/"+model_name)
faceCascade = cv2.CascadeClassifier(r''+dirname(__file__)+'/haarcascades/haarcascade_frontalface_default.xml')
# list of given emotions
EMOTIONS = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral']
if os.path.exists(path_img):
# read the image
# print(path)
img = cv2.imread(r''+path_img, 1)
# print(img)
# check if image is valid or not
if img is None:
print('Invalid image !!')
return
else:
print('Image not found')
return
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# find face in the frame
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
for (x, y, w, h) in faces:
# required region for the face
img_face = img[y-90:y+h+70, x-50:x+w+50]
break
try:
# remove colors
img_face_gray = cv2.cvtColor(img_face, cv2.COLOR_BGR2GRAY)
# resize image for the model
img_face_gray = cv2.resize(img_face_gray, (48, 48))
img_face_gray = np.reshape(img_face_gray, (1, 48, 48, 1))
# do prediction
result = model.predict(img_face_gray)
# print emotion
print('Detected emotion: ' + str(EMOTIONS[np.argmax(result[0])]))
emotion_index= str(np.argmax(result[0]))
except:
# No face detected
emotion_index= str(-1)
f= open(path_res,"w")
f.write(emotion_index)
f.close()
return
|
[
"tensorflow.keras.models.load_model",
"numpy.argmax",
"cv2.cvtColor",
"os.path.dirname",
"os.path.exists",
"cv2.imread",
"numpy.reshape",
"cv2.resize"
] |
[((395, 429), 'tensorflow.keras.models.load_model', 'load_model', (["('models/' + model_name)"], {}), "('models/' + model_name)\n", (405, 429), False, 'from tensorflow.keras.models import load_model\n'), ((999, 1036), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1011, 1036), False, 'import cv2\n'), ((242, 272), 'os.path.exists', 'exists', (["('models/' + model_name)"], {}), "('models/' + model_name)\n", (248, 272), False, 'from os.path import dirname, exists\n'), ((757, 785), 'cv2.imread', 'cv2.imread', (["('' + path_img)", '(1)'], {}), "('' + path_img, 1)\n", (767, 785), False, 'import cv2\n'), ((1487, 1529), 'cv2.cvtColor', 'cv2.cvtColor', (['img_face', 'cv2.COLOR_BGR2GRAY'], {}), '(img_face, cv2.COLOR_BGR2GRAY)\n', (1499, 1529), False, 'import cv2\n'), ((1591, 1626), 'cv2.resize', 'cv2.resize', (['img_face_gray', '(48, 48)'], {}), '(img_face_gray, (48, 48))\n', (1601, 1626), False, 'import cv2\n'), ((1651, 1692), 'numpy.reshape', 'np.reshape', (['img_face_gray', '(1, 48, 48, 1)'], {}), '(img_face_gray, (1, 48, 48, 1))\n', (1661, 1692), True, 'import numpy as np\n'), ((1888, 1908), 'numpy.argmax', 'np.argmax', (['result[0]'], {}), '(result[0])\n', (1897, 1908), True, 'import numpy as np\n'), ((473, 490), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (480, 490), False, 'from os.path import dirname, exists\n'), ((1837, 1857), 'numpy.argmax', 'np.argmax', (['result[0]'], {}), '(result[0])\n', (1846, 1857), True, 'import numpy as np\n')]
|
from collections import namedtuple
from copy import deepcopy
import numpy as np
def powers2(num):
'''List (descending) of powers of two in the number'''
powers = [int(power)
for power, value in enumerate(reversed(format(num, 'b')))
if value != '0']
return powers[::-1]
# assert all(sum(2**p for p in powers2(n)) == n for n in (9, 13, 425, 123))
Tile = namedtuple('Tile', ('coords', 'cells',
'master_vertices', 'slave_vertices', 'mappings',
'data'))
def make_tile(x, cells, master_vertices, slave_vertices, vertex_mappings, data):
'''Freeze the tile from data'''
# The tile consists of coordinates x and cells as indices in to the
# cells array. master/slave vertices define a map for gluing in the
# direction. The periodic maps for the remaining dirs are in vertex_
# mappings
return Tile(deepcopy(x), deepcopy(cells),
np.copy(master_vertices), np.copy(slave_vertices),
[vm.copy() for vm in vertex_mappings],
data.copy())
|
[
"copy.deepcopy",
"collections.namedtuple",
"numpy.copy"
] |
[((397, 497), 'collections.namedtuple', 'namedtuple', (['"""Tile"""', "('coords', 'cells', 'master_vertices', 'slave_vertices', 'mappings', 'data')"], {}), "('Tile', ('coords', 'cells', 'master_vertices', 'slave_vertices',\n 'mappings', 'data'))\n", (407, 497), False, 'from collections import namedtuple\n'), ((916, 927), 'copy.deepcopy', 'deepcopy', (['x'], {}), '(x)\n', (924, 927), False, 'from copy import deepcopy\n'), ((929, 944), 'copy.deepcopy', 'deepcopy', (['cells'], {}), '(cells)\n', (937, 944), False, 'from copy import deepcopy\n'), ((962, 986), 'numpy.copy', 'np.copy', (['master_vertices'], {}), '(master_vertices)\n', (969, 986), True, 'import numpy as np\n'), ((988, 1011), 'numpy.copy', 'np.copy', (['slave_vertices'], {}), '(slave_vertices)\n', (995, 1011), True, 'import numpy as np\n')]
|
import numpy as np
import torch
def get_coordinate_tensors(x_max, y_max):
x_map = np.tile(np.arange(x_max), (y_max,1))/x_max*2 - 1.0
y_map = np.tile(np.arange(y_max), (x_max,1)).T/y_max*2 - 1.0
x_map_tensor = torch.from_numpy(x_map.astype(np.float32)).cuda()
y_map_tensor = torch.from_numpy(y_map.astype(np.float32)).cuda()
return x_map_tensor, y_map_tensor
def get_center(part_map, self_referenced=False):
h,w = part_map.shape
x_map, y_map = get_coordinate_tensors(h,w)
x_center = (part_map * x_map).sum()
y_center = (part_map * y_map).sum()
if self_referenced:
x_c_value = float(x_center.cpu().detach())
y_c_value = float(y_center.cpu().detach())
x_center = (part_map * (x_map - x_c_value)).sum() + x_c_value
y_center = (part_map * (y_map - y_c_value)).sum() + y_c_value
return x_center, y_center
def get_centers(part_maps, detach_k=True, epsilon=1e-3, self_ref_coord=False):
C,H,W = part_maps.shape
centers = []
for c in range(C):
part_map = part_maps[c,:,:] + epsilon
k = part_map.sum()
part_map_pdf = part_map/k
x_c, y_c = get_center(part_map_pdf, self_ref_coord)
centers.append(torch.stack((x_c, y_c), dim=0).unsqueeze(0))
return torch.cat(centers, dim=0)
def batch_get_centers(pred_softmax):
B,C,H,W = pred_softmax.shape
centers_list = []
for b in range(B):
centers_list.append(get_centers(pred_softmax[b]).unsqueeze(0))
return torch.cat(centers_list, dim=0)
|
[
"torch.stack",
"numpy.arange",
"torch.cat"
] |
[((1278, 1303), 'torch.cat', 'torch.cat', (['centers'], {'dim': '(0)'}), '(centers, dim=0)\n', (1287, 1303), False, 'import torch\n'), ((1503, 1533), 'torch.cat', 'torch.cat', (['centers_list'], {'dim': '(0)'}), '(centers_list, dim=0)\n', (1512, 1533), False, 'import torch\n'), ((96, 112), 'numpy.arange', 'np.arange', (['x_max'], {}), '(x_max)\n', (105, 112), True, 'import numpy as np\n'), ((1222, 1252), 'torch.stack', 'torch.stack', (['(x_c, y_c)'], {'dim': '(0)'}), '((x_c, y_c), dim=0)\n', (1233, 1252), False, 'import torch\n'), ((159, 175), 'numpy.arange', 'np.arange', (['y_max'], {}), '(y_max)\n', (168, 175), True, 'import numpy as np\n')]
|
import os
import re
import numpy as np
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, \
UnexpectedAlertPresentException, StaleElementReferenceException, \
NoSuchWindowException, WebDriverException
from enum import Enum, auto
from threading import Thread, Event
from typing import Callable, Dict, List, Tuple
_listeners: Dict['OthelloListenerCallback', Callable] = {}
_listeners_cache: Dict['OthelloListenerCallback', Tuple] = {}
class ListenerCallback(Enum):
USER_LOGGED = auto()
IN_ROOM = auto()
IN_GAME = auto()
PLAYERS = auto()
PLAYER_COLOR = auto()
PLAYERS_POINTS = auto()
BOARD = auto()
PLAYERS_TIME = auto()
CURRENT_PLAYER = auto()
GAME_PROGRESS = auto()
IS_FINISHED = auto()
CLOSE = auto()
class ListenerCallbackRegister:
def register_listener(type_: ListenerCallback):
global _listeners
def decorator(function):
_listeners[type_] = function
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except UnexpectedAlertPresentException:
return None
except StaleElementReferenceException:
return None
return wrapper
return decorator
@register_listener(ListenerCallback.USER_LOGGED)
def _user_logged_listener(driver):
try:
driver.find_element_by_xpath('//body[not(contains(@class, "not_logged_user"))]')
return driver.execute_script('return document.getElementById("connected_username").innerText')
except NoSuchElementException:
return None
@register_listener(ListenerCallback.IN_ROOM)
def _in_room_logged_listener(driver):
return bool(re.match(r'.+/table\?table=\d+', driver.current_url))
@register_listener(ListenerCallback.IN_GAME)
def _in_game_logged_listener(driver):
return bool(re.match(r'.+/reversi\?table=\d+', driver.current_url))
@register_listener(ListenerCallback.CURRENT_PLAYER)
def _current_player_logged_listener(driver):
xpath = '//*[@class="emblemwrap" and contains(@style, "display: block;") ' \
'and contains(@id, "active")]/following::div[@class="player-name"]'
try:
element = driver.find_element_by_xpath(xpath)
return element and element.text
except NoSuchElementException:
return None
@register_listener(ListenerCallback.PLAYERS)
def _players_listener(driver):
xpath = '//*[contains(@class, "player-name")]//a'
try:
elements = driver.find_elements_by_xpath(xpath)
return tuple([element.text for element in elements])
except NoSuchElementException:
return None
@register_listener(ListenerCallback.BOARD)
def _board_listener(driver):
try:
board = np.zeros((8, 8), dtype=int)
discs_root = driver.find_element_by_id('discs')
discs = {}
for disc_el in discs_root.find_elements_by_class_name('disc'):
player = -1 if 'disccolor_ffffff' in disc_el.get_attribute('class') else 1
position = disc_el.get_attribute('id').split('_')[1]
position = int(position[1]) - 1, int(position[0]) - 1
board[position[0], position[1]] = player
return board
except NoSuchElementException:
return None
@register_listener(ListenerCallback.PLAYERS_POINTS)
def _points_listener(driver):
try:
players = driver.find_elements_by_xpath('//*[contains(@class, "player-name")]//a')
players = [p.text for p in players]
points = driver.execute_script('return Array.prototype.map.call(document.querySelectorAll(".player_score_value"),(item) => item.innerText)')
points = map(int, points)
return dict(zip(players, points))
except NoSuchElementException:
return None
@register_listener(ListenerCallback.PLAYERS_TIME)
def _players_time_listener(driver):
try:
players = driver.find_elements_by_xpath('//*[contains(@class, "player-name")]//a')
players = [p.text for p in players]
times = driver.execute_script('return Array.prototype.map.call(document.querySelectorAll(".timeToThink"),(item) => item.innerText)')
return dict(zip(players, times))
except NoSuchElementException:
return None
@register_listener(ListenerCallback.PLAYER_COLOR)
def _player_color_listener(driver):
try:
xpath = '//*[contains(@class, "player-name")]//a'
logged_player_style = driver.find_element_by_xpath(xpath).get_attribute('style')
return 1 if logged_player_style == 'color: rgb(0, 0, 0);' else -1
except NoSuchElementException:
return None
@register_listener(ListenerCallback.IS_FINISHED)
def _is_finished_listener(driver):
try:
driver.find_element_by_id('createNew_btn')
return True
except NoSuchElementException:
return None
@register_listener(ListenerCallback.GAME_PROGRESS)
def _game_progress_listener(driver):
try:
element = driver.find_element_by_id('pr_gameprogression')
return element and element.text
except NoSuchElementException:
return None
class OthelloListener(Thread):
HOME_PAGE = 'https://en.boardgamearena.com/account'
def __init__(self):
self._driver = None
self._stop_event = Event()
self._callbacks: Dict[OthelloListenerCallback, List[Callable]] = {}
super().__init__(daemon=True)
def run(self):
options = webdriver.ChromeOptions()
options.add_argument('--lang=en')
if os.name == 'nt':
executable_path = './chromedriver.exe'
else:
executable_path = './chromedriver'
self._driver = webdriver.Chrome(executable_path=executable_path, options=options)
self._driver.get(OthelloListener.HOME_PAGE)
self._listener()
self._driver.quit()
def register_callback(self, type_: 'ListenerCallback', callback: Callable):
if type_ not in self._callbacks:
self._callbacks[type_] = []
self._callbacks[type_].append(callback)
def unregister_callback(self, callback: Callable):
self._callbacks[type_].remove(callback)
def _listener(self):
global _listeners_cache
while not self._stop_event.is_set():
for type_ in ListenerCallback:
if type_ in self._callbacks and type_ in _listeners:
listener = _listeners[type_]
self._driver.implicitly_wait(0)
try:
result = listener(self._driver)
except NoSuchWindowException:
self._stop_event.set()
break
except WebDriverException:
self._stop_event.set()
break
cache_result = _listeners_cache.get(type_)
cache_result = cache_result and cache_result[1]
if isinstance(result, np.ndarray):
results_are_equals = np.all(result == cache_result)
else:
results_are_equals = result == cache_result
callback_params = tuple([type_] + [result])
if result is not None and not results_are_equals:
self._run_callbacks(type_, callback_params)
_listeners_cache[type_] = callback_params
if ListenerCallback.CLOSE in self._callbacks:
self._run_callbacks(ListenerCallback.CLOSE, (ListenerCallback.CLOSE, None))
def _run_callbacks(self, type_: 'ListenerCallback', callback_params):
if type_ in self._callbacks:
for callback in self._callbacks[type_]:
Thread(target=callback, args=callback_params, daemon=True).start()
def callback(event, result):
print(f'Event: {event}. Result: {repr(result)}')
if __name__ == '__main__':
listener = OthelloListener()
listener.start()
listener.register_callback(ListenerCallback.USER_LOGGED, callback)
listener.register_callback(ListenerCallback.IN_ROOM, callback)
listener.register_callback(ListenerCallback.CURRENT_PLAYER, callback)
listener.register_callback(ListenerCallback.BOARD, callback)
listener.register_callback(ListenerCallback.PLAYERS_POINTS, callback)
listener.register_callback(ListenerCallback.PLAYERS, callback)
listener.register_callback(ListenerCallback.PLAYER_COLOR, callback)
listener.register_callback(ListenerCallback.PLAYERS_TIME, callback)
listener.register_callback(ListenerCallback.IS_FINISHED, callback)
while True:
pass
|
[
"threading.Thread",
"numpy.zeros",
"re.match",
"threading.Event",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"enum.auto",
"numpy.all"
] |
[((539, 545), 'enum.auto', 'auto', ([], {}), '()\n', (543, 545), False, 'from enum import Enum, auto\n'), ((560, 566), 'enum.auto', 'auto', ([], {}), '()\n', (564, 566), False, 'from enum import Enum, auto\n'), ((581, 587), 'enum.auto', 'auto', ([], {}), '()\n', (585, 587), False, 'from enum import Enum, auto\n'), ((602, 608), 'enum.auto', 'auto', ([], {}), '()\n', (606, 608), False, 'from enum import Enum, auto\n'), ((628, 634), 'enum.auto', 'auto', ([], {}), '()\n', (632, 634), False, 'from enum import Enum, auto\n'), ((656, 662), 'enum.auto', 'auto', ([], {}), '()\n', (660, 662), False, 'from enum import Enum, auto\n'), ((675, 681), 'enum.auto', 'auto', ([], {}), '()\n', (679, 681), False, 'from enum import Enum, auto\n'), ((701, 707), 'enum.auto', 'auto', ([], {}), '()\n', (705, 707), False, 'from enum import Enum, auto\n'), ((729, 735), 'enum.auto', 'auto', ([], {}), '()\n', (733, 735), False, 'from enum import Enum, auto\n'), ((756, 762), 'enum.auto', 'auto', ([], {}), '()\n', (760, 762), False, 'from enum import Enum, auto\n'), ((781, 787), 'enum.auto', 'auto', ([], {}), '()\n', (785, 787), False, 'from enum import Enum, auto\n'), ((800, 806), 'enum.auto', 'auto', ([], {}), '()\n', (804, 806), False, 'from enum import Enum, auto\n'), ((5702, 5709), 'threading.Event', 'Event', ([], {}), '()\n', (5707, 5709), False, 'from threading import Thread, Event\n'), ((5862, 5887), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (5885, 5887), False, 'from selenium import webdriver\n'), ((6093, 6159), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': 'executable_path', 'options': 'options'}), '(executable_path=executable_path, options=options)\n', (6109, 6159), False, 'from selenium import webdriver\n'), ((1821, 1874), 're.match', 're.match', (['""".+/table\\\\?table=\\\\d+"""', 'driver.current_url'], {}), "('.+/table\\\\?table=\\\\d+', driver.current_url)\n", (1829, 1874), False, 'import re\n'), ((1991, 2046), 
're.match', 're.match', (['""".+/reversi\\\\?table=\\\\d+"""', 'driver.current_url'], {}), "('.+/reversi\\\\?table=\\\\d+', driver.current_url)\n", (1999, 2046), False, 'import re\n'), ((2970, 2997), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {'dtype': 'int'}), '((8, 8), dtype=int)\n', (2978, 2997), True, 'import numpy as np\n'), ((7465, 7495), 'numpy.all', 'np.all', (['(result == cache_result)'], {}), '(result == cache_result)\n', (7471, 7495), True, 'import numpy as np\n'), ((8178, 8236), 'threading.Thread', 'Thread', ([], {'target': 'callback', 'args': 'callback_params', 'daemon': '(True)'}), '(target=callback, args=callback_params, daemon=True)\n', (8184, 8236), False, 'from threading import Thread, Event\n')]
|
import pygame
from pygame.locals import *
from numpy import reshape
import sys
import traceback
import random
import math
from .game import Game
def rndint(x):
return int(round(x))
def clamp(x, minimum, maximum):
if x < minimum:
return minimum
if x > maximum:
return maximum
return x
class Pong(Game):
PADDLE_SPEED = 300 * 2
BALL_SPEED = 200.0 * 2
NO_REWARD = 0
ENEMY_SCORE_REWARD = -100
PONG_REWARD = 10 # given with time_reward
SCORE_REWARD = 0
CENTER_REWARD = 0
OUTPUT_SHAPE = (1, 4)
def __init__(self, key_bindings, max_score):
super(Pong, self).__init__(key_bindings, 800, 600, "Pong - SI")
self._font = pygame.font.SysFont("Times New Roman", 18)
self._max_score = max_score
self.start()
def start(self):
self._dt = 1.0 / 60.0
self._done = False
self.end = False # to testing episodes
self._ball = Pong.Ball(
self._screen_size[0] / 2, self._screen_size[1] / 2, Pong.BALL_SPEED)
self._player = Pong.Player((0, 255, 0), Pong.Paddle(
self._screen_size[0] - 5 - 10, self._screen_size[1] / 2 - 30, 10, 100, K_DOWN, K_UP))
self._bot = Pong.Bot((0, 0, 255), Pong.Paddle(
5, self._screen_size[1] / 2 - 30, 10, 450, K_s, K_w))
self._clock = pygame.time.Clock()
def update_clock(self):
self._clock.tick(60)
self._dt = 1.0 / clamp(self._clock.get_fps(), 30, 90)
@property
def state(self):
x = self._ball.pos['x'] // (self._screen_size[0] // 4)
y = self._ball.pos['y'] // (self._screen_size[1] // 4)
bpos = y * 4 + x + 1
state = [self._player._paddle._pos['y'], bpos,
self._ball.speed['x'], self._ball.speed['y']]
return reshape(state, Pong.OUTPUT_SHAPE)
def update(self):
for event in pygame.event.get():
if event.type == QUIT:
self._done = True
self.end = True
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
self._done = True
if self._player.score == self._max_score or self._bot.score == self._max_score:
self._done = True
restart = False
given = False
reward = Pong.NO_REWARD
for _ in range(20):
self._ball.update(self._dt / 20)
if self._ball.pos['x'] < 0:
self._bot.add_score()
reward = Pong.SCORE_REWARD
restart = True
elif self._ball.pos['x'] > self._screen_size[0]:
self._player.add_score()
restart = True
reward = Pong.ENEMY_SCORE_REWARD
#reward *= (self._player._paddle._pos['y'] -
# self._ball.pos['y'])/self._screen_size[1]
if self._ball.pos['y'] < 0 or self._ball.pos['y'] > self._screen_size[1]:
self._ball.pos['y'] = clamp(
self._ball.pos['y'], 0, self._screen_size[1])
self._ball.speed['y'] *= -1
if restart:
self._ball = Pong.Ball(
self._screen_size[0] / 2, self._screen_size[1] / 2, Pong.BALL_SPEED)
else:
tmp = self._player.collide(self._ball)
if tmp > Pong.NO_REWARD and not given:
given = True
reward = Pong.PONG_REWARD
self._bot.collide(self._ball)
return reward
def draw(self):
self.surface.fill((0, 0, 0))
self._ball.draw()
self._bot.draw()
self._player.draw()
p1_score_text = self._font.render(
"Score " + str(self._player.score), True, (255, 255, 255))
p2_score_text = self._font.render(
"Score " + str(self._bot.score), True, (255, 255, 255))
self._surface.blit(p1_score_text, (20, 20))
self._surface.blit(
p2_score_text, (self._screen_size[0] - p2_score_text.get_width() - 20, 20))
pygame.display.flip()
def execute(self, action):
keys = pygame.key.get_pressed()
s = self._player._paddle._pos['y']
self._player.update(self._dt, key=self._key_bindings[action])
e = self._player._paddle._pos['y']
self._bot.update(self._dt, self._ball.pos['y'])
self._ball.update(self._dt)
reward = self.update()
if s - e == 0:
reward -= 1
return self.state, reward, self.done
class Paddle:
def __init__(self, x, y, w, h, key_d, key_u):
self._pos = {'x': x, 'y': y}
self._dim = {'width': w, 'height': h}
self.key_d = key_d
self.key_u = key_u
def move(self, speed, dt):
self._pos['y'] = clamp(
self._pos['y'] - dt * speed, 0, Pong._screen_size[1] - self._dim['height'])
def update_with_key(self, key, dt):
if self.key_d == key:
self.move(-1 * Pong.PADDLE_SPEED, dt)
elif self.key_u == key:
self.move(Pong.PADDLE_SPEED, dt)
def update_with_keys(self, keys, dt):
if keys[self.key_d]:
self.move(-1 * Pong.PADDLE_SPEED, dt)
elif keys[self.key_u]:
self.move(Pong.PADDLE_SPEED, dt)
def collide(self, ball):
reward = Pong.NO_REWARD
if ball.pos['x'] > self._pos['x'] and ball.pos['x'] < self._pos['x'] + self._dim['width'] and \
ball.pos['y'] > self._pos['y'] and ball.pos['y'] < self._pos['y'] + self._dim['height']:
dist_lrdu = [
ball.pos['x'] - self._pos['x'],
(self._pos['x'] + self._dim['width']) - ball.pos['x'],
(self._pos['y'] + self._dim['height']) - ball.pos['y'],
ball.pos['y'] - self._pos['y'],
]
reward = Pong.PONG_REWARD
dist_min = min(dist_lrdu)
if dist_min == dist_lrdu[0]:
ball.speed['x'] = -abs(ball.speed['x'])
elif dist_min == dist_lrdu[1]:
ball.speed['x'] = abs(ball.speed['x'])
elif dist_min == dist_lrdu[2]:
ball.speed['y'] = abs(ball.speed['y'])
elif dist_min == dist_lrdu[3]:
ball.speed['y'] = -abs(ball.speed['y'])
return reward
def draw(self, color):
pygame.draw.rect(Pong._surface, color,
(self._pos['x'], self._pos['y'], self._dim['width'], self._dim['height']), 0)
pygame.draw.rect(Pong._surface, (255, 255, 255),
(self._pos['x'], self._pos['y'], self._dim['width'], self._dim['height']), 1)
class Player:
def __init__(self, color, paddle):
self._score = 0
self._color = color
self._paddle = paddle
def add_score(self):
self._score += 1
@property
def score(self):
return self._score
def draw(self):
self._paddle.draw(self._color)
def update(self, dt, key=None, keys=None):
if key is not None:
self._paddle.update_with_key(key, dt)
elif keys is not None:
self._paddle.update_with_keys(keys, dt)
def collide(self, ball):
return self._paddle.collide(ball)
class Bot(Player):
COUNT = 0
SPEED = 0
def update(self, dt, poy):
if self._paddle._pos['y'] > poy:
self.SPEED = 1.3
else:
self.SPEED = -1.3
self._paddle.move(1.5 * self.SPEED * Pong.PADDLE_SPEED, dt)
self.COUNT += 1
class Ball:
def __init__(self, x, y, speed):
self.pos = {'x': x, 'y': y}
angle = math.pi / 2
while abs(math.cos(angle)) < 0.2 or abs(math.cos(angle)) > 0.8:
angle = math.radians(random.randint(0, 360))
self.speed = {'x': speed *
math.cos(angle), 'y': speed * math.sin(angle)}
self.radius = 4
def update(self, dt):
self.pos['x'] += dt * self.speed['x']
self.pos['y'] += dt * self.speed['y']
def draw(self):
pygame.draw.circle(Pong._surface, (255, 255, 255), [rndint(self.pos['x']), rndint(self.pos['y'])],
self.radius)
|
[
"random.randint",
"pygame.font.SysFont",
"pygame.event.get",
"pygame.draw.rect",
"math.sin",
"pygame.display.flip",
"numpy.reshape",
"math.cos",
"pygame.time.Clock",
"pygame.key.get_pressed"
] |
[((701, 743), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Times New Roman"""', '(18)'], {}), "('Times New Roman', 18)\n", (720, 743), False, 'import pygame\n'), ((1344, 1363), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1361, 1363), False, 'import pygame\n'), ((1808, 1841), 'numpy.reshape', 'reshape', (['state', 'Pong.OUTPUT_SHAPE'], {}), '(state, Pong.OUTPUT_SHAPE)\n', (1815, 1841), False, 'from numpy import reshape\n'), ((1886, 1904), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1902, 1904), False, 'import pygame\n'), ((4074, 4095), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4093, 4095), False, 'import pygame\n'), ((4143, 4167), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (4165, 4167), False, 'import pygame\n'), ((6533, 6653), 'pygame.draw.rect', 'pygame.draw.rect', (['Pong._surface', 'color', "(self._pos['x'], self._pos['y'], self._dim['width'], self._dim['height'])", '(0)'], {}), "(Pong._surface, color, (self._pos['x'], self._pos['y'],\n self._dim['width'], self._dim['height']), 0)\n", (6549, 6653), False, 'import pygame\n'), ((6691, 6822), 'pygame.draw.rect', 'pygame.draw.rect', (['Pong._surface', '(255, 255, 255)', "(self._pos['x'], self._pos['y'], self._dim['width'], self._dim['height'])", '(1)'], {}), "(Pong._surface, (255, 255, 255), (self._pos['x'], self._pos\n ['y'], self._dim['width'], self._dim['height']), 1)\n", (6707, 6822), False, 'import pygame\n'), ((8083, 8105), 'random.randint', 'random.randint', (['(0)', '(360)'], {}), '(0, 360)\n', (8097, 8105), False, 'import random\n'), ((8172, 8187), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (8180, 8187), False, 'import math\n'), ((8202, 8217), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (8210, 8217), False, 'import math\n'), ((7992, 8007), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (8000, 8007), False, 'import math\n'), ((8022, 8037), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', 
(8030, 8037), False, 'import math\n')]
|
import os
import unittest
import warnings
# prevent excessive warning logs
warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import random
from sklearn.model_selection import train_test_split
from finetune import ComparisonRegressor
class TestComparisonRegression(unittest.TestCase):
n_sample = 100
def default_config(self, **kwargs):
d = dict(
batch_size=2,
max_length=16,
n_epochs=3,
val_size=0.,
l2_reg=0.,
low_memory_mode=True,
)
d.update(kwargs)
return d
def setUp(self):
random.seed(42)
np.random.seed(42)
def test_reasonable_predictions(self):
"""
Ensure model training does not error out
Ensure model returns predictions of the right type
Test model loss at least outperforms naive baseline
"""
model = ComparisonRegressor(**self.default_config())
# fake dataset generation
animals = ["dog", "cat", "horse", "cow", "pig", "sheep", "goat", "chicken", "guinea pig", "donkey", "turkey", "duck", "camel", "goose", "llama", "rabbit", "fox"]
numbers = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen"]
n_per = 150
similar = []
different = []
for dataset in [animals, numbers]:
for i in range(n_per // 2):
similar.append([random.choice(dataset), random.choice(dataset)])
for i in range(n_per):
different.append([random.choice(animals), random.choice(numbers)])
targets = np.asarray([1] * len(similar) + [0] * len(different))
data = similar + different
x_tr, x_te, t_tr, t_te = train_test_split(data, targets, test_size=0.3, random_state=42)
model.finetune(x_tr, t_tr)
predictions = model.predict(x_te)
mse = np.mean([(pred - true)**2 for pred, true in zip(predictions, t_te)])
naive_baseline = max(np.mean(targets == 1), np.mean(targets == 0))
naive_baseline_mse = np.mean([(naive_baseline - true)**2 for true in t_te])
self.assertIsInstance(predictions, np.ndarray)
self.assertIsInstance(predictions[0], np.floating)
# whether it is float32 or float64 depends on whether it is run on cpu or gpu.
self.assertGreater(naive_baseline_mse, mse)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.random.seed",
"warnings.filterwarnings",
"sklearn.model_selection.train_test_split",
"random.choice",
"numpy.mean",
"random.seed"
] |
[((76, 109), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (99, 109), False, 'import warnings\n'), ((2513, 2528), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2526, 2528), False, 'import unittest\n'), ((652, 667), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (663, 667), False, 'import random\n'), ((676, 694), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (690, 694), True, 'import numpy as np\n'), ((1842, 1905), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'targets'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(data, targets, test_size=0.3, random_state=42)\n', (1858, 1905), False, 'from sklearn.model_selection import train_test_split\n'), ((2171, 2229), 'numpy.mean', 'np.mean', (['[((naive_baseline - true) ** 2) for true in t_te]'], {}), '([((naive_baseline - true) ** 2) for true in t_te])\n', (2178, 2229), True, 'import numpy as np\n'), ((2096, 2117), 'numpy.mean', 'np.mean', (['(targets == 1)'], {}), '(targets == 1)\n', (2103, 2117), True, 'import numpy as np\n'), ((2119, 2140), 'numpy.mean', 'np.mean', (['(targets == 0)'], {}), '(targets == 0)\n', (2126, 2140), True, 'import numpy as np\n'), ((1651, 1673), 'random.choice', 'random.choice', (['animals'], {}), '(animals)\n', (1664, 1673), False, 'import random\n'), ((1675, 1697), 'random.choice', 'random.choice', (['numbers'], {}), '(numbers)\n', (1688, 1697), False, 'import random\n'), ((1541, 1563), 'random.choice', 'random.choice', (['dataset'], {}), '(dataset)\n', (1554, 1563), False, 'import random\n'), ((1565, 1587), 'random.choice', 'random.choice', (['dataset'], {}), '(dataset)\n', (1578, 1587), False, 'import random\n')]
|
import torch
import numpy as np
import data
import matplotlib.pyplot as plt
import copy
class PTDeep(torch.nn.Module):
def __init__(self, conf, activation_f, param_lambda=1e-4):
"""Arguments:
:param conf: network architecture - nr_neurons in each layer
"""
self.conf = conf
self.nr_layers = len(conf)
self.activation_f = activation_f # activation function, e.g. softmax
self.D = conf[0] # nr_inputs
self.C = conf[self.nr_layers - 1] # nr_classes
super(PTDeep, self).__init__()
# initalize parameters
w = [torch.nn.Parameter(0.01 * torch.randn(conf[i], conf[i + 1])) for i in range(self.nr_layers - 1)]
b = [torch.nn.Parameter(torch.zeros(conf[i + 1])) for i in range(self.nr_layers - 1)]
self.weights = torch.nn.ParameterList(w)
self.biases = torch.nn.ParameterList(b)
self.probs = None
self.loss = None
self.param_lambda = param_lambda
self.count = None
def forward(self, X):
layer_out = torch.mm(X, self.weights[0]) + self.biases[0] # N x C
for i in range(1, self.nr_layers - 1):
h = self.activation_f(layer_out) # N x C
layer_out = torch.mm(h, self.weights[i]) + self.biases[i] # N x C
self.probs = torch.nn.functional.softmax(layer_out) # N x C
def get_loss(self, X, Y_):
N = X.shape[0]
logprobs = torch.log(self.probs) # N x C
# regularization
reg = torch.sum(torch.Tensor([torch.norm(x) for x in self.weights]))
self.loss = - torch.mean(logprobs[range(N), Y_[range(N)]]) + reg * self.param_lambda
def count_params(self):
counter = 0
for name, param in self.named_parameters():
if param.requires_grad:
print(name, param.shape)
counter += sum(param.shape)
self.count = counter
print("Total count: ", counter)
def train(model, X, Y_, param_niter=20001, param_delta=0.1):
"""Arguments:
- X: model inputs [NxD], type: torch.Tensor
- Y_: ground truth [Nx1], type: torch.Tensor
- param_niter: number of training iterations
- param_delta: learning rate
"""
N, D = X.shape[0], X.shape[1]
C = max(Y_) + 1 # nr_classes
optimizer = torch.optim.SGD(model.parameters(), lr=param_delta)
prev_loss, count = None, 0
for i in range(param_niter):
model.forward(X)
model.get_loss(X, Y_)
model.loss.backward()
if i % 1 == 0:
print("iteration {}: loss {}".format(i, model.loss))
optimizer.step()
optimizer.zero_grad()
if prev_loss is not None: # exit if no move was made for 100 iterations
if abs(model.loss - prev_loss) < 1e-9:
count += 1
else:
count = 0
if count > 100:
break
prev_loss = model.loss
return
def train_mb(model, X, Y_, param_niter=20001, param_delta=0.1, batch_size=500):
"""Arguments:
- X: model inputs [NxD], type: torch.Tensor
- Y_: ground truth [Nx1], type: torch.Tensor
- param_niter: number of training iterations
- param_delta: learning rate
"""
N, D = X.shape[0], X.shape[1]
C = max(Y_) + 1 # nr_classes
optimizer = torch.optim.SGD(model.parameters(), lr=param_delta)
prev_loss, count = None, 0
for i in range(param_niter):
indices = list() # choose
while len(indices) < batch_size:
x = np.random.randint(0, N-1)
if x not in indices:
indices.append(x)
x_train = X[indices]
y_train = Y_[indices]
model.forward(x_train)
model.get_loss(x_train, y_train)
model.loss.backward()
if i % 1 == 0:
print("iteration {}: loss {}".format(i, model.loss))
optimizer.step()
optimizer.zero_grad()
if prev_loss is not None: # exit if no move was made for 100 iterations
if abs(model.loss - prev_loss) < 1e-9:
count += 1
else:
count = 0
if count > 100:
break
prev_loss = model.loss
return
# similar to train, but reports testset performanse after each iteration
def early_stopping_train(model, X, Y_, x_test, y_test, param_niter=20001, param_delta=0.1):
"""Arguments:
- X: model inputs [NxD], type: torch.Tensor
- Y_: ground truth [Nx1], type: torch.Tensor
- param_niter: number of training iterations
- param_delta: learning rate
"""
best_model, best_accuracy = None, 0
N, D = X.shape[0], X.shape[1]
C = max(Y_) + 1 # nr_classes
optimizer = torch.optim.SGD(model.parameters(), lr=param_delta)
prev_loss, count = None, 0
for i in range(param_niter):
model.forward(X)
model.get_loss(X, Y_)
model.loss.backward()
if i % 1 == 0:
print("iteration {}: loss {}".format(i, model.loss))
optimizer.step()
optimizer.zero_grad()
if prev_loss is not None: # exit if no move was made for 100 iterations
if abs(model.loss - prev_loss) < 1e-9:
count += 1
else:
count = 0
if count > 100:
break
prev_loss = model.loss
# evaluate the model on the test dataset
probs = eval(model, x_test)
Y = np.argmax(probs, axis=1)
accuracy, recall, matrix = data.eval_perf_multi(Y, y_test)
print("Current accuracy on testset: ", accuracy)
if accuracy > best_accuracy:
best_model = copy.copy(model)
best_accuracy = accuracy
return best_model
def train_adam(model, X, Y_, param_niter=20001, param_delta=0.1):
"""Arguments:
- X: model inputs [NxD], type: torch.Tensor
- Y_: ground truth [Nx1], type: torch.Tensor
- param_niter: number of training iterations
- param_delta: learning rate
"""
N, D = X.shape[0], X.shape[1]
C = max(Y_) + 1 # nr_classes
optimizer = torch.optim.Adam(model.parameters(), lr=param_delta)
prev_loss, count = None, 0
for i in range(param_niter):
model.forward(X)
model.get_loss(X, Y_)
model.loss.backward()
if i % 1 == 0:
print("iteration {}: loss {}".format(i, model.loss))
optimizer.step()
optimizer.zero_grad()
if prev_loss is not None: # exit if no move was made for 100 iterations
if abs(model.loss - prev_loss) < 1e-9:
count += 1
else:
count = 0
if count > 100:
break
prev_loss = model.loss
return
# adam learner with variable learning rate
def train_variable_adam(model, X, Y_, param_niter=20001, param_delta=0.1):
"""Arguments:
- X: model inputs [NxD], type: torch.Tensor
- Y_: ground truth [Nx1], type: torch.Tensor
- param_niter: number of training iterations
- param_delta: learning rate
"""
N, D = X.shape[0], X.shape[1]
C = max(Y_) + 1 # nr_classes
optimizer = torch.optim.Adam(model.parameters(), lr=param_delta)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=1-1e-4)
prev_loss, count = None, 0
for i in range(param_niter):
model.forward(X)
model.get_loss(X, Y_)
model.loss.backward()
if i % 1 == 0:
print("iteration {}: loss {}".format(i, model.loss))
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if prev_loss is not None: # exit if no move was made for 100 iterations
if abs(model.loss - prev_loss) < 1e-9:
count += 1
else:
count = 0
if count > 100:
break
prev_loss = model.loss
return
def eval(model, X):
""" Arguments:
:param X: type: PTLogreg
:param model: actual datapoints [NxD], type: np.array
Returns: predicted class probabilites [NxC], type: np.array
"""
torch_X = torch.Tensor(X)
det_w, det_b = list(), list()
[det_w.append(torch.Tensor.detach(x)) for x in model.weights]
[det_b.append(torch.Tensor.detach(x)) for x in model.biases]
layer_out = torch.mm(torch_X, det_w[0]) + det_b[0] # N x C
for i in range(1, model.nr_layers - 1):
h = model.activation_f(layer_out) # N x C
layer_out = torch.mm(h, det_w[i]) + det_b[i] # N x C
model.probs = torch.nn.functional.softmax(layer_out) # N x C
return torch.Tensor.numpy(model.probs) # N x C
def logreg_decfun(model):
'''
Wrapper that feeds graph_surface function
'''
def classify(X):
probs = eval(model, X)
return np.argmax(probs, axis=1)
return classify
# recreating logreg from pt_logreg module by using [2, 3] architecture
def task1():
# get the training dataset
nr_classes, nr_samples = 3, 100
# nr_classes, nr_samples per class
X, Y_ = data.sample_gauss_2d(nr_classes, nr_samples)
N, D = X.shape[0], X.shape[1]
C = max(Y_) + 1 # nr_classes
# train the model
model = PTDeep([D, C], torch.relu)
train(model, torch.Tensor(X), Y_)
# evaluate the model on the training dataset
probs = eval(model, X)
Y = np.argmax(probs, axis=1)
# report performance
accuracy, recall, matrix = data.eval_perf_multi(Y, Y_)
print("Accuracy: ", accuracy, "\nRecall: ", recall, "\nMatrix:\n", matrix)
# plot graph
decfun = logreg_decfun(model)
rect = (np.min(X, axis=0), np.max(X, axis=0))
data.graph_surface(decfun, rect, offset=0.5)
nr = nr_classes * nr_samples
data.graph_data(X, Y_.reshape(nr, ), Y.reshape(nr, ), special=[])
# show the plot
model.count_params()
plt.show()
# testing on more difficult datasets and various architectures
def experiment(architecture, data_conf, function):
    """Train ``architecture`` with activation ``function`` on one GMM dataset and report it."""
    nr_components, nr_classes, nr_samples = data_conf
    # nr_components Gaussians, nr_classes classes, nr_samples points per component
    X, Y_ = data.sample_gmm_2d(nr_components, nr_classes, nr_samples)
    # build and train the model
    model = PTDeep(architecture, function)
    train(model, torch.Tensor(X), Y_)
    # evaluate the model on the training dataset
    Y = np.argmax(eval(model, X), axis=1)
    # report performance
    accuracy, recall, matrix = data.eval_perf_multi(Y, Y_)
    print("Accuracy: ", accuracy, "\nRecall: ", recall, "\nMatrix:\n", matrix)
    # decision surface plus the datapoints
    bounds = (np.min(X, axis=0), np.max(X, axis=0))
    data.graph_surface(logreg_decfun(model), bounds, offset=0.5)
    total = nr_components * nr_samples
    data.graph_data(X, Y_.reshape(total), Y.reshape(total), special=[])
    model.count_params()
    print("Experiment done!\nArchitecture: {}\nData_conf: {}".format(architecture, data_conf))
def do_experiments_relu():
    """Run every architecture / dataset combination with the ReLU activation."""
    layer_configs = [[2, 2], [2, 10, 2], [2, 10, 10, 2]]
    dataset_configs = [[4, 2, 40], [6, 2, 10]]
    for layers in layer_configs:
        for dataset in dataset_configs:
            experiment(layers, dataset, torch.relu)
            print("Function: ReLu")
            plt.show()
def do_experiments_sigmoid():
    """Run every architecture / dataset combination with the sigmoid activation."""
    layer_configs = [[2, 2], [2, 10, 2], [2, 10, 10, 2]]
    dataset_configs = [[4, 2, 40], [6, 2, 10]]
    for layers in layer_configs:
        for dataset in dataset_configs:
            experiment(layers, dataset, torch.sigmoid)
            print("Function: sigmoid")
            plt.show()
if __name__ == "__main__":
np.random.seed(100)
# task1() # basic test
do_experiments_sigmoid()
do_experiments_relu()
|
[
"data.graph_surface",
"numpy.random.seed",
"numpy.argmax",
"torch.mm",
"torch.randn",
"data.sample_gmm_2d",
"data.sample_gauss_2d",
"numpy.random.randint",
"numpy.max",
"torch.Tensor",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.nn.ParameterList",
"torch.zeros",
"torch.log",
"matplotlib.pyplot.show",
"torch.Tensor.numpy",
"torch.norm",
"numpy.min",
"data.eval_perf_multi",
"copy.copy",
"torch.nn.functional.softmax",
"torch.Tensor.detach"
] |
[((7247, 7314), 'torch.optim.lr_scheduler.ExponentialLR', 'torch.optim.lr_scheduler.ExponentialLR', (['optimizer'], {'gamma': '(1 - 0.0001)'}), '(optimizer, gamma=1 - 0.0001)\n', (7285, 7314), False, 'import torch\n'), ((8145, 8160), 'torch.Tensor', 'torch.Tensor', (['X'], {}), '(X)\n', (8157, 8160), False, 'import torch\n'), ((8567, 8605), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['layer_out'], {}), '(layer_out)\n', (8594, 8605), False, 'import torch\n'), ((8627, 8658), 'torch.Tensor.numpy', 'torch.Tensor.numpy', (['model.probs'], {}), '(model.probs)\n', (8645, 8658), False, 'import torch\n'), ((9076, 9120), 'data.sample_gauss_2d', 'data.sample_gauss_2d', (['nr_classes', 'nr_samples'], {}), '(nr_classes, nr_samples)\n', (9096, 9120), False, 'import data\n'), ((9374, 9398), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (9383, 9398), True, 'import numpy as np\n'), ((9456, 9483), 'data.eval_perf_multi', 'data.eval_perf_multi', (['Y', 'Y_'], {}), '(Y, Y_)\n', (9476, 9483), False, 'import data\n'), ((9669, 9713), 'data.graph_surface', 'data.graph_surface', (['decfun', 'rect'], {'offset': '(0.5)'}), '(decfun, rect, offset=0.5)\n', (9687, 9713), False, 'import data\n'), ((9867, 9877), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9875, 9877), True, 'import matplotlib.pyplot as plt\n'), ((10130, 10187), 'data.sample_gmm_2d', 'data.sample_gmm_2d', (['nr_components', 'nr_classes', 'nr_samples'], {}), '(nr_components, nr_classes, nr_samples)\n', (10148, 10187), False, 'import data\n'), ((10411, 10435), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (10420, 10435), True, 'import numpy as np\n'), ((10493, 10520), 'data.eval_perf_multi', 'data.eval_perf_multi', (['Y', 'Y_'], {}), '(Y, Y_)\n', (10513, 10520), False, 'import data\n'), ((10706, 10750), 'data.graph_surface', 'data.graph_surface', (['decfun', 'rect'], {'offset': '(0.5)'}), '(decfun, rect, offset=0.5)\n', (10724, 
10750), False, 'import data\n'), ((11602, 11621), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (11616, 11621), True, 'import numpy as np\n'), ((821, 846), 'torch.nn.ParameterList', 'torch.nn.ParameterList', (['w'], {}), '(w)\n', (843, 846), False, 'import torch\n'), ((869, 894), 'torch.nn.ParameterList', 'torch.nn.ParameterList', (['b'], {}), '(b)\n', (891, 894), False, 'import torch\n'), ((1318, 1356), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['layer_out'], {}), '(layer_out)\n', (1345, 1356), False, 'import torch\n'), ((1440, 1461), 'torch.log', 'torch.log', (['self.probs'], {}), '(self.probs)\n', (1449, 1461), False, 'import torch\n'), ((5471, 5495), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (5480, 5495), True, 'import numpy as np\n'), ((5531, 5562), 'data.eval_perf_multi', 'data.eval_perf_multi', (['Y', 'y_test'], {}), '(Y, y_test)\n', (5551, 5562), False, 'import data\n'), ((8343, 8370), 'torch.mm', 'torch.mm', (['torch_X', 'det_w[0]'], {}), '(torch_X, det_w[0])\n', (8351, 8370), False, 'import torch\n'), ((8826, 8850), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (8835, 8850), True, 'import numpy as np\n'), ((9268, 9283), 'torch.Tensor', 'torch.Tensor', (['X'], {}), '(X)\n', (9280, 9283), False, 'import torch\n'), ((9627, 9644), 'numpy.min', 'np.min', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (9633, 9644), True, 'import numpy as np\n'), ((9646, 9663), 'numpy.max', 'np.max', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (9652, 9663), True, 'import numpy as np\n'), ((10305, 10320), 'torch.Tensor', 'torch.Tensor', (['X'], {}), '(X)\n', (10317, 10320), False, 'import torch\n'), ((10664, 10681), 'numpy.min', 'np.min', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (10670, 10681), True, 'import numpy as np\n'), ((10683, 10700), 'numpy.max', 'np.max', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (10689, 10700), True, 'import numpy as np\n'), ((1061, 1089), 
'torch.mm', 'torch.mm', (['X', 'self.weights[0]'], {}), '(X, self.weights[0])\n', (1069, 1089), False, 'import torch\n'), ((3541, 3568), 'numpy.random.randint', 'np.random.randint', (['(0)', '(N - 1)'], {}), '(0, N - 1)\n', (3558, 3568), True, 'import numpy as np\n'), ((5682, 5698), 'copy.copy', 'copy.copy', (['model'], {}), '(model)\n', (5691, 5698), False, 'import copy\n'), ((8213, 8235), 'torch.Tensor.detach', 'torch.Tensor.detach', (['x'], {}), '(x)\n', (8232, 8235), False, 'import torch\n'), ((8279, 8301), 'torch.Tensor.detach', 'torch.Tensor.detach', (['x'], {}), '(x)\n', (8298, 8301), False, 'import torch\n'), ((8506, 8527), 'torch.mm', 'torch.mm', (['h', 'det_w[i]'], {}), '(h, det_w[i])\n', (8514, 8527), False, 'import torch\n'), ((11258, 11268), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11266, 11268), True, 'import matplotlib.pyplot as plt\n'), ((11558, 11568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11566, 11568), True, 'import matplotlib.pyplot as plt\n'), ((736, 760), 'torch.zeros', 'torch.zeros', (['conf[i + 1]'], {}), '(conf[i + 1])\n', (747, 760), False, 'import torch\n'), ((1241, 1269), 'torch.mm', 'torch.mm', (['h', 'self.weights[i]'], {}), '(h, self.weights[i])\n', (1249, 1269), False, 'import torch\n'), ((633, 666), 'torch.randn', 'torch.randn', (['conf[i]', 'conf[i + 1]'], {}), '(conf[i], conf[i + 1])\n', (644, 666), False, 'import torch\n'), ((1534, 1547), 'torch.norm', 'torch.norm', (['x'], {}), '(x)\n', (1544, 1547), False, 'import torch\n')]
|
"""Search for rooms where maximizing correlational coefficient leads to
accurate clustering
"""
import multiprocessing
import csv
import sys
from copy import deepcopy
import numpy as np
from ..data_loader import config_loader
from . import strict_ga
def _choose_random_room(total_room_count: int, target_room_count: int):
return np.random.choice(
total_room_count, size=target_room_count, replace=False)
# Cross-process progress counter shared by the pool workers ('i' = C int).
_COUNTER_ = multiprocessing.Value('i', 0, lock=True)
def _eval_room_config(config: config_loader.ColocationConfig):
    """Run one strict-GA evaluation and report progress on stderr.

    Returns the accuracy achieved for the room subset in ``config``.
    """
    with _COUNTER_.get_lock():
        _COUNTER_.value += 1
        progress = '{}/{}\n'.format(_COUNTER_.value, config.searching_count)
        sys.stderr.write(progress)
    _, accuracy, _ = strict_ga.run(config)
    return accuracy
def run(config: config_loader.ColocationConfig):
    """Search for accurate room groups.

    Evaluates ``config.searching_count`` random room subsets with the strict
    GA and writes every subset together with its accuracy to
    ``<base_file_name>search_result.csv``.

    Args:
        config (config_loader.ColocationConfig): Configuration

    Returns:
        ``(0, 0, {})`` — the real results live in the CSV file.
    """
    search_configs = []
    np.random.seed(config.seed)
    for _ in range(config.searching_count):
        new_config = deepcopy(config)
        new_config.selected_rooms = list(
            _choose_random_room(config.total_room_count, config.room_count))
        search_configs.append(new_config)
    sys.stderr.write('Loaded all configs\n')

    # BUG FIX: close/join the worker pool deterministically instead of
    # leaking it until interpreter exit.
    with multiprocessing.Pool(1) as pool:
        accuracies = list(
            pool.map(_eval_room_config, search_configs, chunksize=8))

    with open(
            config.base_file_name + 'search_result.csv', 'w',
            newline='') as file:
        header = [str(i) for i in range(config.room_count)]
        header.append('accuracy')
        csv_writer = csv.writer(file)
        # BUG FIX: the header row was built but never written.
        csv_writer.writerow(header)
        for (conf, accuracy) in zip(search_configs, accuracies):
            csv_writer.writerow([*conf.selected_rooms, accuracy])
    return 0, 0, {}
|
[
"numpy.random.choice",
"copy.deepcopy",
"numpy.random.seed",
"csv.writer",
"multiprocessing.Value",
"multiprocessing.Pool",
"sys.stderr.write"
] |
[((432, 472), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {'lock': '(True)'}), "('i', 0, lock=True)\n", (453, 472), False, 'import multiprocessing\n'), ((335, 408), 'numpy.random.choice', 'np.random.choice', (['total_room_count'], {'size': 'target_room_count', 'replace': '(False)'}), '(total_room_count, size=target_room_count, replace=False)\n', (351, 408), True, 'import numpy as np\n'), ((996, 1023), 'numpy.random.seed', 'np.random.seed', (['config.seed'], {}), '(config.seed)\n', (1010, 1023), True, 'import numpy as np\n'), ((1273, 1313), 'sys.stderr.write', 'sys.stderr.write', (['"""Loaded all configs\n"""'], {}), "('Loaded all configs\\n')\n", (1289, 1313), False, 'import sys\n'), ((1325, 1348), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(1)'], {}), '(1)\n', (1345, 1348), False, 'import multiprocessing\n'), ((1090, 1106), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (1098, 1106), False, 'from copy import deepcopy\n'), ((1667, 1683), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (1677, 1683), False, 'import csv\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : fib.py
# @Date : 2019-02-15
# @Author : luyang(<EMAIL>)
import numpy as np
from prettytable import PrettyTable
def fib(month, produce, young):
    """Simulate rabbit-pair growth for ``month`` months.

    Each mature pair produces ``produce`` young pairs per month, and every
    young pair matures after one month.  The simulation starts with ``young``
    young pairs and no mature ones.

    Returns an array of shape (month, 2): column 0 holds young pairs and
    column 1 mature pairs, one row per month.
    """
    rabbits = np.zeros([month, 2])
    # month 1: only the initial young pairs exist
    rabbits[0] = [young, 0]
    for m in range(1, month):
        previous_young, previous_mature = rabbits[m - 1]
        # next month's young <- last month's mature * produce
        rabbits[m, 0] = produce * previous_mature
        # next month's mature <- everything alive last month
        rabbits[m, 1] = previous_young + previous_mature
    return rabbits
def main():
    """Read (month, produce) from the Rosalind input file and print a growth table."""
    path = 'input/rosalind_fib.txt'
    with open(path) as handle:
        tokens = handle.readline().split()
    month = int(tokens[0])
    produce = int(tokens[1])
    young = 1  # the problem always starts with one young pair
    rabbits = fib(month, produce, young)
    table = PrettyTable(['month', 'young', 'mature', 'total'])
    for m in range(month):
        table.add_row([m + 1, rabbits[m, 0], rabbits[m, 1], rabbits[m].sum()])
    print(table)
if __name__ == "__main__":
main()
|
[
"numpy.zeros",
"prettytable.PrettyTable"
] |
[((225, 245), 'numpy.zeros', 'np.zeros', (['[month, 2]'], {}), '([month, 2])\n', (233, 245), True, 'import numpy as np\n'), ((806, 856), 'prettytable.PrettyTable', 'PrettyTable', (["['month', 'young', 'mature', 'total']"], {}), "(['month', 'young', 'mature', 'total'])\n", (817, 856), False, 'from prettytable import PrettyTable\n')]
|
""" Rasterize
Module to rasterize a shapely polygon to a Numpy Array
Author: <NAME>
(University of Antwerp, Belgium)
"""
import numpy as np
from rasterio.features import rasterize as rasterioRasterize
from shapely.geometry import Polygon, MultiPolygon, Point
from np2Geotiff import *
def rasterize(mpol, res = 1, return_minmaxs = False, save_as_geotiff = False, epsg = 4326):
    """
    Rasterize a shapely (Multi)Polygon onto a regular grid.

    Args:
        mpol: (Required) Shapely (Multi)Polygon to rasterize
        res: (Optional, defaults to 1) float/integer representing the cell size
        return_minmaxs: (Optional, defaults to False) when True, also return the
            min/max coordinates of the polygon, the top-left corner and the resolution
        save_as_geotiff: (Optional, defaults to False) False, or a path string
            where a GeoTIFF copy of the raster is written
        epsg: (Optional, defaults to 4326 (WGS84)) Integer epsg code used to project the geotiff

    Returns:
        Numpy array with dimensions n x m with 1 on cells covering the original
        polygons, optionally followed by (xmin, ymin, xmax, ymax, TL, res)
    """
    # BUG FIX: isinstance (not "type(...) ==") also accepts Polygon subclasses.
    if isinstance(mpol, Polygon):
        mpol = MultiPolygon([mpol])
    # initialize limits
    xmin = ymin = float('inf')
    xmax = ymax = float('-inf')
    # search min and max coordinates along polygons to determine raster extent.
    # FIX: iterate .geoms — direct iteration over a MultiPolygon was deprecated
    # in shapely 1.8 and removed in shapely 2.0.
    for pol in mpol.geoms:
        xy = np.rot90(pol.exterior.xy)
        if np.min(xy[:, 0]) < xmin: xmin = np.min(xy[:, 0])
        if np.max(xy[:, 0]) > xmax: xmax = np.max(xy[:, 0])
        if np.min(xy[:, 1]) < ymin: ymin = np.min(xy[:, 1])
        if np.max(xy[:, 1]) > ymax: ymax = np.max(xy[:, 1])
    # raster dimensions
    rows = int(np.ceil((ymax - ymin + 1)/res))
    cols = int(np.ceil((xmax - xmin + 1)/res))
    TL = [xmin, ymax] # coordinates of the top left corner of the top left corner cell
    # initialize output array
    arr = np.zeros((rows, cols))
    # loop over all polygons in MultiPolygon
    for pol in mpol.geoms:
        # extract exterior coordinate pairs
        xy = np.rot90(pol.exterior.xy)
        # shift coordinates so that the relative zeropoint is (0, 0)
        xy[:,0] = (xy[:,0] - xmin)/res
        xy[:,1] = (xy[:,1] - ymin)/res
        # rasterize the exterior polygon and accumulate it
        layer = rasterioRasterize([Polygon(xy)], out_shape=(rows, cols))
        arr += layer
        for i in pol.interiors:
            # interior rings (holes) are rasterized and subtracted
            xy = np.rot90(i.xy)
            xy[:,0] = (xy[:,0] - xmin)/res
            xy[:,1] = (xy[:,1] - ymin)/res
            layer = rasterioRasterize([Polygon(xy)], out_shape=(rows, cols))
            layer = np.array(layer, dtype = np.uint8)
            arr -= layer
    # collapse overlap counts to a boolean mask
    arr[arr > 0] = 1
    # flip it so north is up when plotted
    arr = np.flip(arr, axis = 0)
    if save_as_geotiff:
        arr2Geotiff(arr, save_as_geotiff, TL, res, epsg)
    if return_minmaxs:
        return arr, xmin, ymin, xmax, ymax, TL, res
    else:
        return arr
|
[
"numpy.flip",
"numpy.ceil",
"shapely.geometry.Polygon",
"numpy.zeros",
"shapely.geometry.MultiPolygon",
"numpy.min",
"numpy.rot90",
"numpy.max",
"numpy.array"
] |
[((2038, 2060), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (2046, 2060), True, 'import numpy as np\n'), ((3295, 3315), 'numpy.flip', 'np.flip', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (3302, 3315), True, 'import numpy as np\n'), ((1301, 1321), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (['[mpol]'], {}), '([mpol])\n', (1313, 1321), False, 'from shapely.geometry import Polygon, MultiPolygon, Point\n'), ((1523, 1548), 'numpy.rot90', 'np.rot90', (['pol.exterior.xy'], {}), '(pol.exterior.xy)\n', (1531, 1548), True, 'import numpy as np\n'), ((1830, 1862), 'numpy.ceil', 'np.ceil', (['((ymax - ymin + 1) / res)'], {}), '((ymax - ymin + 1) / res)\n', (1837, 1862), True, 'import numpy as np\n'), ((1877, 1909), 'numpy.ceil', 'np.ceil', (['((xmax - xmin + 1) / res)'], {}), '((xmax - xmin + 1) / res)\n', (1884, 1909), True, 'import numpy as np\n'), ((2185, 2210), 'numpy.rot90', 'np.rot90', (['pol.exterior.xy'], {}), '(pol.exterior.xy)\n', (2193, 2210), True, 'import numpy as np\n'), ((1561, 1577), 'numpy.min', 'np.min', (['xy[:, 0]'], {}), '(xy[:, 0])\n', (1567, 1577), True, 'import numpy as np\n'), ((1593, 1609), 'numpy.min', 'np.min', (['xy[:, 0]'], {}), '(xy[:, 0])\n', (1599, 1609), True, 'import numpy as np\n'), ((1621, 1637), 'numpy.max', 'np.max', (['xy[:, 0]'], {}), '(xy[:, 0])\n', (1627, 1637), True, 'import numpy as np\n'), ((1653, 1669), 'numpy.max', 'np.max', (['xy[:, 0]'], {}), '(xy[:, 0])\n', (1659, 1669), True, 'import numpy as np\n'), ((1681, 1697), 'numpy.min', 'np.min', (['xy[:, 1]'], {}), '(xy[:, 1])\n', (1687, 1697), True, 'import numpy as np\n'), ((1713, 1729), 'numpy.min', 'np.min', (['xy[:, 1]'], {}), '(xy[:, 1])\n', (1719, 1729), True, 'import numpy as np\n'), ((1741, 1757), 'numpy.max', 'np.max', (['xy[:, 1]'], {}), '(xy[:, 1])\n', (1747, 1757), True, 'import numpy as np\n'), ((1773, 1789), 'numpy.max', 'np.max', (['xy[:, 1]'], {}), '(xy[:, 1])\n', (1779, 1789), True, 'import numpy as np\n'), ((2696, 2710), 
'numpy.rot90', 'np.rot90', (['i.xy'], {}), '(i.xy)\n', (2704, 2710), True, 'import numpy as np\n'), ((3045, 3076), 'numpy.array', 'np.array', (['layer'], {'dtype': 'np.uint8'}), '(layer, dtype=np.uint8)\n', (3053, 3076), True, 'import numpy as np\n'), ((2464, 2475), 'shapely.geometry.Polygon', 'Polygon', (['xy'], {}), '(xy)\n', (2471, 2475), False, 'from shapely.geometry import Polygon, MultiPolygon, Point\n'), ((2987, 2998), 'shapely.geometry.Polygon', 'Polygon', (['xy'], {}), '(xy)\n', (2994, 2998), False, 'from shapely.geometry import Polygon, MultiPolygon, Point\n')]
|
import json
import PIL
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import tensor
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
import torchvision.models as models
import argparse
from collections import OrderedDict
import seaborn as sns
import time
# Dataset layout: one sub-folder per split, arranged for torchvision ImageFolder.
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# Training pipeline applies random augmentation; normalisation constants are
# the ImageNet means/stds the pre-trained backbones expect.
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])
# Evaluation pipeline: deterministic resize + centre crop, no augmentation.
test_transforms = transforms.Compose([transforms.Resize(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])
# NOTE: these run at import time and require the 'flowers' directory to exist.
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=test_transforms)
# Only the training loader shuffles; evaluation order does not matter.
train_dataloaders = torch.utils.data.DataLoader(train_data, batch_size=60, shuffle=True)
test_dataloaders = torch.utils.data.DataLoader(test_data, batch_size=32)
valid_dataloaders = torch.utils.data.DataLoader(valid_data, batch_size=32)
def getDevice(gpu=False):
    """Return the device to run on.

    With ``gpu`` falsy this is always the string "cpu"; with ``gpu`` truthy it
    is a torch.device — CUDA when available, CPU otherwise.
    """
    if not gpu:
        return "cpu"
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")
def validation(model, valid_loader, criterion, gpu=False):
    """Accumulate loss and accuracy of ``model`` over a validation loader.

    Returns (summed loss, summed per-batch mean accuracy); divide both by
    ``len(valid_loader)`` to get averages.
    """
    device = getDevice(gpu)
    model.to(device)
    valid_loss = 0
    accuracy = 0
    for images, labels in valid_loader:
        images, labels = images.to(device), labels.to(device)
        output = model.forward(images)
        valid_loss += criterion(output, labels).item()
        # Accuracy: fraction of samples whose arg-max probability matches the label.
        ps = torch.exp(output)
        equality = labels.data == ps.max(dim=1)[1]
        accuracy += equality.type(torch.FloatTensor).mean()
    return valid_loss, accuracy
def get_pre_trained_model(arch='vgg16'):
    """Fetch a pre-trained torchvision backbone for ``arch``.

    Preserved quirk: an unsupported architecture yields an empty string.
    """
    factories = {
        'vgg16': models.vgg16,
        'densenet121': models.densenet121,
        'alexnet': models.alexnet,
    }
    if arch in factories:
        return factories[arch](pretrained=True)
    return ""
def network(arch='alexnet', lr=0.001, hidden_units=512):
    """Build a classifier head on a frozen pre-trained backbone.

    Returns (model, criterion, optimizer); prints a message and returns None
    when ``arch`` is not one of vgg16 / densenet121 / alexnet.
    """
    input_sizes = {"vgg16": 25088, "densenet121": 1024, "alexnet": 9216}
    if arch not in input_sizes:
        print('Sorry you can select the following achitectures vgg16,densenet121 or alexnet')
        return
    model = get_pre_trained_model(arch)
    # Freeze the convolutional features; only the new classifier is trained.
    for param in model.parameters():
        param.requires_grad = False
    model.classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(input_sizes[arch], hidden_units)),
        ('relu', nn.ReLU()),
        ('dropout', nn.Dropout(0.05)),
        ('fc2', nn.Linear(hidden_units, 102)),
        ('output', nn.LogSoftmax(dim=1)),
    ]))
    # LogSoftmax output pairs with the negative log-likelihood loss.
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr)
    return model, criterion, optimizer
def save_checkpoint(arch='alexnet'):
    """Build a fresh network for ``arch`` and persist it to checkpoint.pth."""
    model, criterion, optimizer = network(arch)
    model.class_to_idx = train_data.class_to_idx
    checkpoint = {
        'structure': arch,
        'hidden_layer': 512,
        'droupout': 0.5,
        'epochs': 3,
        'state_dict': model.state_dict(),
        'class_to_idx': model.class_to_idx,
        'optimizer_dict': optimizer.state_dict(),
    }
    torch.save(checkpoint, 'checkpoint.pth')
    return True
def test_network(testloader, model, gpu):
    """Print the classification accuracy of ``model`` over ``testloader``."""
    device = getDevice(gpu)
    model.to(device)
    correct = 0
    total = 0
    # No gradients needed for pure evaluation.
    with torch.no_grad():
        for inputs, labels in testloader:
            inputs, labels = inputs.to(device), labels.to(device)
            predicted = torch.max(model(inputs).data, 1)[1]
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('\nNetwork accuracy on test network : %d %%' % (100 * correct / total))
def train_network(model, optimizer, criterion, gpu, arch, epochs=3):
    """Train ``model`` on the global flower dataloaders and checkpoint it.

    Args:
        model: network built by ``network()``.
        optimizer: optimizer over the classifier parameters.
        criterion: loss function (NLLLoss for the LogSoftmax head).
        gpu: when truthy, train on CUDA if available.
        arch: architecture name stored in the checkpoint.
        epochs: number of passes over the training set.
    """
    device = getDevice(gpu)
    steps = 0
    print_every = 40
    model.to(device)
    print('Training started ...')
    for e in range(epochs):
        running_loss = 0
        for images, labels in train_dataloaders:
            steps += 1
            images, labels = images.to(device), labels.to(device)
            # Forward pass, backward pass, then weight update.
            optimizer.zero_grad()
            outputs = model.forward(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # Periodic validation step.
            if steps % print_every == 0:
                model.eval()
                with torch.no_grad():
                    # BUG FIX: forward the gpu flag. The old call relied on
                    # validation's gpu=False default, which moved the model
                    # back to the CPU in the middle of GPU training.
                    valid_loss, accuracy = validation(
                        model, valid_dataloaders, criterion, gpu)
                print("Epoch: {}/{} | ".format(e + 1, epochs),
                      "Training Loss: {:.2f} | ".format(running_loss / print_every),
                      "Validation Loss: {:.2f} | ".format(valid_loss / len(valid_dataloaders)),
                      "Validation Accuracy: {:.2f}%".format(accuracy / len(valid_dataloaders) * 100))
                running_loss = 0
                model.train()
    print('Training ended ...')
    model.class_to_idx = train_data.class_to_idx
    # Persist the trained model alongside the metadata needed to rebuild it.
    torch.save({'structure': arch,
                'hidden_layer': 512,
                'droupout': 0.5,
                'epochs': 3,
                'state_dict': model.state_dict(),
                'class_to_idx': model.class_to_idx,
                'optimizer_dict': optimizer.state_dict()},
               'checkpoint.pth')
    test_network(test_dataloaders, model, gpu)
def load_checkpoint(path):
    """Rebuild a model from a checkpoint written by train_network/save_checkpoint."""
    checkpoint = torch.load(path)
    # Recreate the architecture first, then restore the trained weights.
    model, _, _ = network(checkpoint['structure'])
    model.class_to_idx = checkpoint['class_to_idx']
    model.load_state_dict(checkpoint['state_dict'])
    return model
def process_image(image):
    """Load an image file and convert it to a normalized 224x224 tensor."""
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]),
    ])
    return transform(Image.open(image))
def predict_image(model, image_path, device, topk):
    """Return the top-k class probabilities and class labels for one image.

    Args:
        model: trained classifier with ``class_to_idx`` attached.
        image_path: path of the image file to classify.
        device: target device (only applied when CUDA is available).
        topk: number of top classes to return.

    Returns:
        (probs, classes): the k highest probabilities and the matching class
        labels from the dataset vocabulary.
    """
    if torch.cuda.is_available():
        model.to(device)
    img = process_image(image_path).unsqueeze_(0)
    model.eval()
    with torch.no_grad():
        output = model.forward(img)
    probs = torch.exp(output)
    # FIX: compute topk once instead of twice.
    k_prob, k_index = probs.topk(topk)
    probs = k_prob.numpy()[0]
    k_index_list = k_index.numpy()[0]
    indx_to_class = {model.class_to_idx[i]: i for i in model.class_to_idx}
    # FIX: proper list comprehension instead of one run only for its
    # append side effects.
    classes = [indx_to_class[index] for index in k_index_list]
    return probs, classes
def get_cat_to_name(cat_json):
    """Load the category-id -> flower-name mapping from a JSON file."""
    with open(cat_json, 'r') as handle:
        return json.load(handle)
def get_flower_name(model, image_path, gpu, cat_to_name, topk):
    """Predict the flower name for an image and echo the raw prediction.

    Args:
        model: trained classifier with ``class_to_idx`` attached.
        image_path: path of the image file to classify.
        gpu: when truthy, run inference on CUDA if available.
        cat_to_name: path of the category-to-name JSON file.
        topk: number of top classes to consider.

    Returns:
        (flower_name, probable_names): the most likely flower name and the
        names of all top-k predicted classes.
    """
    device = getDevice(gpu)
    probs, classes = predict_image(model, image_path, device, topk)
    print("\n Below are Probabilities from predicted image")
    print(probs)
    print("\n Below are Probability classes from predicted image")
    print(classes)
    max_index = np.argmax(probs)
    title_from_class = classes[max_index]
    cat_to_name = get_cat_to_name(cat_to_name)
    # FIX: plain list comprehension instead of a set comprehension run only
    # for its append side effects.
    probable_names = [cat_to_name[str(i)] for i in classes]
    flower_name = cat_to_name[str(title_from_class)]
    return flower_name, probable_names
|
[
"torch.nn.Dropout",
"numpy.argmax",
"torch.nn.NLLLoss",
"torchvision.transforms.Normalize",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomRotation",
"torch.load",
"torch.exp",
"torch.nn.Linear",
"torchvision.transforms.CenterCrop",
"torchvision.models.vgg16",
"torchvision.transforms.RandomHorizontalFlip",
"torch.nn.LogSoftmax",
"torchvision.models.alexnet",
"torchvision.datasets.ImageFolder",
"torch.cuda.is_available",
"torch.max",
"torchvision.transforms.Resize",
"json.load",
"torch.nn.ReLU",
"torchvision.models.densenet121",
"PIL.Image.open",
"time.time",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.ToTensor"
] |
[((1349, 1408), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['train_dir'], {'transform': 'train_transforms'}), '(train_dir, transform=train_transforms)\n', (1369, 1408), False, 'from torchvision import datasets, transforms\n'), ((1421, 1478), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['test_dir'], {'transform': 'test_transforms'}), '(test_dir, transform=test_transforms)\n', (1441, 1478), False, 'from torchvision import datasets, transforms\n'), ((1492, 1550), 'torchvision.datasets.ImageFolder', 'datasets.ImageFolder', (['valid_dir'], {'transform': 'test_transforms'}), '(valid_dir, transform=test_transforms)\n', (1512, 1550), False, 'from torchvision import datasets, transforms\n'), ((1572, 1640), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': '(60)', 'shuffle': '(True)'}), '(train_data, batch_size=60, shuffle=True)\n', (1599, 1640), False, 'import torch\n'), ((1660, 1713), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': '(32)'}), '(test_data, batch_size=32)\n', (1687, 1713), False, 'import torch\n'), ((1734, 1788), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_data'], {'batch_size': '(32)'}), '(valid_data, batch_size=32)\n', (1761, 1788), False, 'import torch\n'), ((3688, 3700), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (3698, 3700), False, 'from torch import nn\n'), ((7043, 7059), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (7053, 7059), False, 'import torch\n'), ((7302, 7319), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (7312, 7319), False, 'from PIL import Image\n'), ((7732, 7757), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7755, 7757), False, 'import torch\n'), ((7944, 7961), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (7953, 7961), False, 'import torch\n'), ((8783, 8799), 'numpy.argmax', 'np.argmax', (['probs'], {}), 
'(probs)\n', (8792, 8799), True, 'import numpy as np\n'), ((570, 599), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (595, 599), False, 'from torchvision import datasets, transforms\n'), ((640, 673), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (668, 673), False, 'from torchvision import datasets, transforms\n'), ((714, 747), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (745, 747), False, 'from torchvision import datasets, transforms\n'), ((788, 809), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (807, 809), False, 'from torchvision import datasets, transforms\n'), ((850, 916), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (870, 916), False, 'from torchvision import datasets, transforms\n'), ((1018, 1040), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1035, 1040), False, 'from torchvision import datasets, transforms\n'), ((1080, 1106), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1101, 1106), False, 'from torchvision import datasets, transforms\n'), ((1146, 1167), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1165, 1167), False, 'from torchvision import datasets, transforms\n'), ((1207, 1273), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1227, 1273), False, 'from torchvision import datasets, transforms\n'), ((2440, 2457), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (2449, 2457), False, 'import torch\n'), ((2732, 2761), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), 
'(pretrained=True)\n', (2744, 2761), True, 'import torchvision.models as models\n'), ((4414, 4429), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4427, 4429), False, 'import torch\n'), ((5082, 5093), 'time.time', 'time.time', ([], {}), '()\n', (5091, 5093), False, 'import time\n'), ((7878, 7893), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7891, 7893), False, 'import torch\n'), ((8401, 8413), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8410, 8413), False, 'import json\n'), ((2810, 2845), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2828, 2845), True, 'import torchvision.models as models\n'), ((4602, 4628), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (4611, 4628), False, 'import torch\n'), ((7364, 7386), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (7381, 7386), False, 'from torchvision import datasets, transforms\n'), ((7396, 7422), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (7417, 7422), False, 'from torchvision import datasets, transforms\n'), ((7432, 7453), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7451, 7453), False, 'from torchvision import datasets, transforms\n'), ((7463, 7538), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (7483, 7538), False, 'from torchvision import datasets, transforms\n'), ((1869, 1894), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1892, 1894), False, 'import torch\n'), ((2890, 2921), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2904, 2921), True, 'import torchvision.models as models\n'), ((3426, 3470), 'torch.nn.Linear', 'nn.Linear', (['architectures[arch]', 
'hidden_units'], {}), '(architectures[arch], hidden_units)\n', (3435, 3470), False, 'from torch import nn\n'), ((3490, 3499), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3497, 3499), False, 'from torch import nn\n'), ((3522, 3538), 'torch.nn.Dropout', 'nn.Dropout', (['(0.05)'], {}), '(0.05)\n', (3532, 3538), False, 'from torch import nn\n'), ((3557, 3585), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(102)'], {}), '(hidden_units, 102)\n', (3566, 3585), False, 'from torch import nn\n'), ((3607, 3627), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (3620, 3627), False, 'from torch import nn\n'), ((5951, 5966), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5964, 5966), False, 'import torch\n')]
|
#!/usr/bin/env python3
import os
from datetime import datetime
import re
from dataclasses import dataclass
import pathlib
import numpy as np
import scipy.stats as stats
from analysis.parser.common import parse_contiki
@dataclass(frozen=True)
class LengthStats:
    """One timing sample: how long an operation on a payload of a given length took."""
    length: int  # payload length reported by the log line (presumably bytes — TODO confirm)
    seconds: float  # elapsed wall time in seconds
def us_to_s(us: int) -> float:
    """Convert a duration in microseconds to seconds."""
    microseconds_per_second = 1000000.0
    return us / microseconds_per_second
class ProfileAnalyser:
    """Collects timing statistics for cryptographic primitives from a Contiki
    "pyterm" log.

    Lines emitted by the "crypto-sup" and "profile" modules are matched
    against the compiled regexes below; each match appends a duration (in
    seconds) to the corresponding ``stats_*`` list.  sha256/encrypt/decrypt
    also record the payload length (as ``LengthStats``); the rest store
    plain floats.
    """

    RE_SHA256_END = re.compile(r'sha256\(([0-9]+)\), ([0-9]+) us')
    RE_ECDH = re.compile(r'ecdh2\(\), ([0-9]+) us')
    RE_SIGN = re.compile(r'ecc_dsa_sign\(\), ([0-9]+) us')
    RE_VERIFY = re.compile(r'ecc_dsa_verify\(\), ([0-9]+) us')
    RE_ENCRYPT = re.compile(r'encrypt\(([0-9]+)\), ([0-9]+) us')
    RE_DECRYPT = re.compile(r'decrypt\(([0-9]+)\), ([0-9]+) us')

    def __init__(self, hostname: str):
        self.hostname = hostname
        self.stats_sha256 = []
        self.stats_ecdh = []
        self.stats_sign = []
        self.stats_verify = []
        self.stats_encrypt = []
        self.stats_decrypt = []
        # Dispatch table: compiled regex -> bound handler method.
        self.res = {
            self.RE_SHA256_END: self._process_sha256_end,
            self.RE_ECDH: self._process_ecdh,
            self.RE_SIGN: self._process_sign,
            self.RE_VERIFY: self._process_verify,
            self.RE_ENCRYPT: self._process_encrypt,
            self.RE_DECRYPT: self._process_decrypt,
        }

    def analyse(self, f):
        """Parse the open log file *f* and accumulate timing statistics.

        Bug fix relative to the original: the inner loop variable was also
        named ``f``, shadowing the file argument — it is now ``handler``.
        """
        for (time, log_level, module, line) in parse_contiki(f):
            if module not in ("crypto-sup", "profile"):
                continue
            for (regex, handler) in self.res.items():
                m = regex.match(line)
                if m is not None and handler is not None:
                    handler(time, log_level, module, line, m)
                    break

    def summary(self):
        """Print scipy descriptive statistics for every populated list.

        Side effect: derives the ``*_u`` (raw seconds) and ``*_n``
        (seconds per unit length) attributes that ``global_summary`` reads
        via ``getattr``.
        """
        if self.stats_ecdh:
            print("ECDH", stats.describe(self.stats_ecdh))
        if self.stats_sign:
            print("Sign", stats.describe(self.stats_sign))
        if self.stats_verify:
            print("Verify", stats.describe(self.stats_verify))
        if self.stats_sha256:
            self.stats_sha256_u = [x.seconds for x in self.stats_sha256]
            self.stats_sha256_n = [x.seconds / x.length for x in self.stats_sha256]
            print("SHA256 (u)", stats.describe(self.stats_sha256_u))
            print("SHA256 (n)", stats.describe(self.stats_sha256_n))
        if self.stats_encrypt:
            self.stats_encrypt_u = [x.seconds for x in self.stats_encrypt]
            self.stats_encrypt_n = [x.seconds / x.length for x in self.stats_encrypt]
            print("encrypt (u)", stats.describe(self.stats_encrypt_u))
            print("encrypt (n)", stats.describe(self.stats_encrypt_n))
        if self.stats_decrypt:
            self.stats_decrypt_u = [x.seconds for x in self.stats_decrypt]
            self.stats_decrypt_n = [x.seconds / x.length for x in self.stats_decrypt]
            print("decrypt (u)", stats.describe(self.stats_decrypt_u))
            print("decrypt (n)", stats.describe(self.stats_decrypt_n))

    # NOTE: the ``m`` parameters below were annotated ``str`` in the original;
    # they are in fact regex match objects, so the annotations are corrected.

    def _process_sha256_end(self, time: datetime, log_level: str, module: str, line: str, m: re.Match):
        """Record a sha256 measurement: group 1 is the length, group 2 the µs."""
        length = int(m.group(1))
        seconds = us_to_s(int(m.group(2)))
        self.stats_sha256.append(LengthStats(length, seconds))

    def _process_ecdh(self, time: datetime, log_level: str, module: str, line: str, m: re.Match):
        """Record an ECDH key-agreement duration (group 1, microseconds)."""
        self.stats_ecdh.append(us_to_s(int(m.group(1))))

    def _process_sign(self, time: datetime, log_level: str, module: str, line: str, m: re.Match):
        """Record an ECDSA signing duration (group 1, microseconds)."""
        self.stats_sign.append(us_to_s(int(m.group(1))))

    def _process_verify(self, time: datetime, log_level: str, module: str, line: str, m: re.Match):
        """Record an ECDSA verification duration (group 1, microseconds)."""
        self.stats_verify.append(us_to_s(int(m.group(1))))

    def _process_encrypt(self, time: datetime, log_level: str, module: str, line: str, m: re.Match):
        """Record an encryption measurement (payload length + duration)."""
        self.stats_encrypt.append(LengthStats(int(m.group(1)), us_to_s(int(m.group(2)))))

    def _process_decrypt(self, time: datetime, log_level: str, module: str, line: str, m: re.Match):
        """Record a decryption measurement (payload length + duration)."""
        self.stats_decrypt.append(LengthStats(int(m.group(1)), us_to_s(int(m.group(2)))))
def print_mean_ci(name: str, x: np.array, confidence: float = 0.95):
    """Print the mean and confidence-interval half-width of *x* at several
    time scales (seconds, ms, us, ns).

    Bug fix: the original ignored the ``confidence`` argument and
    hard-coded 0.95 inside ``stats.t.interval``; it also recomputed the
    mean and standard error twice and shadowed the sample count ``n``
    with the loop variable.
    """
    mean = np.mean(x)
    sem = stats.sem(x)
    n = len(x)
    # Half-width of the two-sided Student-t confidence interval.
    lower = stats.t.interval(confidence, n - 1, loc=mean, scale=sem)[0]
    ci = mean - lower
    scales = {
        "seconds": 1,
        "ms": 1e3,
        "us": 1e6,
        "ns": 1e9,
    }
    for unit, factor in scales.items():
        print(name, mean * factor, ci * factor, unit)
def global_summary(results: "dict[str, ProfileAnalyser]"):
    """Print pooled descriptive statistics across all analysed hosts.

    Reads the ``stats_*`` attributes that ``ProfileAnalyser.summary``
    derives (missing attributes contribute nothing via ``getattr``).

    Fixes relative to the original:
    - the annotation is now a string: ``Dict`` was referenced without a
      ``typing`` import, which raised NameError at module import time;
    - an empty ``results`` mapping no longer crashes ``np.concatenate``.
    """
    names = [
        "stats_sha256_u",
        "stats_sha256_n",
        "stats_ecdh",
        "stats_sign",
        "stats_verify",
        "stats_encrypt_u",
        "stats_encrypt_n",
        "stats_decrypt_u",
        "stats_decrypt_n",
    ]
    print("Global:")
    for name in names:
        print(name)
        arrays = [getattr(x, name, []) for x in results.values()]
        # np.concatenate rejects an empty sequence, so guard it explicitly.
        combined = np.concatenate(arrays) if arrays else np.array([])
        if combined.size != 0:
            print(name, stats.describe(combined))
            print_mean_ci(name, combined)
def main(log_dir: pathlib.Path):
    """Analyse every ``profile.*.pyterm.log`` under *log_dir*.

    Returns a mapping of hostname -> ProfileAnalyser, after printing a
    per-host summary and a pooled global summary.
    """
    print(f"Looking for results in {log_dir}")
    analysers = {}
    for log_path in log_dir.glob("profile.*.pyterm.log"):
        print(f"Processing {log_path}...")
        basename = os.path.basename(log_path)
        # File names look like: profile.<hostname>.<cr>.<log...>
        kind, hostname, cr, log = basename.split(".", 3)
        if kind != "profile":
            print(f"Can only have challenge_response results from a profile node instead of {kind}")
            continue
        analyser = ProfileAnalyser(hostname)
        with open(log_path, 'r') as log_stream:
            analyser.analyse(log_stream)
        analyser.summary()
        analysers[hostname] = analyser
    global_summary(analysers)
    return analysers
if __name__ == "__main__":
    # Command-line entry point: choose the directory holding the pyterm logs.
    import argparse
    parser = argparse.ArgumentParser(description='Parse Profile pyterm')
    parser.add_argument('--log-dir', type=pathlib.Path, default="results", help='The directory which contains the log output')
    args = parser.parse_args()
    main(args.log_dir)
|
[
"argparse.ArgumentParser",
"os.path.basename",
"analysis.parser.common.parse_contiki",
"numpy.mean",
"scipy.stats.sem",
"scipy.stats.describe",
"dataclasses.dataclass",
"re.compile"
] |
[((223, 245), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (232, 245), False, 'from dataclasses import dataclass\n'), ((474, 521), 're.compile', 're.compile', (['"""sha256\\\\(([0-9]+)\\\\), ([0-9]+) us"""'], {}), "('sha256\\\\(([0-9]+)\\\\), ([0-9]+) us')\n", (484, 521), False, 'import re\n'), ((536, 574), 're.compile', 're.compile', (['"""ecdh2\\\\(\\\\), ([0-9]+) us"""'], {}), "('ecdh2\\\\(\\\\), ([0-9]+) us')\n", (546, 574), False, 'import re\n'), ((589, 634), 're.compile', 're.compile', (['"""ecc_dsa_sign\\\\(\\\\), ([0-9]+) us"""'], {}), "('ecc_dsa_sign\\\\(\\\\), ([0-9]+) us')\n", (599, 634), False, 'import re\n'), ((650, 697), 're.compile', 're.compile', (['"""ecc_dsa_verify\\\\(\\\\), ([0-9]+) us"""'], {}), "('ecc_dsa_verify\\\\(\\\\), ([0-9]+) us')\n", (660, 697), False, 'import re\n'), ((719, 767), 're.compile', 're.compile', (['"""encrypt\\\\(([0-9]+)\\\\), ([0-9]+) us"""'], {}), "('encrypt\\\\(([0-9]+)\\\\), ([0-9]+) us')\n", (729, 767), False, 'import re\n'), ((784, 832), 're.compile', 're.compile', (['"""decrypt\\\\(([0-9]+)\\\\), ([0-9]+) us"""'], {}), "('decrypt\\\\(([0-9]+)\\\\), ([0-9]+) us')\n", (794, 832), False, 'import re\n'), ((5988, 6047), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse Profile pyterm"""'}), "(description='Parse Profile pyterm')\n", (6011, 6047), False, 'import argparse\n'), ((1500, 1516), 'analysis.parser.common.parse_contiki', 'parse_contiki', (['f'], {}), '(f)\n', (1513, 1516), False, 'from analysis.parser.common import parse_contiki\n'), ((4418, 4428), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (4425, 4428), True, 'import numpy as np\n'), ((4430, 4442), 'scipy.stats.sem', 'stats.sem', (['x'], {}), '(x)\n', (4439, 4442), True, 'import scipy.stats as stats\n'), ((5503, 5522), 'os.path.basename', 'os.path.basename', (['g'], {}), '(g)\n', (5519, 5522), False, 'import os\n'), ((1881, 1912), 'scipy.stats.describe', 'stats.describe', 
(['self.stats_ecdh'], {}), '(self.stats_ecdh)\n', (1895, 1912), True, 'import scipy.stats as stats\n'), ((1969, 2000), 'scipy.stats.describe', 'stats.describe', (['self.stats_sign'], {}), '(self.stats_sign)\n', (1983, 2000), True, 'import scipy.stats as stats\n'), ((2061, 2094), 'scipy.stats.describe', 'stats.describe', (['self.stats_verify'], {}), '(self.stats_verify)\n', (2075, 2094), True, 'import scipy.stats as stats\n'), ((2329, 2364), 'scipy.stats.describe', 'stats.describe', (['self.stats_sha256_u'], {}), '(self.stats_sha256_u)\n', (2343, 2364), True, 'import scipy.stats as stats\n'), ((2398, 2433), 'scipy.stats.describe', 'stats.describe', (['self.stats_sha256_n'], {}), '(self.stats_sha256_n)\n', (2412, 2433), True, 'import scipy.stats as stats\n'), ((2674, 2710), 'scipy.stats.describe', 'stats.describe', (['self.stats_encrypt_u'], {}), '(self.stats_encrypt_u)\n', (2688, 2710), True, 'import scipy.stats as stats\n'), ((2745, 2781), 'scipy.stats.describe', 'stats.describe', (['self.stats_encrypt_n'], {}), '(self.stats_encrypt_n)\n', (2759, 2781), True, 'import scipy.stats as stats\n'), ((3022, 3058), 'scipy.stats.describe', 'stats.describe', (['self.stats_decrypt_u'], {}), '(self.stats_decrypt_u)\n', (3036, 3058), True, 'import scipy.stats as stats\n'), ((3093, 3129), 'scipy.stats.describe', 'stats.describe', (['self.stats_decrypt_n'], {}), '(self.stats_decrypt_n)\n', (3107, 3129), True, 'import scipy.stats as stats\n'), ((5221, 5245), 'scipy.stats.describe', 'stats.describe', (['combined'], {}), '(combined)\n', (5235, 5245), True, 'import scipy.stats as stats\n'), ((4504, 4514), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (4511, 4514), True, 'import numpy as np\n'), ((4522, 4534), 'scipy.stats.sem', 'stats.sem', (['x'], {}), '(x)\n', (4531, 4534), True, 'import scipy.stats as stats\n')]
|
# -*- coding: utf-8 -*-
#
# This module is part of the GeoTag-X PyBossa plugin.
# It contains implementations for custom filters.
#
# Authors:
# - <NAME> (<EMAIL>)
# - <NAME> (<EMAIL>)
#
# Copyright (c) 2016 UNITAR/UNOSAT
#
# The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from flask import Blueprint, current_app, jsonify
blueprint = Blueprint("geotagx-geojson-exporter", __name__)
@blueprint.route("/project/category/<string:category_short_name>/export-geojson")
def export_category_results(category_short_name):
    """Renders the specified category's results in GeoJSON format.
    Args:
        category_short_name (str): A category's unique short name.
    Returns:
        str: A GeoJSON formatted-string containing the specified category's results.
    """
    # Thin route handler: all of the work happens in the helper below.
    return _export_category_results_as_geoJSON(category_short_name)
def _export_category_results_as_geoJSON(category_name):
    """Build a GeoJSON FeatureCollection summarising all task runs of the
    projects in *category_name* and return it as a Flask JSON response.

    NOTE(review): this function uses ``unicode`` and so only runs under
    Python 2 — confirm the deployment target before porting.
    """
    from pybossa.cache import projects as cached_projects
    from pybossa.exporter.json_export import JsonExporter
    import json
    import pandas as pd
    import numpy as np
    geotagx_json_exporter = JsonExporter()
    max_number_of_exportable_projects = 15
    projects_in_category = cached_projects.get(category_name, page=1, per_page=max_number_of_exportable_projects)
    task_runs = []
    task_runs_info = []
    project_name_id_mapping = {}
    project_id_name_mapping = {}
    project_question_type_mapping = {}
    project_question_question_text_mapping = {}
    for project in projects_in_category:
        short_name = project['short_name']
        project_id_name_mapping[project['id']] = project['short_name']
        project_name_id_mapping[project['short_name']] = project['id']
        # Check if it a supported geotagx project whose schema we know
        if 'GEOTAGX_SUPPORTED_PROJECTS_SCHEMA' in current_app.config.keys() \
            and short_name in current_app.config['GEOTAGX_SUPPORTED_PROJECTS_SCHEMA'].keys():
            ##Read the project schema and store the respective questions and their types
            for _question in current_app.config['GEOTAGX_SUPPORTED_PROJECTS_SCHEMA'][short_name]['questions']:
                project_question_type_mapping[unicode(short_name+"::"+_question['answer']['saved_as'])] = _question['type']
                project_question_question_text_mapping[unicode(short_name+"::"+_question['answer']['saved_as']+"::question_text")] = _question['title']
        #Only export results of known GEOTAGX projects that are created with `geotagx-project-template`
        task_runs_generator = geotagx_json_exporter.gen_json("task_run", project['id'])
        # gen_json yields string chunks; accumulate them before parsing.
        _task_runs = ""
        for task_run_c in task_runs_generator:
            _task_runs += task_run_c
        task_runs = task_runs + json.loads(_task_runs)
    def extract_geotagx_info(json):
        """Returns a list of only info objects of the task_run"""
        exploded_json = []
        for item in json:
            item['info']['project_id'] = item['project_id']
            exploded_json.append(item['info'])
        return exploded_json
    def _summarize_geolocations(geolocation_responses):
        """
        TODO :: Add different geo-summarization methods (ConvexHull, Centroid, etc)
        """
        # Currently just filters the responses down to list-typed ones.
        responses = []
        for response in geolocation_responses:
            if type(response) == type([]):
                responses.append(response)
        return responses
    """
    Changes projection to WGS84 projection from WebMercator projection
    so that most geojson renderers support it out of the box
    Inspired by : http://www.gal-systems.com/2011/07/convert-coordinates-between-web.html
    """
    def _project_coordinate_from_webmercator_toWGS84(coordinates):
        import math
        mercatorX_lon = coordinates[0]
        mercatorY_lat = coordinates[1]
        # Coordinates already inside the lon/lat value range, or outside the
        # WebMercator domain, are rejected with a (False, False) sentinel.
        if math.fabs(mercatorX_lon) < 180 and math.fabs(mercatorY_lat) < 90:
            return False, False
        if ((math.fabs(mercatorX_lon) > 20037508.3427892) or (math.fabs(mercatorY_lat) > 20037508.3427892)):
            return False, False
        x = mercatorX_lon
        y = mercatorY_lat
        num3 = x / 6378137.0
        num4 = num3 * 57.295779513082323
        num5 = math.floor(float((num4 + 180.0) / 360.0))
        num6 = num4 - (num5 * 360.0)
        num7 = 1.5707963267948966 - (2.0 * math.atan(math.exp((-1.0 * y) / 6378137.0)));
        mercatorX_lon = num6
        mercatorY_lat = num7 * 57.295779513082323
        return mercatorX_lon, mercatorY_lat
    """
    Changes the projection of the multi_polygon object to WGS84 from WebMercator
    """
    def _project_geosummary_from_webmercator_to_WGS84(multi_polygon):
        _multi_polygon = []
        for polygon in multi_polygon:
            _polygon = []
            for coordinates in polygon:
                try:
                    _x, _y = _project_coordinate_from_webmercator_toWGS84(coordinates)
                    if _x and _y:
                        _polygon.append([_x, _y])
                except:
                    pass # Pass silently if there is some error in the input
            _multi_polygon.append(_polygon)
        return _multi_polygon
    def _build_geo_json(geolocation_responses):
        # Each per-image summary with a geolocation answer becomes one
        # MultiPolygon Feature whose properties are the remaining answers.
        geoJSON = {}
        geoJSON['type'] = "FeatureCollection"
        geoJSON['features'] = []
        for response in geolocation_responses:
            if response['_geotagx_geolocation_key']:
                geo_summary = response[response['_geotagx_geolocation_key']]
                _feature = {}
                _feature['type'] = "Feature"
                _feature['geometry'] = {}
                _feature['geometry']['type'] = "MultiPolygon"
                _feature['geometry']['coordinates'] = \
                    [_project_geosummary_from_webmercator_to_WGS84(geo_summary['geo_summary'])]
                del response[response['_geotagx_geolocation_key']]
                del response['_geotagx_geolocation_key']
                _feature['properties'] = response
                #Neglect responses with no coordinate labels
                if _feature['geometry']['coordinates'] != [[]]:
                    geoJSON['features'].append(_feature)
        return geoJSON
    task_runs_info = extract_geotagx_info(task_runs)
    task_runs_info = pd.read_json(json.dumps(task_runs_info))
    # Aggregate answers per image URL and per project.
    summary_dict = {}
    for img_url in task_runs_info['img'].unique():
        per_url_data = task_runs_info[task_runs_info['img'] == img_url]
        for project_id in np.unique(per_url_data['project_id'].values):
            per_summary_dict = {}
            per_summary_dict['_geotagx_geolocation_key'] = False
            if img_url in summary_dict.keys():
                per_summary_dict = summary_dict[img_url]
            per_summary_dict['GEOTAGX_IMAGE_URL'] = img_url
            per_url_data_project_slice = per_url_data[per_url_data['project_id'] == project_id]
            for key in per_url_data_project_slice.keys():
                namespaced_key = project_id_name_mapping[project_id]+"::"+key
                if key not in ['img', 'isMigrated', 'son_app_id', 'task_id', 'project_id']:
                    if namespaced_key in project_question_type_mapping.keys():
                        if project_question_type_mapping[namespaced_key] == u"geotagging":
                            per_summary_dict['_geotagx_geolocation_key'] = namespaced_key
                            per_summary_dict[namespaced_key] = {'geo_summary' : _summarize_geolocations(per_url_data_project_slice[key].values)}
                        else:
                            per_summary_dict[namespaced_key] = {'answer_summary':dict(per_url_data_project_slice[key].value_counts())}
                            per_summary_dict[namespaced_key]['question_text'] = project_question_question_text_mapping[unicode(namespaced_key+"::question_text")]
                elif key == u"img":
                    per_summary_dict[project_id_name_mapping[project_id]+"::GEOTAGX_TOTAL"] = len(per_url_data_project_slice)
            summary_dict[img_url] = per_summary_dict
    geo_json = _build_geo_json(summary_dict.values())
    return jsonify(geo_json)
|
[
"math.exp",
"flask.Blueprint",
"json.loads",
"math.fabs",
"pybossa.exporter.json_export.JsonExporter",
"json.dumps",
"flask.jsonify",
"flask.current_app.config.keys",
"pybossa.cache.projects.get",
"numpy.unique"
] |
[((1366, 1413), 'flask.Blueprint', 'Blueprint', (['"""geotagx-geojson-exporter"""', '__name__'], {}), "('geotagx-geojson-exporter', __name__)\n", (1375, 1413), False, 'from flask import Blueprint, current_app, jsonify\n'), ((2134, 2148), 'pybossa.exporter.json_export.JsonExporter', 'JsonExporter', ([], {}), '()\n', (2146, 2148), False, 'from pybossa.exporter.json_export import JsonExporter\n'), ((2220, 2311), 'pybossa.cache.projects.get', 'cached_projects.get', (['category_name'], {'page': '(1)', 'per_page': 'max_number_of_exportable_projects'}), '(category_name, page=1, per_page=\n max_number_of_exportable_projects)\n', (2239, 2311), True, 'from pybossa.cache import projects as cached_projects\n'), ((9227, 9244), 'flask.jsonify', 'jsonify', (['geo_json'], {}), '(geo_json)\n', (9234, 9244), False, 'from flask import Blueprint, current_app, jsonify\n'), ((7378, 7404), 'json.dumps', 'json.dumps', (['task_runs_info'], {}), '(task_runs_info)\n', (7388, 7404), False, 'import json\n'), ((7579, 7623), 'numpy.unique', 'np.unique', (["per_url_data['project_id'].values"], {}), "(per_url_data['project_id'].values)\n", (7588, 7623), True, 'import numpy as np\n'), ((2854, 2879), 'flask.current_app.config.keys', 'current_app.config.keys', ([], {}), '()\n', (2877, 2879), False, 'from flask import Blueprint, current_app, jsonify\n'), ((3811, 3833), 'json.loads', 'json.loads', (['_task_runs'], {}), '(_task_runs)\n', (3821, 3833), False, 'import json\n'), ((4908, 4932), 'math.fabs', 'math.fabs', (['mercatorX_lon'], {}), '(mercatorX_lon)\n', (4917, 4932), False, 'import math\n'), ((4943, 4967), 'math.fabs', 'math.fabs', (['mercatorY_lat'], {}), '(mercatorY_lat)\n', (4952, 4967), False, 'import math\n'), ((5020, 5044), 'math.fabs', 'math.fabs', (['mercatorX_lon'], {}), '(mercatorX_lon)\n', (5029, 5044), False, 'import math\n'), ((5069, 5093), 'math.fabs', 'math.fabs', (['mercatorY_lat'], {}), '(mercatorY_lat)\n', (5078, 5093), False, 'import math\n'), ((5418, 5448), 'math.exp', 
'math.exp', (['(-1.0 * y / 6378137.0)'], {}), '(-1.0 * y / 6378137.0)\n', (5426, 5448), False, 'import math\n')]
|
from .base_array import *
from .state_variables_array import *
import cantera as ct
import numpy as np
import scipy.interpolate as interp
class PressureArray(StateVariablesArray):
    '''Variable array for pressure'''
    def __init__(self, parent, var=None):
        super().__init__(parent, var)
        # Pressure is stored in CGS units (g/cm/s^2); 1013250 below is 1 atm.
        self.name = 'Pressure (g/cm/s2)'
    def initialize(self):
        '''Initialize variable array for simulation'''
        # Uniform initial field of 1 atm in CGS units.
        self.variable_array = 1013250
    def interpolate(self):
        '''
        Interpolate and assign variables from other value arrays
        Converted to cgs-unit [g/cm/s2] from [atm]
        '''
        df_ck = self.parent_solution.df_ck
        dis = df_ck['Distance (cm)'].to_numpy()
        phi = df_ck['Pressure (atm)'].to_numpy()
        phi_cgs = phi*1013250
        # Cubic interpolation of the Chemkin profile onto this grid's y coords.
        f = interp.interp1d(dis, phi_cgs, kind="cubic")
        self.variable_array = f(self.y)
    def calc_coef(self):
        '''Calculate coefficients for TDMA (tridiagonal solver) over the grid.

        Builds a, b, c, d such that a*phi[p] = b*phi[p+1] + c*phi[p-1] + d,
        with fixed-pressure (1 atm) boundary conditions at both ends.
        '''
        # Read variables from parent solution
        R = self.parent_solution.R.variable_array
        R_s = self.parent_solution.R.variable_array_s
        R_old = self.parent_solution.R_old
        G = self.parent_solution.G.variable_array
        '''V should be changed to V_star'''
        V = self.parent_solution.V_star
        # Calculation of d
        # d combines the staggered density and the velocity-equation a-coefficients.
        coef_a_V = self.parent_solution.V.coef_a
        d = 1/np.multiply(R_s, coef_a_V)*self.dy
        for p in range(self.num_grid):
            # Upper boundary condition
            if p == 0:
                self.coef_a[p] = 1.0
                self.coef_b[p] = 0.0
                self.coef_c[p] = 0.0
                self.coef_d[p] = 1013250
                continue
            # Lower boundary condition
            if p == self.num_grid-1:
                self.coef_a[p] = 1.0
                self.coef_b[p] = 0.0
                self.coef_c[p] = 0.0
                self.coef_d[p] = 1013250
                continue
            # Inner points of numerical grid
            self.coef_a[p] = R_s[p]*d[p]/self.dy + R_s[p-1]*d[p-1]/self.dy
            self.coef_b[p] = R_s[p]*d[p]/self.dy
            self.coef_c[p] = R_s[p-1]*d[p-1]/self.dy
            # Source term: transient density change, radial term, and the
            # divergence of the provisional (starred) mass flux.
            self.coef_d[p] = - (R[p] - R_old[p])/self.dy - 2*R[p]*G[p] \
                - (R_s[p]*V[p] - R_s[p-1]*V[p-1])/self.dy
    def calc_P_dash(self):
        '''Calculate P' for step 3 in Simple loop'''
        self.calc_coef()
        P_dash = self.get_phi_TDMA()
        return P_dash
|
[
"scipy.interpolate.interp1d",
"numpy.multiply"
] |
[((814, 857), 'scipy.interpolate.interp1d', 'interp.interp1d', (['dis', 'phi_cgs'], {'kind': '"""cubic"""'}), "(dis, phi_cgs, kind='cubic')\n", (829, 857), True, 'import scipy.interpolate as interp\n'), ((1396, 1422), 'numpy.multiply', 'np.multiply', (['R_s', 'coef_a_V'], {}), '(R_s, coef_a_V)\n', (1407, 1422), True, 'import numpy as np\n')]
|
import numpy as np
import torch
from sklearn.metrics import roc_auc_score, precision_recall_curve, jaccard_score, f1_score
class AverageMeter(object):
    """Maintains a weighted running average of a scalar (or array) metric.

    ``val`` is the most recent observation, ``avg`` the running average,
    ``sum`` the weighted total, and ``count`` the total weight seen so far.
    """

    def __init__(self):
        self.initialized = False
        self.val = None
        self.avg = None
        self.sum = None
        self.count = None

    def initialize(self, val, weight):
        """Seed the meter with its first (value, weight) observation."""
        self.val = val
        self.avg = val
        self.sum = np.multiply(val, weight)
        self.count = weight
        self.initialized = True

    def update(self, val, weight=1):
        """Fold one observation into the running average."""
        if self.initialized:
            self.add(val, weight)
        else:
            self.initialize(val, weight)

    def add(self, val, weight):
        """Accumulate an observation into the weighted sum and average."""
        weighted = np.multiply(val, weight)
        self.val = val
        self.sum = np.add(self.sum, weighted)
        self.count = self.count + weight
        self.avg = self.sum / self.count

    @property
    def value(self):
        """Most recent observation, rounded to 4 decimals."""
        return np.round(self.val, 4)

    @property
    def average(self):
        """Running average, rounded to 4 decimals."""
        return np.round(self.avg, 4)
def eval_metrics(target, predict):
    """Return the confusion-matrix counts (tn, fp, fn, tp) for binary
    0/1 arrays of ground truth and predictions."""
    inv_target = 1 - target
    inv_predict = 1 - predict
    tp = (predict * target).sum()
    tn = (inv_predict * inv_target).sum()
    fp = (inv_target * predict).sum()
    fn = (inv_predict * target).sum()
    return tn, fp, fn, tp
def get_metrics(tn, fp, fn, tp):
    """Derive standard binary-classification metrics from confusion counts.

    NOTE(review): the sensitivity key is spelled "Sem" here — likely a typo
    for "Sen", but kept byte-for-byte because callers may rely on it.
    """
    total = tp + fp + fn + tn
    accuracy = (tp + tn) / total
    precision = tp / (tp + fp)
    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    jaccard = tp / (tp + fp + fn)
    dice = 2 * precision * sensitivity / (precision + sensitivity)
    return {
        "Acc": np.round(accuracy, 4),
        "pre": np.round(precision, 4),
        "Sem": np.round(sensitivity, 4),
        "Spe": np.round(specificity, 4),
        "F1": np.round(dice, 4),
        "IOU": np.round(jaccard, 4)
    }
def get_metrics_full(tn, fp, fn, tp, target, output, output_b):
    """Like ``get_metrics``, but also reports ROC-AUC computed from the raw
    scores *output* against *target*.

    ``output_b`` (the binarised output) is accepted for interface
    compatibility but is not used by the current implementation.
    """
    auc = roc_auc_score(target, output)
    total = tp + fp + fn + tn
    accuracy = (tp + tn) / total
    precision = tp / (tp + fp)
    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    jaccard = tp / (tp + fp + fn)
    dice = 2 * precision * sensitivity / (precision + sensitivity)
    return {
        "AUC": np.round(auc, 4),
        "F1": np.round(dice, 4),
        "Acc": np.round(accuracy, 4),
        "pre": np.round(precision, 4),
        "Sen": np.round(sensitivity, 4),
        "Spe": np.round(specificity, 4),
        "IOU": np.round(jaccard, 4),
    }
|
[
"numpy.round",
"numpy.multiply",
"sklearn.metrics.roc_auc_score"
] |
[((1800, 1829), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['target', 'output'], {}), '(target, output)\n', (1813, 1829), False, 'from sklearn.metrics import roc_auc_score, precision_recall_curve, jaccard_score, f1_score\n'), ((474, 498), 'numpy.multiply', 'np.multiply', (['val', 'weight'], {}), '(val, weight)\n', (485, 498), True, 'import numpy as np\n'), ((970, 991), 'numpy.round', 'np.round', (['self.val', '(4)'], {}), '(self.val, 4)\n', (978, 991), True, 'import numpy as np\n'), ((1045, 1066), 'numpy.round', 'np.round', (['self.avg', '(4)'], {}), '(self.avg, 4)\n', (1053, 1066), True, 'import numpy as np\n'), ((1538, 1554), 'numpy.round', 'np.round', (['acc', '(4)'], {}), '(acc, 4)\n', (1546, 1554), True, 'import numpy as np\n'), ((1571, 1587), 'numpy.round', 'np.round', (['pre', '(4)'], {}), '(pre, 4)\n', (1579, 1587), True, 'import numpy as np\n'), ((1604, 1620), 'numpy.round', 'np.round', (['sen', '(4)'], {}), '(sen, 4)\n', (1612, 1620), True, 'import numpy as np\n'), ((1637, 1653), 'numpy.round', 'np.round', (['spe', '(4)'], {}), '(spe, 4)\n', (1645, 1653), True, 'import numpy as np\n'), ((1669, 1684), 'numpy.round', 'np.round', (['f1', '(4)'], {}), '(f1, 4)\n', (1677, 1684), True, 'import numpy as np\n'), ((1701, 1717), 'numpy.round', 'np.round', (['iou', '(4)'], {}), '(iou, 4)\n', (1709, 1717), True, 'import numpy as np\n'), ((2403, 2419), 'numpy.round', 'np.round', (['auc', '(4)'], {}), '(auc, 4)\n', (2411, 2419), True, 'import numpy as np\n'), ((2435, 2450), 'numpy.round', 'np.round', (['f1', '(4)'], {}), '(f1, 4)\n', (2443, 2450), True, 'import numpy as np\n'), ((2467, 2483), 'numpy.round', 'np.round', (['acc', '(4)'], {}), '(acc, 4)\n', (2475, 2483), True, 'import numpy as np\n'), ((2500, 2516), 'numpy.round', 'np.round', (['pre', '(4)'], {}), '(pre, 4)\n', (2508, 2516), True, 'import numpy as np\n'), ((2533, 2549), 'numpy.round', 'np.round', (['sen', '(4)'], {}), '(sen, 4)\n', (2541, 2549), True, 'import numpy as np\n'), ((2566, 2582), 
'numpy.round', 'np.round', (['spe', '(4)'], {}), '(spe, 4)\n', (2574, 2582), True, 'import numpy as np\n'), ((2599, 2615), 'numpy.round', 'np.round', (['iou', '(4)'], {}), '(iou, 4)\n', (2607, 2615), True, 'import numpy as np\n'), ((811, 835), 'numpy.multiply', 'np.multiply', (['val', 'weight'], {}), '(val, weight)\n', (822, 835), True, 'import numpy as np\n')]
|
from pywebio.input import *
from pywebio.output import *
from pywebio import start_server
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import io
def data_gen(num=100):
    """Draw *num* samples from a standard normal distribution."""
    return np.random.normal(size=num)
def plot_raw(a):
    """Draw a line plot of the samples and return the current figure."""
    n_samples = len(a)
    plt.close()
    plt.figure(figsize=(12, 5))
    plt.title(f"Line plot of {n_samples} samples", fontsize=16)
    plt.plot(a)
    return plt.gcf()
def plot_hist(a):
    """Draw a histogram of the samples and return the current figure."""
    n_samples = len(a)
    plt.close()
    plt.figure(figsize=(12, 5))
    plt.title(f"Histogram of {n_samples} samples", fontsize=16)
    plt.hist(a, color='orange', edgecolor='k')
    return plt.gcf()
def fig2img(fig):
    """Convert a Matplotlib figure to a PIL Image and return it."""
    buffer = io.BytesIO()
    fig.savefig(buffer)
    buffer.seek(0)
    return Image.open(buffer)
def Generate(num=100):
    """Regenerate the 'raw' output scope with a fresh line plot and histogram
    of *num* random samples (called by the Generate flow)."""
    remove(scope='raw')
    with use_scope(name='raw', clear=True):
        samples = data_gen(num)
        # Render the line plot first, then the histogram, as images.
        for plotter in (plot_raw, plot_hist):
            figure = plotter(samples)
            put_image(fig2img(figure))
def app():
    """Main PyWebIO application: intro markdown, sample-count prompt, plots."""
    intro_md = """
    # Matplotlib plot demo
    ## [Dr. <NAME>](https://www.linkedin.com/in/tirthajyoti-sarkar-2127aa7/)
    We show two plots from [random gaussian samples](https://en.wikipedia.org/wiki/Normal_distribution). You choose the number of data points to generate.
    - A line plot
    - A histogram
    """
    put_markdown(intro_md, strip_indent=4)
    sample_count = input("Number of samples", type=NUMBER)
    Generate(sample_count)
    put_markdown("""## Code for this app is here: [Code repo](https://github.com/tirthajyoti/PyWebIO/tree/main/apps)""")
if __name__ == '__main__':
    # Serve the PyWebIO app on port 9999 in debug mode.
    start_server(app,port=9999,debug=True)
|
[
"io.BytesIO",
"pywebio.start_server",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.close",
"PIL.Image.open",
"matplotlib.pyplot.figure",
"numpy.random.normal",
"matplotlib.pyplot.gcf"
] |
[((263, 289), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'num'}), '(size=num)\n', (279, 289), True, 'import numpy as np\n'), ((362, 373), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (371, 373), True, 'import matplotlib.pyplot as plt\n'), ((378, 405), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (388, 405), True, 'import matplotlib.pyplot as plt\n'), ((469, 480), 'matplotlib.pyplot.plot', 'plt.plot', (['a'], {}), '(a)\n', (477, 480), True, 'import matplotlib.pyplot as plt\n'), ((492, 501), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (499, 501), True, 'import matplotlib.pyplot as plt\n'), ((561, 572), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (570, 572), True, 'import matplotlib.pyplot as plt\n'), ((577, 604), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (587, 604), True, 'import matplotlib.pyplot as plt\n'), ((668, 710), 'matplotlib.pyplot.hist', 'plt.hist', (['a'], {'color': '"""orange"""', 'edgecolor': '"""k"""'}), "(a, color='orange', edgecolor='k')\n", (676, 710), True, 'import matplotlib.pyplot as plt\n'), ((720, 729), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (727, 729), True, 'import matplotlib.pyplot as plt\n'), ((836, 848), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (846, 848), False, 'import io\n'), ((896, 911), 'PIL.Image.open', 'Image.open', (['buf'], {}), '(buf)\n', (906, 911), False, 'from PIL import Image\n'), ((1905, 1945), 'pywebio.start_server', 'start_server', (['app'], {'port': '(9999)', 'debug': '(True)'}), '(app, port=9999, debug=True)\n', (1917, 1945), False, 'from pywebio import start_server\n')]
|
#!/usr/bin/env python
import scipy.stats
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import jinja2
from jinja2 import Template
import os
import subprocess
import matplotlib
import json
# Use pdflatex as the TeX engine when Matplotlib exports .pgf figures.
pgf_with_latex = {"pgf.texsystem": 'pdflatex'}
matplotlib.rcParams.update(pgf_with_latex)
def create_accuracy_plot_and_return_mse(prediction_df, solution_array):
    """Cross-plot predictions against truth (with a per-well misfit segment),
    save the figure to 'accuracy.pgf', and return the MSE rounded to 3 d.p.

    Assumes exactly 10 wells — TODO confirm against the calling notebook.
    """
    predictions = prediction_df['Prediction, MSTB'].to_numpy()
    plt.figure(figsize=(6, 4))
    plt.plot(solution_array, predictions, 'o', label='Estimates')
    plt.plot([500, 2000], [500, 2000], '--r', label='1:1 line')
    # Vertical dashed segment from each prediction to the 1:1 line; only the
    # first segment carries the legend label.
    for well in range(10):
        extra = {'label': 'misfit'} if well == 0 else {}
        plt.plot([solution_array[well], solution_array[well]],
                 [predictions[well], solution_array[well]],
                 '--', color='gray', **extra)
    plt.xlabel('True, MSTB')
    plt.ylabel('Prediction, MSTB')
    plt.grid('on')
    plt.legend()
    plt.axis([500, 2000, 500, 2000])
    plt.savefig('accuracy.pgf')
    return np.round(mean_squared_error(solution_array, predictions), 3)
def create_realizations_plots(prediction_df, solution_array):
    """Plot, for each of the 10 wells, the kernel-density estimate of its
    prediction realizations with the true value marked, in a 5x2 grid,
    and save the figure to 'realizations.pgf'.

    Realization samples are taken from prediction_df columns 2 onward
    (one row per well).
    """
    prediction_realizations = prediction_df.iloc[:,2:].to_numpy()
    # One Gaussian KDE per well, with scipy's default bandwidth.
    kde = [scipy.stats.gaussian_kde(prediction_realizations[i],
                bw_method = None) for i in range(10)]
    # Evaluation grid padded 20% beyond each well's sample range.
    t_range = [np.linspace(prediction_realizations[i].min() * 0.8,
                prediction_realizations[i].max() * 1.2, 200) for i in range(10)]
    plt.figure(figsize =(6,8))
    for i in range(10):
        ax = plt.subplot(5,2,i+1)
        if i == 0:
            pdf, = ax.plot(t_range[i],kde[i](t_range[i]), lw=2, label=f'PDF of reals')
            real, = ax.plot(solution_array[i], 0,'ro',markersize = 10, label ='True')
            plt.xlabel('Prediction, MSTB');
            plt.title(f'Preproduction No. {74+i}')
        else:
            pdf, = ax.plot(t_range[i],kde[i](t_range[i]), lw=2)
            real, = ax.plot(solution_array[i], 0, 'ro',markersize = 10)
            plt.title(f'Preproduction No. {74+i}')
        # A single shared legend, anchored outside the second subplot.
        if i == 1:
            plt.legend([pdf, real], ['PDF of realz.', 'True'], bbox_to_anchor=(1.05, 1.0), loc='upper left')
        # Only label the y-axis on the left-hand column.
        if i % 2 == 0:
            plt.ylabel('Probability')
    plt.tight_layout()
    plt.savefig('realizations.pgf')
    return
def compute_goodness_array(prediction_df, solution_array):
    """Count wells whose true value falls inside centred percentile ranges.

    For each symmetric range around the median — 0%, 10%, ..., 100% wide —
    counts how many wells' true values lie strictly between the lower and
    upper percentile of that well's prediction realizations.

    :param prediction_df: DataFrame whose columns from index 2 onward hold
        the prediction realizations, one row per well
    :param solution_array: array of true values, one per well (same length
        as the number of rows in `prediction_df`)
    :return: list of 11 counts, one per percentile range
    """
    realizations = prediction_df.iloc[:, 2:].to_numpy()
    goodness_score = []
    for i in range(11):  # ranges of width 0%, 10%, ..., 100% centred on the median
        lower_pct = 50 - 5 * i
        upper_pct = 50 + 5 * i
        num_within = 0
        # Generalized from the hard-coded 10 wells to however many rows exist;
        # the stray debug print of each solution value has been removed.
        for j in range(len(realizations)):
            lo = np.percentile(realizations[j], lower_pct)
            hi = np.percentile(realizations[j], upper_pct)
            # Strict inequalities: a value exactly on a boundary is not counted.
            if lo < solution_array[j] < hi:
                num_within += 1
        goodness_score.append(num_within)
    return goodness_score
def create_goodness_plot_and_return_goodness_score(prediction_df, solution_array):
    """Plot the goodness curve, save it to 'goodness.pgf', and return the score.

    The goodness score is 1 when the curve of "percentage of wells within each
    centred percentile range" follows the 1:1 line exactly, 0.5 when the curve
    is flat at 100%, and 0 when it is flat at 0%.

    :param prediction_df: DataFrame whose columns from index 2 onward hold
        the prediction realizations, one row per well
    :param solution_array: array of true values, one per well
    :return: goodness score in [0, 1], rounded to 3 decimals
    """
    goodness_score = compute_goodness_array(prediction_df, solution_array)
    plt.figure(figsize=(6, 4))
    plt.plot(goodness_score, '--ko', label='Goodness')
    plt.plot([0, 10], [0, 10], '-r', label='1:1 line')
    tick_positions = np.linspace(0, 10, 6)
    # int() instead of np.int: the np.int alias was removed in NumPy 1.24,
    # so the original crashed with AttributeError on current NumPy.
    tick_labels = [f'{int(pos * 10)}%' for pos in tick_positions]
    plt.xticks(tick_positions, tick_labels)
    plt.yticks(tick_positions, tick_labels)
    plt.xlabel('Within percentile')
    plt.ylabel('Percentage of wells within the range')
    plt.legend()
    plt.savefig('goodness.pgf')
    ## Total area of plot is 100 (square of 10 x 10).
    # Normalize the area between the goodness curve and the 1:1 line into a
    # 0..1 score: perfect tracking -> 1, flat at 100% -> 0.5, flat at 0% -> 0.
    goodness_score_upNdown = np.array(goodness_score) - np.arange(0, 11)
    a_interval_index = [1 if goodness_score[i + 1] >= i + 1 else 0 for i in range(10)]
    goodness_score_ = 1
    for i in range(10):
        if a_interval_index[i] == 1:
            # curve above the 1:1 line: penalize against the 45-unit area above
            goodness_score_ -= +1 / 2 * goodness_score_upNdown[i + 1] / 45
        else:
            # curve below the 1:1 line: penalize against the 55-unit area below
            goodness_score_ -= -goodness_score_upNdown[i + 1] / 55
    return np.abs(np.round(goodness_score_, 3))
# Jinja2 environment with LaTeX-safe delimiters: the default {{ }} / {% %}
# markers collide with LaTeX braces and '%' comments, so templates use
# \VAR{...} for variables, \BLOCK{...} for statements, and \#{...} for
# comments. Templates are loaded from the current working directory.
latex_jinja_env = jinja2.Environment(
    block_start_string = '\BLOCK{',
    block_end_string = '}',
    variable_start_string = '\VAR{',
    variable_end_string = '}',
    comment_start_string = '\#{',
    comment_end_string = '}',
    line_statement_prefix = '%%',
    line_comment_prefix = '%#',
    trim_blocks = True,
    autoescape = False,
    loader = jinja2.FileSystemLoader(os.path.abspath('.'))
)
def create_team_report(team_name, mse,
                       goodness_score,
                       presentation_comments,
                       code_review_comments):
    """Render a team's LaTeX report from the template and compile it to PDF.

    Writes '<team_name>_report.tex' in the working directory and invokes
    latexmk to place the compiled PDF under 'reports/'.

    :param team_name: repository-derived team name (underscores shown as spaces)
    :param mse: mean squared error of the team's point estimates
    :param goodness_score: uncertainty-model goodness score in [0, 1]
    :param presentation_comments: list of free-text presentation comments
    :param code_review_comments: list of free-text code-review comments
    """
    template = latex_jinja_env.get_template('report_template.tex')
    rendered_tex = template.render(
        teamname=team_name.replace('_', ' '),
        mse=mse,
        goodness=goodness_score,
        presentationComments=presentation_comments,
        codereviewComments=code_review_comments,
    )
    tex_filename = f'{team_name}_report.tex'
    with open(tex_filename, 'w') as tex_file:
        tex_file.write(rendered_tex)
    # Compile the rendered source; the PDF lands in reports/.
    subprocess.run(['latexmk', '-pdf',
                    '-output-directory=reports', tex_filename])
    return
def parse_team_name(team_name):
    """Normalize a team name: drop spaces, hyphens and underscores, lowercase."""
    return team_name.translate(str.maketrans('', '', ' -_')).lower()
def parse_question_scores(score):
    """Return the integer before the first comma of a survey answer string."""
    head, _, _ = score.partition(',')
    return int(head)
def get_presentation_dataframes(presentation_csv):
    """Load a Qualtrics presentation-survey CSV into score and comment frames.

    :param presentation_csv: path (or file-like object) readable by pandas
    :return: tuple (scores_df, comments_df): `scores_df` maps each normalized
        team name to the sum of its mean Q2-Q8 scores; `comments_df` maps each
        team name to the list of Q9 free-text comments
    """
    df = pd.read_csv(presentation_csv)
    df = df.drop([0, 1])  # Qualtrics emits two metadata rows after the header
    df = df.astype('str')
    df['Q1'] = df['Q1'].apply(parse_team_name)
    # Score columns were converted one copy-pasted line at a time; loop instead.
    for question in ('Q2', 'Q3', 'Q4', 'Q5', 'Q6', 'Q7', 'Q8'):
        df[question] = df[question].apply(parse_question_scores)
    scores_df = df.loc[:, 'Q1':'Q8'].groupby('Q1').mean().sum(axis=1)
    comments_df = df[['Q1', 'Q9']].groupby('Q1')['Q9'].apply(list)
    return scores_df, comments_df
def get_code_review_dataframes(code_review_csv):
    """Load a Qualtrics code-review-survey CSV into score and comment frames.

    :param code_review_csv: path (or file-like object) readable by pandas
    :return: tuple (scores_df, comments_df): `scores_df` maps each normalized
        team name to the sum of its mean Q2-Q6 scores; `comments_df` maps each
        team name to the list of Q7 free-text comments
    """
    df = pd.read_csv(code_review_csv)
    df = df.drop([0, 1])  # Qualtrics emits two metadata rows after the header
    df = df.astype('str')
    df['Q1'] = df['Q1'].apply(parse_team_name)
    # Score columns were converted one copy-pasted line at a time; loop instead.
    for question in ('Q2', 'Q3', 'Q4', 'Q5', 'Q6'):
        df[question] = df[question].apply(parse_question_scores)
    scores_df = df.loc[:, 'Q1':'Q6'].groupby('Q1').mean().sum(axis=1)
    comments_df = df[['Q1', 'Q7']].groupby('Q1')['Q7'].apply(list)
    return scores_df, comments_df
# %%
if __name__ == '__main__':
    from github import Github
    from io import StringIO
    # Authenticate against GitHub and enumerate every repo in the organization.
    gh = Github(os.environ['PGEHACKATHON_SECRET_TOKEN'])
    repos = gh.get_organization_repos('PGEHackathon')
    # Organization repos that are infrastructure, not team submissions.
    blocked_list = ['PGEHackathon/data', 'PGEHackathon/workshop',
                    'PGEHackathon/scoring', 'PGEHackathon/PGEHackathon',
                    'PGEHackathon/resources', 'PGEHackathon/TheNomads',
                    'PGEHackathon/truth_data', 'PGEHackathon/fooled-by-randomness',
                    'PGEHackathon/404_Not_Found', 'PGEHackathon/ripROACH', 'PGEHackathon/PumpJack']
    solution_array = np.load('True_for_predrill_3yr.npy') # Solution
    team_names = []
    team_mse = []
    team_goodness_score = []
    # For every team submission, score it and render a per-team PDF report.
    for repo in repos:
        if repo not in blocked_list:
            print(f"Generating Report For: {repo}")
            result = gh.get_file_in_repo('solution.csv', repo)
            if result is not None:
                # 'owner/name' -> 'name'
                team_name = repo.split('/')[1]
                team_names.append(team_name)
                prediction_df = pd.read_csv(StringIO(result))
                mse = create_accuracy_plot_and_return_mse(prediction_df,
                                                          solution_array)
                team_mse.append(mse)
                create_realizations_plots(prediction_df, solution_array)
                goodness_score = \
                    create_goodness_plot_and_return_goodness_score(prediction_df,
                                                                   solution_array)
                team_goodness_score.append(goodness_score)
                presentation_score_df, presentation_comments_df = \
                    get_presentation_dataframes('presentation.csv')
                code_review_score_df, code_review_comments_df = \
                    get_code_review_dataframes('code_review.csv')
                try:
                    presentation_comments = \
                        presentation_comments_df[parse_team_name(team_name)]
                    code_review_comments = \
                        code_review_comments_df[parse_team_name(team_name)]
                except:
                    # NOTE(review): bare except — a team with no survey rows
                    # falls back to placeholders, but this also hides
                    # unrelated errors; consider catching KeyError only.
                    presentation_comments = ["None"]
                    code_review_comments = ["None"]
                create_team_report(team_name, mse,
                                   goodness_score,
                                   presentation_comments,
                                   code_review_comments)
    # Assemble the final ranking table: lower MSE ranks better (ascending);
    # higher goodness / presentation / code-review scores rank better.
    df = pd.DataFrame(np.array([team_names,
                               team_mse,
                               team_goodness_score]).T, columns=['Team Names',
                                                                   'MSE',
                                                                   'Goodness Score'])
    df['Short Names'] = df['Team Names'].apply(parse_team_name)
    df.set_index(['Short Names'], inplace=True)
    df['Pres. Score'] = presentation_score_df
    df['Code Score'] = code_review_score_df
    df['MSE Rank'] = df['MSE'].astype('float64').rank(method='min', ascending=True, na_option='top')
    df['Goodness Rank'] = df['Goodness Score'].astype('float64').rank(method='min', ascending=False, na_option='top')
    df['Pres. Rank'] = df['Pres. Score'].astype('float64').rank(method='min', ascending=False, na_option='top')
    df['Code Rank'] = df['Code Score'].astype('float64').rank(method='min', ascending=False, na_option='top')
    # Overall rank = rank of the summed individual ranks.
    df['Overall Rank'] = (df['MSE Rank'].astype('float64') +
                          df['Goodness Rank'].astype('float64') +
                          df['Pres. Rank'].astype('float64') +
                          df['Code Rank'].astype('float64')
                          ).rank(method='min')
    df.sort_values('Overall Rank', inplace=True)
    df.index.name = None
    #df.reset_index(inplace=True)
    with open('final_rankings_table.tex', 'w') as f:
        df.to_latex(index=False, buf=f)
    # Compile the overall report that includes the rankings table.
    subprocess.run(['latexmk', '-pdf',
                    '-output-directory=reports', 'final_report.tex'])
|
[
"matplotlib.pyplot.title",
"numpy.load",
"pandas.read_csv",
"github.Github",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"numpy.round",
"os.path.abspath",
"matplotlib.rcParams.update",
"numpy.int",
"numpy.linspace",
"sklearn.metrics.mean_squared_error",
"io.StringIO",
"matplotlib.pyplot.legend",
"numpy.percentile",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"subprocess.run",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((308, 350), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (['pgf_with_latex'], {}), '(pgf_with_latex)\n', (334, 350), False, 'import matplotlib\n'), ((496, 522), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (506, 522), True, 'import matplotlib.pyplot as plt\n'), ((528, 594), 'matplotlib.pyplot.plot', 'plt.plot', (['solution_array', 'prediction_array', '"""o"""'], {'label': '"""Estimates"""'}), "(solution_array, prediction_array, 'o', label='Estimates')\n", (536, 594), True, 'import matplotlib.pyplot as plt\n'), ((599, 658), 'matplotlib.pyplot.plot', 'plt.plot', (['[500, 2000]', '[500, 2000]', '"""--r"""'], {'label': '"""1:1 line"""'}), "([500, 2000], [500, 2000], '--r', label='1:1 line')\n", (607, 658), True, 'import matplotlib.pyplot as plt\n'), ((660, 790), 'matplotlib.pyplot.plot', 'plt.plot', (['[solution_array[0], solution_array[0]]', '[prediction_array[0], solution_array[0]]', '"""--"""'], {'color': '"""gray"""', 'label': '"""misfit"""'}), "([solution_array[0], solution_array[0]], [prediction_array[0],\n solution_array[0]], '--', color='gray', label='misfit')\n", (668, 790), True, 'import matplotlib.pyplot as plt\n'), ((999, 1023), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""True, MSTB"""'], {}), "('True, MSTB')\n", (1009, 1023), True, 'import matplotlib.pyplot as plt\n'), ((1030, 1060), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Prediction, MSTB"""'], {}), "('Prediction, MSTB')\n", (1040, 1060), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1081), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (1075, 1081), True, 'import matplotlib.pyplot as plt\n'), ((1088, 1100), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1098, 1100), True, 'import matplotlib.pyplot as plt\n'), ((1107, 1139), 'matplotlib.pyplot.axis', 'plt.axis', (['[500, 2000, 500, 2000]'], {}), '([500, 2000, 500, 2000])\n', (1115, 1139), True, 'import matplotlib.pyplot as plt\n'), 
((1141, 1168), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""accuracy.pgf"""'], {}), "('accuracy.pgf')\n", (1152, 1168), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1706), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (1690, 1706), True, 'import matplotlib.pyplot as plt\n'), ((2447, 2465), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2463, 2465), True, 'import matplotlib.pyplot as plt\n'), ((2470, 2501), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""realizations.pgf"""'], {}), "('realizations.pgf')\n", (2481, 2501), True, 'import matplotlib.pyplot as plt\n'), ((3689, 3715), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (3699, 3715), True, 'import matplotlib.pyplot as plt\n'), ((3721, 3771), 'matplotlib.pyplot.plot', 'plt.plot', (['goodness_score', '"""--ko"""'], {'label': '"""Goodness"""'}), "(goodness_score, '--ko', label='Goodness')\n", (3729, 3771), True, 'import matplotlib.pyplot as plt\n'), ((3777, 3827), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 10]', '[0, 10]', '"""-r"""'], {'label': '"""1:1 line"""'}), "([0, 10], [0, 10], '-r', label='1:1 line')\n", (3785, 3827), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4150), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Within percentile"""'], {}), "('Within percentile')\n", (4129, 4150), True, 'import matplotlib.pyplot as plt\n'), ((4157, 4207), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of wells within the range"""'], {}), "('Percentage of wells within the range')\n", (4167, 4207), True, 'import matplotlib.pyplot as plt\n'), ((4212, 4224), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4222, 4224), True, 'import matplotlib.pyplot as plt\n'), ((4231, 4258), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""goodness.pgf"""'], {}), "('goodness.pgf')\n", (4242, 4258), True, 'import matplotlib.pyplot as plt\n'), ((6254, 6332), 'subprocess.run', 
'subprocess.run', (["['latexmk', '-pdf', '-output-directory=reports', tex_filename]"], {}), "(['latexmk', '-pdf', '-output-directory=reports', tex_filename])\n", (6268, 6332), False, 'import subprocess\n'), ((6611, 6640), 'pandas.read_csv', 'pd.read_csv', (['presentation_csv'], {}), '(presentation_csv)\n', (6622, 6640), True, 'import pandas as pd\n'), ((7341, 7369), 'pandas.read_csv', 'pd.read_csv', (['code_review_csv'], {}), '(code_review_csv)\n', (7352, 7369), True, 'import pandas as pd\n'), ((8008, 8055), 'github.Github', 'Github', (["os.environ['PGEHACKATHON_SECRET_TOKEN']"], {}), "(os.environ['PGEHACKATHON_SECRET_TOKEN'])\n", (8014, 8055), False, 'from github import Github\n'), ((8533, 8569), 'numpy.load', 'np.load', (['"""True_for_predrill_3yr.npy"""'], {}), "('True_for_predrill_3yr.npy')\n", (8540, 8569), True, 'import numpy as np\n'), ((11904, 11992), 'subprocess.run', 'subprocess.run', (["['latexmk', '-pdf', '-output-directory=reports', 'final_report.tex']"], {}), "(['latexmk', '-pdf', '-output-directory=reports',\n 'final_report.tex'])\n", (11918, 11992), False, 'import subprocess\n'), ((849, 963), 'matplotlib.pyplot.plot', 'plt.plot', (['[solution_array[i], solution_array[i]]', '[prediction_array[i], solution_array[i]]', '"""--"""'], {'color': '"""gray"""'}), "([solution_array[i], solution_array[i]], [prediction_array[i],\n solution_array[i]], '--', color='gray')\n", (857, 963), True, 'import matplotlib.pyplot as plt\n'), ((1189, 1241), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['solution_array', 'prediction_array'], {}), '(solution_array, prediction_array)\n', (1207, 1241), False, 'from sklearn.metrics import mean_squared_error\n'), ((1744, 1768), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(2)', '(i + 1)'], {}), '(5, 2, i + 1)\n', (1755, 1768), True, 'import matplotlib.pyplot as plt\n'), ((4657, 4681), 'numpy.array', 'np.array', (['goodness_score'], {}), '(goodness_score)\n', (4665, 4681), True, 'import numpy as np\n'), 
((4684, 4700), 'numpy.arange', 'np.arange', (['(0)', '(11)'], {}), '(0, 11)\n', (4693, 4700), True, 'import numpy as np\n'), ((5032, 5060), 'numpy.round', 'np.round', (['goodness_score_', '(3)'], {}), '(goodness_score_, 3)\n', (5040, 5060), True, 'import numpy as np\n'), ((1969, 1999), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Prediction, MSTB"""'], {}), "('Prediction, MSTB')\n", (1979, 1999), True, 'import matplotlib.pyplot as plt\n'), ((2013, 2053), 'matplotlib.pyplot.title', 'plt.title', (['f"""Preproduction No. {74 + i}"""'], {}), "(f'Preproduction No. {74 + i}')\n", (2022, 2053), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2254), 'matplotlib.pyplot.title', 'plt.title', (['f"""Preproduction No. {74 + i}"""'], {}), "(f'Preproduction No. {74 + i}')\n", (2223, 2254), True, 'import matplotlib.pyplot as plt\n'), ((2284, 2385), 'matplotlib.pyplot.legend', 'plt.legend', (['[pdf, real]', "['PDF of realz.', 'True']"], {'bbox_to_anchor': '(1.05, 1.0)', 'loc': '"""upper left"""'}), "([pdf, real], ['PDF of realz.', 'True'], bbox_to_anchor=(1.05, \n 1.0), loc='upper left')\n", (2294, 2385), True, 'import matplotlib.pyplot as plt\n'), ((2416, 2441), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (2426, 2441), True, 'import matplotlib.pyplot as plt\n'), ((3099, 3166), 'numpy.percentile', 'np.percentile', (['prediction_realizations[j]', 'list_percentile_lower[i]'], {}), '(prediction_realizations[j], list_percentile_lower[i])\n', (3112, 3166), True, 'import numpy as np\n'), ((3185, 3252), 'numpy.percentile', 'np.percentile', (['prediction_realizations[j]', 'list_percentile_upper[i]'], {}), '(prediction_realizations[j], list_percentile_upper[i])\n', (3198, 3252), True, 'import numpy as np\n'), ((5475, 5495), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (5490, 5495), False, 'import os\n'), ((10449, 10502), 'numpy.array', 'np.array', (['[team_names, team_mse, team_goodness_score]'], {}), 
'([team_names, team_mse, team_goodness_score])\n', (10457, 10502), True, 'import numpy as np\n'), ((3942, 3963), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(6)'], {}), '(0, 10, 6)\n', (3953, 3963), True, 'import numpy as np\n'), ((3993, 4014), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(6)'], {}), '(0, 10, 6)\n', (4004, 4014), True, 'import numpy as np\n'), ((4042, 4063), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(6)'], {}), '(0, 10, 6)\n', (4053, 4063), True, 'import numpy as np\n'), ((4093, 4114), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(6)'], {}), '(0, 10, 6)\n', (4104, 4114), True, 'import numpy as np\n'), ((3968, 3982), 'numpy.int', 'np.int', (['(i * 10)'], {}), '(i * 10)\n', (3974, 3982), True, 'import numpy as np\n'), ((4068, 4082), 'numpy.int', 'np.int', (['(i * 10)'], {}), '(i * 10)\n', (4074, 4082), True, 'import numpy as np\n'), ((9001, 9017), 'io.StringIO', 'StringIO', (['result'], {}), '(result)\n', (9009, 9017), False, 'from io import StringIO\n')]
|
# Plot Linked Subreddits
# Import Modules
import os
import pandas as pd
import numpy as np
import csv
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter

# Bot accounts whose links are noise in the "most linked subreddits" counts;
# the original filtered these one copy-pasted line at a time.
BOTS = ['/r/autotldr', '/r/autowikibot', '/r/autowikibotsubjectglitched',
        '/r/sneakpeekbot', '/r/wikitextbot', '/r/remindmebot',
        '/r/xkcd_transcriber', '/r/youtubot', '/r/redditcom',
        '/r/tweetposter', '/r/helperbot_', '/r/totesmessenger']

# --- Panel A: subreddits linked from /r/climateskeptics ---
linked_sr = pd.read_csv('Outputs/CS_FULL/LinkedSubreddits_CS_FULL.csv')
linked_sr = linked_sr.sort_values(by=['Times_Linked'], ascending=False)
# Drop the self-link and all bot accounts in one pass.
linked_sr = linked_sr[~linked_sr.Subreddits.isin(['/r/climateskeptics'] + BOTS)]

# fix: the original sliced [:19], keeping only 19 rows despite the name
top20 = linked_sr[:20].reset_index(drop=True)
sr = top20['Subreddits']
num_links = top20['Times_Linked']
max_links = top20['Times_Linked'][0]
spacing = 200
ytickvals = np.arange(0, max_links + spacing, spacing)

fig, ax = plt.subplots()
ax.text(0.9, 0.95, 'A', transform=ax.transAxes, fontsize=18, fontweight='bold', va='top')
plt.bar(sr, num_links, edgecolor='black', color='papayawhip')
plt.xticks(rotation='vertical', fontsize=12)
plt.ylabel('Frequency', fontsize=18)
plt.yticks(ytickvals)
plt.ylim(0, max_links + spacing / 2)
fig.tight_layout()

# --- Panel B: subreddits linked from /r/climate ---
linked_sr2 = pd.read_csv('Outputs/CLIM_FULL/LinkedSubreddits_CLIM.csv')
linked_sr2 = linked_sr2.sort_values(by=['Times_Linked'], ascending=False)
linked_sr2 = linked_sr2[~linked_sr2.Subreddits.isin(['/r/climate'] + BOTS)]

top20_2 = linked_sr2[:20].reset_index(drop=True)
sr2 = top20_2['Subreddits']
num_links2 = top20_2['Times_Linked']
max_links = top20_2['Times_Linked'][0]
spacing = 200
ytickvals = np.arange(0, max_links + spacing, spacing)

fig, ax2 = plt.subplots()
ax2.text(0.9, 0.95, 'B', transform=ax2.transAxes, fontsize=18, fontweight='bold', va='top')
plt.bar(sr2, num_links2, edgecolor='black', color='#afeeee')
plt.xticks(rotation='vertical', fontsize=12)
plt.ylabel('Frequency', fontsize=18)
plt.yticks(ytickvals)
plt.ylim(0, max_links + spacing / 2)
fig.tight_layout()

plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"pandas.read_csv",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.yticks",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots"
] |
[((195, 254), 'pandas.read_csv', 'pd.read_csv', (['"""Outputs/CS_FULL/LinkedSubreddits_CS_FULL.csv"""'], {}), "('Outputs/CS_FULL/LinkedSubreddits_CS_FULL.csv')\n", (206, 254), True, 'import pandas as pd\n'), ((1414, 1456), 'numpy.arange', 'np.arange', (['(0)', '(max_links + spacing)', 'spacing'], {}), '(0, max_links + spacing, spacing)\n', (1423, 1456), True, 'import numpy as np\n'), ((1464, 1478), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1476, 1478), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1629), 'matplotlib.pyplot.bar', 'plt.bar', (['sr', 'num_links'], {'edgecolor': '"""black"""', 'color': '"""papayawhip"""'}), "(sr, num_links, edgecolor='black', color='papayawhip')\n", (1575, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1627, 1671), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""', 'fontsize': '(12)'}), "(rotation='vertical', fontsize=12)\n", (1637, 1671), True, 'import matplotlib.pyplot as plt\n'), ((1671, 1707), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {'fontsize': '(18)'}), "('Frequency', fontsize=18)\n", (1681, 1707), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1728), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ytickvals'], {}), '(ytickvals)\n', (1717, 1728), True, 'import matplotlib.pyplot as plt\n'), ((1729, 1765), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(max_links + spacing / 2)'], {}), '(0, max_links + spacing / 2)\n', (1737, 1765), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1866), 'pandas.read_csv', 'pd.read_csv', (['"""Outputs/CLIM_FULL/LinkedSubreddits_CLIM.csv"""'], {}), "('Outputs/CLIM_FULL/LinkedSubreddits_CLIM.csv')\n", (1819, 1866), True, 'import pandas as pd\n'), ((3072, 3114), 'numpy.arange', 'np.arange', (['(0)', '(max_links + spacing)', 'spacing'], {}), '(0, max_links + spacing, spacing)\n', (3081, 3114), True, 'import numpy as np\n'), ((3124, 3138), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3136, 
3138), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3290), 'matplotlib.pyplot.bar', 'plt.bar', (['sr2', 'num_links2'], {'edgecolor': '"""black"""', 'color': '"""#afeeee"""'}), "(sr2, num_links2, edgecolor='black', color='#afeeee')\n", (3237, 3290), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3332), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""', 'fontsize': '(12)'}), "(rotation='vertical', fontsize=12)\n", (3298, 3332), True, 'import matplotlib.pyplot as plt\n'), ((3332, 3368), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {'fontsize': '(18)'}), "('Frequency', fontsize=18)\n", (3342, 3368), True, 'import matplotlib.pyplot as plt\n'), ((3368, 3389), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ytickvals'], {}), '(ytickvals)\n', (3378, 3389), True, 'import matplotlib.pyplot as plt\n'), ((3390, 3426), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(max_links + spacing / 2)'], {}), '(0, max_links + spacing / 2)\n', (3398, 3426), True, 'import matplotlib.pyplot as plt\n'), ((3445, 3455), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3453, 3455), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import pyglet
from glearn.viewers.modes.viewer_mode import ViewerMode
from glearn.networks.layers.conv2d import Conv2dLayer
class CNNViewerMode(ViewerMode):
    """Interactive viewer mode for CNN policies.

    Renders a grid of evaluated inputs with their predicted labels and, when
    debugging is enabled, lets the user browse conv2d feature maps with
    keyboard shortcuts: '=' / '-' select the layer, ']' / '[' select the
    feature, '0' clears the overlay.
    """
    def __init__(self, config, visualize_grid=[1, 1], **kwargs):
        # NOTE(review): mutable default argument ([1, 1]); harmless only as
        # long as no caller mutates visualize_grid in place — confirm.
        super().__init__(config, **kwargs)
        self.visualize_grid = visualize_grid  # [rows, cols] of the display grid
        self.visualize_layer = None  # index of the conv layer being viewed, or None
        self.visualize_feature = None  # feature (channel) index within that layer
        self.last_results = None  # cached query results for re-rendering features
    def prepare(self, trainer):
        # Register a "conv2d_<n>" fetch for every conv feature tensor so that
        # predict queries return the activations needed for visualization.
        super().prepare(trainer)
        network = self.policy.network
        self.filters = self.config.find("filters")
        # NOTE(review): despite the name, these are Conv2dLayer instances.
        conv3d_layers = network.get_layers(Conv2dLayer)
        if self.debugging:
            n = 0
            for layer in conv3d_layers:
                features = layer.references["features"]
                for f in features:
                    network.context.add_fetch(f"conv2d_{n}", f, "predict")
                    n += 1
    def view_results(self, query, feed_map, results):
        # Render whatever the finished query produced: predictions and/or
        # conv feature activations.
        # visualize prediction results
        if self.has_dataset and self.trainer.is_evaluate(query):
            self.view_predict(feed_map["X"], feed_map["Y"], results["predict"])
        # visualize CNN features
        if "conv2d_0" in results:
            self.view_features(results)
    def on_key_press(self, key, modifiers):
        # Hotkeys that choose which conv layer/feature to display (debug only).
        super().on_key_press(key, modifiers)
        # feature visualization keys
        if self.debugging:
            if key == pyglet.window.key._0:
                # 0: turn feature visualization off
                self.clear_visualize()
            elif key == pyglet.window.key.EQUAL:
                # =: move one conv layer deeper (clamped to the last layer)
                if self.visualize_layer is None:
                    self.visualize_layer = 0
                    self.visualize_feature = 0
                else:
                    max_layers = len(self.filters)
                    self.visualize_layer = min(self.visualize_layer + 1, max_layers - 1)
                    # filters[i][2] holds the feature count of layer i
                    max_features = self.filters[self.visualize_layer][2]
                    self.visualize_feature = min(self.visualize_feature, max_features - 1)
                self.view_features()
            elif key == pyglet.window.key.MINUS:
                # -: move one conv layer shallower; going below layer 0 clears
                if self.visualize_layer is not None:
                    self.visualize_layer -= 1
                    if self.visualize_layer < 0:
                        self.clear_visualize()
                    else:
                        max_features = self.filters[self.visualize_layer][2]
                        self.visualize_feature = min(self.visualize_feature, max_features - 1)
                        self.view_features()
            elif key == pyglet.window.key.BRACKETRIGHT:
                # ]: next feature within the current layer (clamped)
                if self.visualize_layer is not None:
                    max_features = self.filters[self.visualize_layer][2]
                    self.visualize_feature = min(self.visualize_feature + 1, max_features - 1)
                    self.view_features()
            elif key == pyglet.window.key.BRACKETLEFT:
                # [: previous feature within the current layer (clamped at 0)
                if self.visualize_layer is not None:
                    self.visualize_feature = max(self.visualize_feature - 1, 0)
                    self.view_features()
    def view_predict(self, inputs, outputs, predict):
        # Tile the evaluated inputs into one big image and label each cell with
        # its prediction (green = correct, red = wrong).
        # build image grid of inputs
        grid = self.visualize_grid + [1]
        # self.input.shape is (height, width, channels); scale it by the grid
        image_size = np.multiply(self.input.shape, grid)
        width = self.input.shape[1]
        height = self.input.shape[0]
        image = np.zeros(image_size)
        for row in range(grid[0]):
            for col in range(grid[1]):
                index = row * grid[1] + col
                if index >= len(inputs):
                    break
                # assumes inputs are normalized to [0, 1] — TODO confirm
                input_image = inputs[index] * 255
                x = col * width
                y = row * height
                image[y:y + height, x:x + width] = input_image
                expected = outputs[index]
                predicted = predict[index]
                correct = predicted == expected
                predict_s = f"{predicted}"
                color = (0, 255, 0, 255) if correct else (255, 0, 0, 255)
                # label anchored at the cell's bottom-right (viewer y-axis points up)
                lx = x + width
                ly = image_size[0] - (y + height)
                self.viewer.add_label(f"action:{index}", predict_s, x=lx, y=ly, font_size=8,
                                      color=color, anchor_x='right', anchor_y='bottom')
        self.viewer.set_main_image(image)
    def view_features(self, results=None):
        # Render the selected conv layer's feature maps as grayscale tiles.
        # With results=None, re-renders from the cached last results.
        if results is None:
            results = self.last_results
        else:
            self.last_results = results
        if self.visualize_layer is not None and results is not None:
            # get layer values to visualize
            values = results[f"conv2d_{self.visualize_layer}"]
            # normalize over the whole batch so all tiles share one gray scale;
            # the 0.1 floor avoids division by ~zero for flat activations
            flat_values = values.ravel()
            value_min = min(flat_values)
            value_max = max(flat_values)
            value_range = max([0.1, value_max - value_min])
            # build grid of feature images
            vh = self.viewer.height
            width = self.input.shape[1]
            height = self.input.shape[0]
            for row in range(self.visualize_grid[0]):
                for col in range(self.visualize_grid[1]):
                    # build image for selected feature
                    index = row * self.visualize_grid[1] + col
                    # values has shape (batch, f_height, f_width, channels)
                    _, f_height, f_width, _ = values.shape
                    image = np.zeros((f_height, f_width, 1))
                    for y, f_row in enumerate(values[index]):
                        for x, f_col in enumerate(f_row):
                            value = f_col[self.visualize_feature]
                            image[y][x][0] = int((value - value_min) / value_range * 255)
                    # add image
                    x = col * width
                    y = vh - (row + 1) * height
                    self.viewer.add_image(f"feature:{index}", image, x=x, y=y, width=width,
                                          height=height)
    def clear_visualize(self):
        # Hide all feature-map overlays and reset the layer selection.
        self.visualize_layer = None
        self.viewer.remove_images("feature")
|
[
"numpy.zeros",
"numpy.multiply"
] |
[((3247, 3282), 'numpy.multiply', 'np.multiply', (['self.input.shape', 'grid'], {}), '(self.input.shape, grid)\n', (3258, 3282), True, 'import numpy as np\n'), ((3372, 3392), 'numpy.zeros', 'np.zeros', (['image_size'], {}), '(image_size)\n', (3380, 3392), True, 'import numpy as np\n'), ((5314, 5346), 'numpy.zeros', 'np.zeros', (['(f_height, f_width, 1)'], {}), '((f_height, f_width, 1))\n', (5322, 5346), True, 'import numpy as np\n')]
|
"""
This module provides tools for stacking a model on top of other
models without information leakage from a target variable to
predictions made by base models.
@author: <NAME>
"""
from typing import List, Dict, Tuple, Callable, Union, Optional, Any
from abc import ABC, abstractmethod
import numpy as np
from sklearn.base import (
BaseEstimator, RegressorMixin, ClassifierMixin,
clone
)
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import check_classification_targets
from sklearn.model_selection import (
KFold, StratifiedKFold, GroupKFold, TimeSeriesSplit
)
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import Pipeline
from joblib import Parallel, delayed
from .utils import InitablePipeline
# Type alias for the cross-validation splitters accepted by the stacking
# estimators (used to create out-of-fold predictions for the meta-estimator).
FoldType = Union[KFold, StratifiedKFold, GroupKFold, TimeSeriesSplit]
def _fit_estimator(
        X: np.ndarray,
        y: np.ndarray,
        estimator: BaseEstimator,
        fit_kwargs: Dict[str, Any]
        ) -> BaseEstimator:
    """
    Fit `estimator` to `(X, y)` with the given keyword arguments and
    return the fitted estimator.

    A module-private helper: it exists so base estimators can be fitted
    in parallel (e.g., through `joblib`). Do not call it from outside
    this module.
    """
    fitted_estimator = estimator.fit(X, y, **fit_kwargs)
    return fitted_estimator
class BaseStacking(BaseEstimator, ABC):
    """
    A parent class for regression and classification stacking.
    :param base_estimators_types:
        list of types of first stage estimators, a type can occur
        multiple times here
    :param base_estimators_params:
        list of (hyper)parameters of first stage estimators such
        that its i-th element relates to the i-th element of
        `base_estimator_types`
    :param meta_estimator_type:
        a type of second stage estimator
    :param meta_estimator_params:
        (hyper)parameters of second stage estimator
    :param splitter:
        an object that splits learning sample into folds for making
        out-of-fold predictions with base estimators, these predictions
        are used as features by second stage estimator
    :param keep_meta_X:
        if it is `True`, out-of-fold predictions made by first stage
        estimators are stored in the class attribute named `meta_X_`
    :param random_state:
        random state for all estimators and splitting into folds;
        if it is set, it overrides all other random states,
        i.e., the ones that are set in `base_estimators_params`,
        `meta_estimator_params`, and `splitter`; it is not set
        by default
    :param n_jobs:
        number of parallel jobs for fitting each of base estimators
        to different folds

    After fitting, the following attributes are set:
    `base_estimators_` (first stage estimators fitted to the whole
    learning sample), `meta_estimator_` (fitted second stage
    estimator), and, if `keep_meta_X` is `True`, `meta_X_`
    (out-of-fold meta-features on which `meta_estimator_` was fitted).
    """
    def __init__(
        self,
        base_estimators_types: Optional[List[type]] = None,
        base_estimators_params: Optional[List[Dict[str, Any]]] = None,
        meta_estimator_type: Optional[type] = None,
        meta_estimator_params: Optional[Dict[str, Any]] = None,
        splitter: Optional[FoldType] = None,
        keep_meta_X: bool = True,
        random_state: Optional[int] = None,
        n_jobs: int = 1
    ):
        # Per `sklearn` convention, `__init__` only stores the arguments
        # as-is; validation and substitution of defaults happen at fit time.
        self.base_estimators_types = base_estimators_types
        self.base_estimators_params = base_estimators_params
        self.meta_estimator_type = meta_estimator_type
        self.meta_estimator_params = meta_estimator_params
        self.splitter = splitter
        self.keep_meta_X = keep_meta_X
        self.random_state = random_state
        self.n_jobs = n_jobs
    @staticmethod
    def __preprocess_base_estimators_sources(
        types: List[type],
        params: List[Dict[str, Any]]
    ) -> Tuple[List[type], List[Dict[str, Any]]]:
        # Prepare types and parameters of base estimators, replace `None`s.
        # `Pipeline` is swapped for `InitablePipeline` so that the type can
        # be instantiated without arguments later on.
        types = [x if x != Pipeline else InitablePipeline for x in types]
        params = params or [dict() for _ in range(len(types))]
        return types, params
    @staticmethod
    def __match_base_estimators_sources(
        types: List[type],
        params: List[Dict[str, Any]]
    ) -> List[Tuple[type, Dict[str, Any]]]:
        # Validate and zip `types` and `params`.
        if len(types) != len(params):
            raise ValueError(
                (
                    'Lengths mismatch: `base_estimators_types` has length {}, '
                    'whereas `base_estimator_params` has length {}.'
                ).format(len(types), len(params))
            )
        pairs = list(zip(types, params))
        for estimator_type, estimator_params in pairs:
            if (estimator_type == InitablePipeline and
                    'steps' not in estimator_params.keys()):
                raise ValueError('Argument `steps` is not passed to pipeline.')
        return pairs
    def _create_base_estimators_from_their_types(
        self,
        types: List[type]
    ) -> List[BaseEstimator]:
        # Create a list of base estimators from a list of their types and
        # parameters of `self`.
        types, params = self.__preprocess_base_estimators_sources(
            types, self.base_estimators_params
        )
        pairs = self.__match_base_estimators_sources(types, params)
        # If `self.random_state` is set, it overrides the per-estimator
        # random states for every estimator type that supports one.
        pairs = [
            (t, p)
            if 'random_state' not in t().get_params().keys()
            or self.random_state is None
            else (t, dict(p, **{'random_state': self.random_state}))
            for t, p in pairs
        ]
        base_estimators = [
            estimator_type().set_params(**params)
            for estimator_type, params in pairs
        ]
        return base_estimators
    @abstractmethod
    def _create_base_estimators(self) -> List[BaseEstimator]:
        # Instantiate base estimators from initialization parameters.
        pass
    def __prepare_all_for_base_estimators_fitting(
        self,
        base_fit_kwargs: Optional[Dict[type, Dict[str, Any]]] = None
    ) -> Tuple[List[BaseEstimator], Dict[type, Dict[str, Any]]]:
        # Run preprocessing that is needed for base estimators fitting.
        base_estimators = self._create_base_estimators()
        base_fit_kwargs = (
            base_fit_kwargs or {type(x): dict() for x in base_estimators}
        )
        return base_estimators, base_fit_kwargs
    def _create_meta_estimator_from_its_type(
        self,
        meta_estimator_type: type,
    ) -> BaseEstimator:
        # Instantiate second stage estimator based on its type and parameters
        # of `self`.
        if meta_estimator_type == Pipeline:
            meta_estimator_type = InitablePipeline
        meta_estimator_params = self.meta_estimator_params or dict()
        random_state_is_applicable = (
            'random_state' in meta_estimator_type().get_params().keys()
        )
        random_state_is_set = self.random_state is not None
        if random_state_is_applicable and random_state_is_set:
            meta_estimator_params['random_state'] = self.random_state
        meta_estimator = (
            meta_estimator_type().set_params(**meta_estimator_params)
        )
        return meta_estimator
    @abstractmethod
    def _create_meta_estimator(self) -> BaseEstimator:
        # Instantiate second stage estimator from initialization parameters.
        pass
    def __create_splitter(self) -> FoldType:
        # Create splitter that is used for the first stage of stacking.
        # `self.random_state` is only propagated when the splitter
        # actually shuffles, i.e., when randomness matters.
        splitter = self.splitter or KFold()
        random_state_is_applicable = (
            hasattr(splitter, 'shuffle') and splitter.shuffle
        )
        random_state_is_set = self.random_state is not None
        if random_state_is_applicable and random_state_is_set:
            splitter.random_state = self.random_state
        return splitter
    @abstractmethod
    def _preprocess_target_variable(self, y: np.ndarray) -> np.ndarray:
        # Run operations that are specific to regression or classification.
        pass
    def _fit_base_estimators(
        self,
        X: np.ndarray,
        y: np.ndarray,
        base_fit_kwargs: Optional[Dict[type, Dict[str, Any]]] = None
    ) -> None:
        # Fit each of base estimators to a whole learning sample.
        # These fits are the ones used at prediction time; out-of-fold
        # fits for meta-features are produced separately.
        base_estimators, base_fit_kwargs = (
            self.__prepare_all_for_base_estimators_fitting(base_fit_kwargs)
        )
        self.base_estimators_ = [
            estimator.fit(X, y, **base_fit_kwargs.get(type(estimator), dict()))
            for estimator in base_estimators
        ]
    @staticmethod
    @abstractmethod
    def _infer_operation(fitted_estimator: BaseEstimator) -> Callable:
        # Figure out what `fitted_estimator` must do according to its type.
        pass
    @abstractmethod
    def _apply_fitted_base_estimator(
        self,
        apply_fn: Callable,
        estimator: BaseEstimator,
        X: np.ndarray,
        labels_from_training_folds: Optional[List[int]] = None
    ) -> np.ndarray:
        # Use `estimator` on `X` with `apply_fn` which calls one of
        # `predict`, `predict_proba`, and `transform` methods.
        pass
    @staticmethod
    def __take_folds_data(
        X: np.ndarray,
        y: np.ndarray,
        folds: List[Tuple[np.ndarray, np.ndarray]]
    ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
        # Return three lists: list of features for fitting, list of targets
        # for fitting, and list of hold-out features for making of
        # out-of-fold predictions.
        zipped_folds_data = [
            (X[fit_indices, :], y[fit_indices], X[hold_out_indices, :])
            for fit_indices, hold_out_indices in folds
        ]
        folds_data = tuple(list(data) for data in zip(*zipped_folds_data))
        return folds_data
    @staticmethod
    def __restore_initial_order(
        meta_features: np.ndarray,
        folds: List[Tuple[np.ndarray]]
    ) -> np.ndarray:
        # Rearrange data for the second stage model and get order of rows
        # that corresponds to initial order of objects.
        # This is needed, because meta estimator can have sample weights.
        # The hold-out indices of each fold (x[1]) are appended as an extra
        # column, rows are sorted by it, and then it is dropped.
        ordering_column = np.hstack([x[1] for x in folds]).reshape((-1, 1))
        meta_features = np.hstack((meta_features, ordering_column))
        meta_features = meta_features[meta_features[:, -1].argsort(), :-1]
        return meta_features
    def __compute_meta_feature_produced_by_estimator(
        self,
        estimator_fits_to_folds: List[BaseEstimator],
        apply_fn: Callable,
        hold_out_Xs: List[np.ndarray],
        fit_ys: List[np.ndarray]
    ) -> np.ndarray:
        # Collect all out-of-fold predictions produced by the estimator
        # such that its clones trained on different folds are stored in
        # `estimator_fits_to_folds`, then combine these predictions in
        # a single column.
        # `classes_` exists only for classifiers, so the list of labels
        # seen on the training folds is passed only in that case.
        meta_feature = [
            self._apply_fitted_base_estimator(
                apply_fn, estimator_on_other_folds, hold_out_X,
                sorted(np.unique(fit_y).tolist())
                if hasattr(self, 'classes_') else []
            )
            for estimator_on_other_folds, hold_out_X, fit_y
            in zip(
                estimator_fits_to_folds, hold_out_Xs, fit_ys
            )
        ]
        meta_x = np.vstack(meta_feature)
        return meta_x
    def __collect_out_of_fold_predictions(
        self,
        X: np.ndarray,
        y: np.ndarray,
        base_fit_kwargs: Optional[Dict[type, Dict[str, Any]]] = None
    ) -> np.ndarray:
        # Collect out-of-fold predictions of all base estimators that are
        # trained on all folds except the one for which predictions are
        # being made, then return matrix of out-of-fold predictions.
        base_estimators, base_fit_kwargs = (
            self.__prepare_all_for_base_estimators_fitting(base_fit_kwargs)
        )
        splitter = self.__create_splitter()
        folds = list(splitter.split(X))
        fit_Xs, fit_ys, hold_out_Xs = self.__take_folds_data(
            X, y, folds
        )
        meta_features = []
        for estimator in base_estimators:
            apply_fn = self._infer_operation(estimator)
            # Clones of the estimator are fitted to the folds in parallel.
            estimator_fits_to_folds = Parallel(n_jobs=self.n_jobs)(
                delayed(_fit_estimator)(
                    *fit_data,
                    clone(estimator),
                    base_fit_kwargs.get(type(estimator), dict())
                )
                for fit_data in zip(fit_Xs, fit_ys)
            )
            current_meta_x = self.__compute_meta_feature_produced_by_estimator(
                estimator_fits_to_folds, apply_fn, hold_out_Xs, fit_ys
            )
            meta_features.append(current_meta_x)
        shuffled_meta_X = np.hstack(meta_features)
        meta_X = self.__restore_initial_order(
            shuffled_meta_X, folds
        )
        return meta_X
    def _fit_meta_estimator(
        self,
        meta_X: np.ndarray,
        y: np.ndarray,
        meta_fit_kwargs: Optional[Dict[str, Any]] = None
    ) -> None:
        # Fit second stage estimator on out-of-fold predictions made by first
        # stage estimators.
        meta_estimator = self._create_meta_estimator()
        meta_fit_kwargs = meta_fit_kwargs or dict()
        self.meta_estimator_ = meta_estimator.fit(meta_X, y, **meta_fit_kwargs)
    def _fit(
        self,
        X: np.ndarray,
        y: np.ndarray,
        base_fit_kwargs: Optional[Dict[type, Dict[str, Any]]] = None,
        meta_fit_kwargs: Optional[Dict[str, Any]] = None,
    ) -> 'BaseStacking':
        # Implement internal logic of fitting.
        # Note that base estimators are fitted twice: to the whole sample
        # (for prediction time) and to folds (for meta-features).
        X, y = check_X_y(X, y)
        y = self._preprocess_target_variable(y)
        self._fit_base_estimators(X, y, base_fit_kwargs)
        meta_X = self.__collect_out_of_fold_predictions(X, y, base_fit_kwargs)
        self._fit_meta_estimator(meta_X, y, meta_fit_kwargs)
        if self.keep_meta_X:
            self.meta_X_ = meta_X
        return self
    def fit(
        self,
        X: np.ndarray,
        y: np.ndarray,
        base_fit_kwargs: Optional[Dict[type, Dict[str, Any]]] = None,
        meta_fit_kwargs: Optional[Dict[str, Any]] = None
    ) -> 'BaseStacking':
        """
        Train estimators from both stages of stacking.
        :param X:
            features
        :param y:
            target
        :param base_fit_kwargs:
            settings of first stage estimators training, first stage
            estimators are identified by their types and, as of now,
            two estimators of the same type can not have different
            settings
        :param meta_fit_kwargs:
            settings of second stage estimator training
        :return:
            fitted instance
        """
        return self._fit(X, y, base_fit_kwargs, meta_fit_kwargs)
    @abstractmethod
    def _apply_fitted_meta_estimator(
        self,
        meta_X: np.ndarray,
        return_probabilities: bool = False
    ) -> np.ndarray:
        # Make predictions with second stage estimator.
        pass
    def _predict(
        self,
        X: np.ndarray,
        return_probabilities: bool = False
    ) -> np.ndarray:
        # Implement internal logic of predicting.
        # Meta-features come from the base estimators that were fitted
        # to the whole learning sample in `_fit_base_estimators`.
        check_is_fitted(self, ['base_estimators_', 'meta_estimator_'])
        X = check_array(X)
        meta_features = []
        for estimator in self.base_estimators_:
            apply_fn = self._infer_operation(estimator)
            current_meta_feature = self._apply_fitted_base_estimator(
                apply_fn, estimator, X,
                list(range(len(self.classes_)))
                if hasattr(self, 'classes_')
                else []
            )
            meta_features.append(current_meta_feature)
        meta_X = np.hstack(meta_features)
        predictions = self._apply_fitted_meta_estimator(
            meta_X, return_probabilities
        )
        return predictions
    def predict(
        self,
        X: np.ndarray,
    ) -> np.ndarray:
        """
        Predict target variable on a new dataset.
        :param X:
            features
        :return:
            predictions
        """
        return self._predict(X)
    def _fit_predict(
        self,
        X: np.ndarray,
        y: np.ndarray,
        base_fit_kwargs: Optional[Dict[type, Dict[str, Any]]] = None,
        meta_fit_kwargs: Optional[Dict[str, Any]] = None,
        return_probabilities: bool = False
    ) -> np.ndarray:
        # Implement internal logic of predicting for the training set.
        # `keep_meta_X` is temporarily forced to `True` so that `meta_X_`
        # is available for making out-of-fold predictions, then restored.
        keep_meta_X = self.keep_meta_X
        self.keep_meta_X = True
        self._fit(X, y, base_fit_kwargs, meta_fit_kwargs)
        if return_probabilities:
            predictions = self.meta_estimator_.predict_proba(self.meta_X_)
        else:
            predictions = self.meta_estimator_.predict(self.meta_X_)
        self.keep_meta_X = keep_meta_X
        if not keep_meta_X:
            self.drop_training_meta_features()
        return predictions
    def fit_predict(
        self,
        X: np.ndarray,
        y: np.ndarray,
        base_fit_kwargs: Optional[Dict[type, Dict[str, Any]]] = None,
        meta_fit_kwargs: Optional[Dict[str, Any]] = None
    ) -> np.ndarray:
        """
        Train stacking and predict target variable on a learning
        sample.
        This is needed for correct measuring of train error -
        composition of calls to `fit` and `predict` does not produce
        the same results, because features for second stage estimator
        are produced on the full learning sample there, whereas they
        are produced out-of-fold here.
        :param X:
            features
        :param y:
            target
        :param base_fit_kwargs:
            settings of first stage estimators training, first stage
            estimators are identified by their types, as of now two
            estimators of the same type can not have different
            settings
        :param meta_fit_kwargs:
            settings of second stage estimator training
        :return:
            predictions
        """
        return self._fit_predict(X, y, base_fit_kwargs, meta_fit_kwargs)
    def drop_training_meta_features(self) -> None:
        """
        Delete a sample on which second stage estimator was trained.
        :return:
            None
        """
        self.meta_X_ = None
class StackingRegressor(BaseStacking, RegressorMixin):
    """
    Stacking for regression tasks.

    A second stage regressor is trained on predictions made by first
    stage regressors and/or on transformations made by first stage
    transformers. All first stage outputs used for training the second
    stage are produced in an out-of-fold manner, so no information
    leaks from the target into the meta-features.
    """
    def _create_base_estimators(self) -> List[BaseEstimator]:
        # Instantiate first stage estimators; fall back to default types
        # when none were specified at initialization.
        chosen_types = (
            self.base_estimators_types
            or [RandomForestRegressor, KNeighborsRegressor]
        )
        return self._create_base_estimators_from_their_types(chosen_types)
    def _create_meta_estimator(self) -> BaseEstimator:
        # Instantiate the second stage estimator; linear regression is
        # the default choice.
        chosen_type = self.meta_estimator_type or LinearRegression
        return self._create_meta_estimator_from_its_type(chosen_type)
    def _preprocess_target_variable(self, y: np.ndarray) -> np.ndarray:
        # Regression targets require no transformation.
        return y
    @staticmethod
    def _infer_operation(fitted_estimator: BaseEstimator) -> Callable:
        # Decide what `fitted_estimator` must do: predictors predict,
        # transformers transform. Prediction takes precedence.
        def predict(estimator: BaseEstimator, X: np.ndarray) -> np.ndarray:
            # Predictions are reshaped into a single column.
            return estimator.predict(X).reshape((-1, 1))
        def transform(estimator: BaseEstimator, X: np.ndarray) -> np.ndarray:
            # 1-D transformer outputs are reshaped into a single column.
            output = estimator.transform(X)
            if len(output.shape) > 1:
                return output
            return output.reshape((-1, 1))
        if hasattr(fitted_estimator, 'predict'):
            return predict
        if hasattr(fitted_estimator, 'transform'):
            return transform
        raise TypeError(
            'Invalid type of estimator: {}'.format(type(fitted_estimator))
        )
    def _apply_fitted_base_estimator(
        self,
        apply_fn: Callable,
        estimator: BaseEstimator,
        X: np.ndarray,
        labels_from_training_folds: Optional[List[int]] = None
    ) -> np.ndarray:
        # Class labels are irrelevant for regression, so the extra
        # argument is ignored and `apply_fn` is invoked directly.
        return apply_fn(estimator, X)
    def _apply_fitted_meta_estimator(
        self,
        meta_X: np.ndarray,
        return_probabilities: bool = False
    ) -> np.ndarray:
        # Regressors have no probabilities, so plain `predict` is used.
        return self.meta_estimator_.predict(meta_X)
class StackingClassifier(BaseStacking, ClassifierMixin):
    """
    A class that allows training a classifier on predictions made by
    other classifiers and/or transformations made by transformers.
    Information does not leak through predictions and transformations,
    because all of them are made in an out-of-fold manner.
    """
    def _create_base_estimators(self) -> List[BaseEstimator]:
        # Instantiate base estimators from initialization parameters.
        # The list of default types is not analogous to that from
        # `StackingRegressor`, because inclusion of `KNeighborsClassifier`
        # instead of `LogisticRegression` causes occasional failure of
        # `sklearn` support test (actually, the issue is with the test).
        default_types = [RandomForestClassifier, LogisticRegression]
        types = self.base_estimators_types or default_types
        return self._create_base_estimators_from_their_types(types)
    def _create_meta_estimator(self) -> BaseEstimator:
        # Instantiate second stage estimator from initialization parameters;
        # logistic regression is the default choice.
        meta_estimator_type = self.meta_estimator_type or LogisticRegression
        return self._create_meta_estimator_from_its_type(meta_estimator_type)
    def _preprocess_target_variable(self, y: np.ndarray) -> np.ndarray:
        # Convert class labels to dense integers 0..(n_classes - 1);
        # the original labels are remembered in `self.classes_`.
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        return y
    @staticmethod
    def _infer_operation(fitted_estimator: BaseEstimator) -> Callable:
        # Figure out what `fitted_estimator` must do according to its type.
        # Priority: `predict_proba` over `predict` over `transform`.
        def predict(
                estimator: BaseEstimator,
                X: np.ndarray,
                *args, **kwargs
        ) -> np.ndarray:
            # Predictions are reshaped into a single column.
            return estimator.predict(X).reshape((-1, 1))
        def predict_proba(
                estimator: BaseEstimator,
                X: np.ndarray,
                *args, **kwargs
        ) -> np.ndarray:
            def predict_proba_for_all_classes(
                    estimator: BaseEstimator,
                    X: np.ndarray,
                    train_labels: List[int],
                    n_all_labels: int
            ) -> np.ndarray:
                # Take into consideration that some classes may be not
                # represented on training folds.
                preds = np.zeros((X.shape[0], n_all_labels))
                preds[:, train_labels] = estimator.predict_proba(X)
                # Last column is dropped, because probabilities sum up to 1.
                preds = preds[:, :-1]
                return preds
            return predict_proba_for_all_classes(estimator, X, *args, **kwargs)
        def transform(
                estimator: BaseEstimator,
                X: np.ndarray,
                *args, **kwargs
        ) -> np.ndarray:
            # 1-D transformer outputs are reshaped into a single column.
            result = estimator.transform(X)
            result = (
                result if len(result.shape) > 1 else result.reshape((-1, 1))
            )
            return result
        if hasattr(fitted_estimator, 'predict_proba'):
            return predict_proba
        elif hasattr(fitted_estimator, 'predict'):
            return predict
        elif hasattr(fitted_estimator, 'transform'):
            return transform
        else:
            raise TypeError(
                'Invalid type of estimator: {}'.format(type(fitted_estimator))
            )
    def _apply_fitted_base_estimator(
        self,
        apply_fn: Callable,
        estimator: BaseEstimator,
        X: np.ndarray,
        labels_from_training_folds: Optional[List[int]] = None
    ) -> np.ndarray:
        # Use `estimator` on `X` with `apply_fn`. Labels seen on training
        # folds and the total number of classes are passed through,
        # because `predict_proba` needs them to pad missing classes.
        result = apply_fn(
            estimator,
            X,
            labels_from_training_folds,
            len(self.classes_)
        )
        return result
    def _apply_fitted_meta_estimator(
        self,
        meta_X: np.ndarray,
        return_probabilities: bool = False
    ) -> np.ndarray:
        # Make predictions with meta-estimator and map internal integer
        # labels back to the original class labels.
        if return_probabilities:
            if not hasattr(self.meta_estimator_, 'predict_proba'):
                raise NotImplementedError(
                    "Second stage estimator has no `predict_proba` method."
                )
            predictions = self.meta_estimator_.predict_proba(meta_X)
        else:
            raw_predictions = self.meta_estimator_.predict(meta_X)
            # Direct fancy indexing is equivalent to the previous
            # `np.apply_along_axis(lambda x: self.classes_[x], ...)`
            # call on a 1-D vector, but simpler and much faster.
            predictions = self.classes_[raw_predictions]
        return predictions
    def predict_proba(
        self,
        X: np.ndarray
    ) -> np.ndarray:
        """
        Predict probabilities of classes on a new dataset.
        :param X:
            features
        :return:
            estimated probabilities of classes
        """
        return self._predict(X, return_probabilities=True)
    def fit_predict_proba(
        self,
        X: np.ndarray,
        y: np.ndarray,
        base_fit_kwargs: Optional[Dict[type, Dict[str, Any]]] = None,
        meta_fit_kwargs: Optional[Dict[str, Any]] = None
    ) -> np.ndarray:
        """
        Train stacking and predict class probabilities on a learning
        sample.
        This is needed for correct measuring of train performance -
        composition of calls to `fit` and `predict_proba` does not
        produce the same results, because features for second stage
        estimator are produced on the full learning sample there,
        whereas they are produced out-of-fold here.
        :param X:
            features
        :param y:
            target
        :param base_fit_kwargs:
            settings of first stage estimators training, first stage
            estimators are identified by their types, as of now two
            estimators of the same type can not have different
            settings
        :param meta_fit_kwargs:
            settings of second stage estimator training
        :return:
            predictions
        """
        return self._fit_predict(
            X, y, base_fit_kwargs, meta_fit_kwargs, return_probabilities=True
        )
|
[
"sklearn.base.clone",
"sklearn.utils.validation.check_X_y",
"numpy.unique",
"numpy.zeros",
"sklearn.model_selection.KFold",
"sklearn.utils.validation.check_is_fitted",
"numpy.hstack",
"numpy.apply_along_axis",
"joblib.Parallel",
"joblib.delayed",
"sklearn.utils.multiclass.check_classification_targets",
"numpy.vstack",
"sklearn.utils.validation.check_array"
] |
[((10472, 10515), 'numpy.hstack', 'np.hstack', (['(meta_features, ordering_column)'], {}), '((meta_features, ordering_column))\n', (10481, 10515), True, 'import numpy as np\n'), ((11569, 11592), 'numpy.vstack', 'np.vstack', (['meta_feature'], {}), '(meta_feature)\n', (11578, 11592), True, 'import numpy as np\n'), ((13054, 13078), 'numpy.hstack', 'np.hstack', (['meta_features'], {}), '(meta_features)\n', (13063, 13078), True, 'import numpy as np\n'), ((14002, 14017), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (14011, 14017), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((15668, 15730), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', "['base_estimators_', 'meta_estimator_']"], {}), "(self, ['base_estimators_', 'meta_estimator_'])\n", (15683, 15730), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((15743, 15757), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {}), '(X)\n', (15754, 15757), False, 'from sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n'), ((16203, 16227), 'numpy.hstack', 'np.hstack', (['meta_features'], {}), '(meta_features)\n', (16212, 16227), True, 'import numpy as np\n'), ((23171, 23202), 'sklearn.utils.multiclass.check_classification_targets', 'check_classification_targets', (['y'], {}), '(y)\n', (23199, 23202), False, 'from sklearn.utils.multiclass import check_classification_targets\n'), ((23230, 23263), 'numpy.unique', 'np.unique', (['y'], {'return_inverse': '(True)'}), '(y, return_inverse=True)\n', (23239, 23263), True, 'import numpy as np\n'), ((7640, 7647), 'sklearn.model_selection.KFold', 'KFold', ([], {}), '()\n', (7645, 7647), False, 'from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold, TimeSeriesSplit\n'), ((26379, 26455), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda x: self.classes_[x])'], {'axis': '(0)', 'arr': 
'raw_predictions'}), '(lambda x: self.classes_[x], axis=0, arr=raw_predictions)\n', (26398, 26455), True, 'import numpy as np\n'), ((10398, 10430), 'numpy.hstack', 'np.hstack', (['[x[1] for x in folds]'], {}), '([x[1] for x in folds])\n', (10407, 10430), True, 'import numpy as np\n'), ((12525, 12553), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (12533, 12553), False, 'from joblib import Parallel, delayed\n'), ((24223, 24259), 'numpy.zeros', 'np.zeros', (['(X.shape[0], n_all_labels)'], {}), '((X.shape[0], n_all_labels))\n', (24231, 24259), True, 'import numpy as np\n'), ((12571, 12594), 'joblib.delayed', 'delayed', (['_fit_estimator'], {}), '(_fit_estimator)\n', (12578, 12594), False, 'from joblib import Parallel, delayed\n'), ((12647, 12663), 'sklearn.base.clone', 'clone', (['estimator'], {}), '(estimator)\n', (12652, 12663), False, 'from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin, clone\n'), ((11293, 11309), 'numpy.unique', 'np.unique', (['fit_y'], {}), '(fit_y)\n', (11302, 11309), True, 'import numpy as np\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.