code | apis | extract_api
---|---|---|
import aoc_common as ac
import numpy as np
from aocd.models import Puzzle
puzzle = Puzzle(year=2019, day=11)
ram = [int(x) for x in puzzle.input_data.split(",")]
pointer = 0
relative_base = 0
painting = {(0, 0): 0}
coord = (0, 0)
color = 0 # Part One: the robot starts on a black panel
color = 1 # Part Two: the robot starts on a white panel (overrides the Part One value)
direction = "N"
our_computer = ac.full_intcode_computer(ram, pointer, relative_base, locals())
while True:
try:
new_color = next(our_computer)
d_color = next(our_computer)
painting[coord] = new_color
coord, direction = ac.robot_turner(coord, direction, d_color)
if coord in painting:
color = painting[coord]
else:
color = 0
except StopIteration:  # the Intcode program has halted
break
# print(len(painting.keys()))
x = []
y = []
z = []
for k, v in painting.items():
x.append(int(k[0]))
y.append(int(k[1]))
z.append(int(v))
min_x = abs(min(x))
min_y = abs(min(y))
x = [i + min_x for i in x]
y = [j + min_y for j in y]
message = np.zeros([6, 43])
message[y, x] = z
# message = np.where(message == 0, " ","■")
ac.screen(painting)
# print(np.array2string(np.flipud(message), max_line_width=np.inf))
|
[
"aoc_common.robot_turner",
"numpy.zeros",
"aoc_common.screen",
"aocd.models.Puzzle"
] |
[((84, 109), 'aocd.models.Puzzle', 'Puzzle', ([], {'year': '(2019)', 'day': '(11)'}), '(year=2019, day=11)\n', (90, 109), False, 'from aocd.models import Puzzle\n'), ((960, 977), 'numpy.zeros', 'np.zeros', (['[6, 43]'], {}), '([6, 43])\n', (968, 977), True, 'import numpy as np\n'), ((1042, 1061), 'aoc_common.screen', 'ac.screen', (['painting'], {}), '(painting)\n', (1051, 1061), True, 'import aoc_common as ac\n'), ((531, 573), 'aoc_common.robot_turner', 'ac.robot_turner', (['coord', 'direction', 'd_color'], {}), '(coord, direction, d_color)\n', (546, 573), True, 'import aoc_common as ac\n')]
|
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import plot_voltage
import pdn_params as pdn
from cython.sim_pdn import sim_throttling_wrapper
TEST_LIST_spec=[
"429.mcf",
"433.milc",
"435.gromacs",
"436.cactusADM",
"437.leslie3d",
"444.namd",
"445.gobmk",
"453.povray",
"454.calculix",
"456.hmmer",
"458.sjeng",
"459.GemsFDTD",
"462.libquantum",
"464.h264ref",
# "470.lbm",
"471.omnetpp",
"473.astar",
"481.wrf", \
"482.sphinx3", \
]
def sim_throttling(power, pwr_throttle):
print("Sim throttling...")
THROTTLE_DUR = pdn.THROTTLE_DUR
LEADTIME= pdn.LEADTIME
VDC = pdn.VDC
THRES = pdn.THRES
L = pdn.L
C = pdn.C
R = pdn.R
CLK = pdn.CLK
CLK_THROTTLE = pdn.CLK_THROTTLE
voltage, ve_cycles, power = sim_throttling_wrapper(power, pwr_throttle, THRES, L, C, R, VDC, CLK, CLK_THROTTLE, LEADTIME, THROTTLE_DUR)
plot_voltage.print_power(voltage, power, ve_cycles)
return voltage, power, ve_cycles
def run(print_stats=False):
VDC = pdn.VDC
THRES = pdn.THRES
L = pdn.L
C = pdn.C
R = pdn.R
CLK = pdn.CLK
THR_CLK = pdn.CLK_THROTTLE
HOME = os.environ['HOME']
#get power scaling constants
dirs = ["/home/jimmy/output_10_9/gem5_out/482.sphinx3_5_1000000_DESKTOP_HarvardPowerPredictor_4000000000",
"/home/jimmy/output_10_9/gem5_out/482.sphinx3_5_1000000_DESKTOP_HarvardPowerPredictor_2000000000"]
power = [plot_voltage.get_data(d, 'power.bin', np.single) for d in dirs]
(static_scale, dyn_scale) = plot_voltage.get_pwr_scaling(power[0],power[1],4E9,2E9)
d = "/home/jimmy/output_10_14/gem5_out/482.sphinx3_20_1000000_DESKTOP_HarvardPowerPredictor_fastforwardtest"
orig_data = plot_voltage.get_voltage(d, np.single, THRES, L, C, R, VDC, CLK, 0, static_scale, dyn_scale)
np.set_printoptions(threshold=np.inf)
thr_data = plot_voltage.get_voltage(d, np.single, THRES, L, C, R, VDC, CLK, THR_CLK-CLK, static_scale, dyn_scale)
mit_data = sim_throttling(orig_data[1], thr_data[1])
power_data = [orig_data[1],thr_data[1], mit_data[1]]
volt_data = [orig_data[0],thr_data[0], mit_data[0]]
#transform 2ghz to 4ghz
volt_test = np.copy(thr_data[0][0:100000])
volt_test = volt_test - 0.005
plot_voltage.plot([orig_data[0],thr_data[0], volt_test],
orig_data[2],
'10_14_mit_test',
labels=["fullclk","throttle", "test"])
if __name__ == "__main__":
run(True)
|
[
"plot_voltage.print_power",
"numpy.set_printoptions",
"numpy.copy",
"plot_voltage.get_voltage",
"plot_voltage.get_data",
"plot_voltage.plot",
"matplotlib.use",
"plot_voltage.get_pwr_scaling",
"cython.sim_pdn.sim_throttling_wrapper"
] |
[((28, 49), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (42, 49), False, 'import matplotlib\n'), ((899, 1010), 'cython.sim_pdn.sim_throttling_wrapper', 'sim_throttling_wrapper', (['power', 'pwr_throttle', 'THRES', 'L', 'C', 'R', 'VDC', 'CLK', 'CLK_THROTTLE', 'LEADTIME', 'THROTTLE_DUR'], {}), '(power, pwr_throttle, THRES, L, C, R, VDC, CLK,\n CLK_THROTTLE, LEADTIME, THROTTLE_DUR)\n', (921, 1010), False, 'from cython.sim_pdn import sim_throttling_wrapper\n'), ((1011, 1062), 'plot_voltage.print_power', 'plot_voltage.print_power', (['voltage', 'power', 've_cycles'], {}), '(voltage, power, ve_cycles)\n', (1035, 1062), False, 'import plot_voltage\n'), ((1648, 1724), 'plot_voltage.get_pwr_scaling', 'plot_voltage.get_pwr_scaling', (['power[0]', 'power[1]', '(4000000000.0)', '(2000000000.0)'], {}), '(power[0], power[1], 4000000000.0, 2000000000.0)\n', (1676, 1724), False, 'import plot_voltage\n'), ((1839, 1935), 'plot_voltage.get_voltage', 'plot_voltage.get_voltage', (['d', 'np.single', 'THRES', 'L', 'C', 'R', 'VDC', 'CLK', '(0)', 'static_scale', 'dyn_scale'], {}), '(d, np.single, THRES, L, C, R, VDC, CLK, 0,\n static_scale, dyn_scale)\n', (1863, 1935), False, 'import plot_voltage\n'), ((1937, 1974), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (1956, 1974), True, 'import numpy as np\n'), ((1995, 2103), 'plot_voltage.get_voltage', 'plot_voltage.get_voltage', (['d', 'np.single', 'THRES', 'L', 'C', 'R', 'VDC', 'CLK', '(THR_CLK - CLK)', 'static_scale', 'dyn_scale'], {}), '(d, np.single, THRES, L, C, R, VDC, CLK, THR_CLK -\n CLK, static_scale, dyn_scale)\n', (2019, 2103), False, 'import plot_voltage\n'), ((2314, 2344), 'numpy.copy', 'np.copy', (['thr_data[0][0:100000]'], {}), '(thr_data[0][0:100000])\n', (2321, 2344), True, 'import numpy as np\n'), ((2386, 2519), 'plot_voltage.plot', 'plot_voltage.plot', (['[orig_data[0], thr_data[0], volt_test]', 'orig_data[2]', '"""10_14_mit_test"""'], {'labels': "['fullclk', 'throttle', 'test']"}), "([orig_data[0], thr_data[0], volt_test], orig_data[2],\n '10_14_mit_test', labels=['fullclk', 'throttle', 'test'])\n", (2403, 2519), False, 'import plot_voltage\n'), ((1552, 1600), 'plot_voltage.get_data', 'plot_voltage.get_data', (['d', '"""power.bin"""', 'np.single'], {}), "(d, 'power.bin', np.single)\n", (1573, 1600), False, 'import plot_voltage\n')]
|
import os
import os.path
import sys
import logging
logger = logging.getLogger(__name__)
import numpy as np
import inspect
import datetime
import hashlib
import functools
import h5py
import filelock
import multiprocessing
import itertools
import random
from tqdm.auto import tqdm
#
# utilities for my hdf5 datasets
#
def _normalize_attribute_value_string(v):
# NOTE: Only ASCII strings allowed in string values.
return v.encode('ascii')
class _Hdf5GroupProxyObject:
def __init__(self, grp):
self.grp = grp
def get(self, key, default, *, _default_action=None):
if key in self.grp:
obj = self.grp[key]
if isinstance(obj, h5py.Group):
return _Hdf5GroupProxyObject(self.grp[key])
if isinstance(obj, h5py.Dataset):
return obj[()]
raise ValueError("Can't interface object value {!r}".format(obj))
if key in self.grp.attrs:
return self._unpack_attr_val(self.grp.attrs[key])
if _default_action:
return _default_action()
return default
def keys(self):
return itertools.chain(self.grp.keys(), self.grp.attrs.keys())
def keys_children(self):
return self.grp.keys()
def keys_attrs(self):
return self.grp.attrs.keys()
def all_attrs(self):
return dict([(k, self._unpack_attr_val(v)) for (k,v) in self.grp.attrs.items()])
def __getitem__(self, key):
def keyerror():
raise KeyError("No key {} in hdf5 group {!r} or its attributes"
.format(key, self.grp))
return self.get(key, None, _default_action=keyerror)
def _unpack_attr_val(self, att_val):
return _unpack_attr_val(att_val) # call global method
def value_equals(self, key, test_value):
val = self.get(key, None)
if val is None:
return (test_value is None)
if isinstance(val, np.ndarray) or isinstance(test_value, np.ndarray):
return np.all(val == test_value)
if _normalize_attribute_value_global(val, keep_float=False) \
!= _normalize_attribute_value_global(test_value, keep_float=False):
return False
return True
def __repr__(self):
return '_Hdf5GroupProxyObject('+repr(self.grp)+')'
def __str__(self):
ds = {k: str(v) for k, v in self.all_attrs().items() }
for k in self.keys_children():
v = self.grp[k]
ds[k] = '<{}>'.format(type(v).__name__)
return ('HDF5 group {' +
', '.join('{}: {}'.format(k,vstr) for k,vstr in ds.items()) + '}')
def hdf5_group(self):
"""
Return the group object in the HDF5 data structure, giving you direct access
to the :py:mod:`h5py` API in case you need it.
"""
return self.grp
def hdf5_key(self):
"""
Return the key in the HDF5 data structure where this group is located.
"""
return self.grp.name
def _unpack_attr_val(att_val):
if isinstance(att_val, bytes):
return att_val.decode('ascii')
#if isinstance(att_val, np.ndarray) and att_val.size == 1:
# # if it's a scalar, return the bare scalar and not an ndarray
# return att_val[()]
return att_val
def _normalize_attribute_value_global(
value, *,
normalize_string=_normalize_attribute_value_string,
keep_float=True
):
t = type(value)
if value is None:
return ""
if isinstance(value, str):
return _normalize_attribute_value_string(value)
if isinstance(value, bytes):
# bytes and str are treated the same, as ASCII strings. For storage
# of raw binary data you'll want to store a dataset of some kind
# e.g. with numpy.
return value
if isinstance(value, int) or np.issubdtype(t, np.integer):
return int(value)
if isinstance(value, float) or np.issubdtype(t, np.floating):
if keep_float:
return value
else:
return _normalize_attribute_value_string( '{:0.8g}'.format(value) )
if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
return _normalize_attribute_value_string(value.isoformat())
if isinstance(value, (datetime.timedelta,)):
return _normalize_attribute_value_string("total_seconds={:.06g}"
.format(value.total_seconds()))
raise ValueError("Cannot encode {!r} for HDF5 attribute storage, unknown type"
.format(value))
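# Illustrative sketch (added for exposition, not part of the original module): a few
# sample normalizations under the rules above -- strings and dates become ASCII bytes,
# ints stay ints, and floats either stay floats or are formatted when keep_float=False.
def _normalize_attribute_value_examples():
    assert _normalize_attribute_value_global("abc") == b"abc"
    assert _normalize_attribute_value_global(3) == 3
    assert _normalize_attribute_value_global(0.25) == 0.25
    assert _normalize_attribute_value_global(0.25, keep_float=False) == b"0.25"
    # datetime.date/time/datetime values become their ISO-format string as ASCII bytes,
    # and timedeltas become b"total_seconds=..." per the branches above.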
class Hdf5StoreResultsAccessor:
"""
TODO: Doc.....
Note: must be used in a context manager!
"""
def __init__(self, filename, *, realm='results'):
super().__init__()
self.filename = filename
self.realm = realm
self._lock_file_name = os.path.join(
os.path.dirname(filename),
'.' + os.path.basename(filename) + '.py_lock'
)
self._filelock = None
self._store = None
self.store_value_filters = []
def __enter__(self):
self._filelock = filelock.FileLock(self._lock_file_name)
self._filelock.acquire()
try:
self._store = h5py.File(self.filename, 'a')
except Exception:
self._filelock.release()
raise
return self
def __exit__(self, type, value, traceback):
try:
if self._store is not None:
self._store.close()
self._store = None
finally:
if self._filelock is not None:
self._filelock.release()
self._filelock = None
def iterate_results(self, *, predicate=None, **kwargs):
if self.realm not in self._store:
# no results registered yet, nothing to yield
return
grp_results = self._store[self.realm]
predicate_attrs = None
if predicate is not None:
sig = inspect.signature(predicate)
predicate_attrs = list( sig.parameters.keys() )
def want_this(grpiface):
for k,v in kwargs.items():
if not grpiface.value_equals(k, v):
return False
if predicate is not None:
return predicate(**{k: _unpack_attr_val(grpiface.get(k, None)) for k in predicate_attrs})
return True
for key in grp_results.keys():
grp = grp_results[key]
grpiface = _Hdf5GroupProxyObject(grp)
if want_this(grpiface):
yield grpiface
def attribute_values(self, attribute_name, *, include_none=False):
if self.realm not in self._store:
return set()
grp_results = self._store[self.realm]
return set(
_unpack_attr_val(attval)
for attval in (
grp.attrs.get(attribute_name, None)
for grp in (grp_results[key] for key in grp_results.keys())
)
if include_none or attval is not None
)
# vals = set()
# for key in grp_results.keys():
# grp = grp_results[key]
# this_val = _unpack_attr_val(grp.attrs[attribute_name])
# if this_val not in vals:
# vals.append(this_val)
# return vals
def has_result(self, attributes):
key = self._store_key(attributes)
if key in self._store:
return True
return False
def get_result(self, attributes):
key = self._store_key(attributes)
if key in self._store:
grp = self._store[key]
return _Hdf5GroupProxyObject(grp)
return None
def store_result(self, attributes, value, *, forbid_overwrite=False, info=None):
key = self._store_key(attributes)
if key in self._store:
if forbid_overwrite:
raise ValueError("key {!r} already exists in {}, not overwriting"
.format(key, self.realm))
logger.debug("Overwriting key %r in %s", key, self.realm)
del self._store[key]
grp = self._store.create_group(key)
for k, v in attributes.items():
grp.attrs[k] = self._normalize_attribute_value(v)
for filt in self.store_value_filters:
value = filt(value)
has_error = self._store_result_dict_value(grp, value)
# only raise errors *after* having written everything to disk, in case
# that computation was very time-costly to obtain and our poor user
# would otherwise lose all their hard-obtained results
if has_error is not None:
raise has_error
if info:
for k, v in info.items():
grp.attrs[k] = self._normalize_attribute_value(v)
def _store_result_dict_value(self, grp, value):
has_error = None
for k, v in value.items():
if k.startswith('_'):
continue
try:
for filt in self.store_value_filters:
v = filt(v)
if v is None:
continue
if isinstance(v, dict):
newgrp = grp.create_group(k)
has_error = self._store_result_dict_value(newgrp, v)
elif isinstance(v, (np.ndarray, int, float)) \
or np.issubdtype(np.dtype(type(v)), np.integer) \
or np.issubdtype(np.dtype(type(v)), np.floating):
# Pass on any numpy array as is to h5py. Also store floats
# and ints directly
dset = grp.create_dataset(k, data=v)
elif isinstance(v, str):
# difficult to support strings in HDF5 -- see
# https://docs.h5py.org/en/stable/strings.html
#
# we use " np.void(utf8 bytes) " stored in an attribute as
# it looks like it's the safest. NOTE: You need to access
# the string via result['string_field'].tobytes().decode('utf-8')
grp.attrs[k] = np.void(v.encode('utf-8'))
logger.warning("Storing string as UTF-8 opaque bytes for field ‘%s’. Use "
"“result['%s'].tobytes().decode('utf-8')” when reading "
"out the string.", k, k)
elif isinstance(v, bytes):
# store raw bytes
grp.attrs[k] = np.void(v)
logger.warning("Storing bytes as opaque type for field ‘%s’. Use "
"“result['%s'].tobytes()” when reading "
"out the bytes again.", k, k)
elif isinstance(v, (datetime.date, datetime.time, datetime.datetime)):
grp.attrs[k] = v.isoformat().encode('ascii')
elif isinstance(v, (datetime.timedelta,)):
grp.attrs[k] = ("timedelta(seconds={:.06g})"
.format(v.total_seconds())).encode('ascii')
else:
has_error = ValueError("Can't save object {!r}, unknown type".format(v))
# continue saving other stuff
except Exception as e:
has_error = e
return has_error
def delete_result(self, attributes, *, dry_run=False):
key = self._store_key(attributes)
if key not in self._store:
raise ValueError("No such key for attributes {!r}".format(attributes))
if dry_run:
logger.info("Delete results %r, key=%r (dry run)", attributes, key)
else:
del self._store[key]
logger.info("Deleted results %r, key=%r", attributes, key)
def delete_results(self, *, dry_run=False, **kwargs):
keys_to_delete = []
for it in self.iterate_results(**kwargs):
keys_to_delete.append(it.hdf5_key())
for key in keys_to_delete:
if dry_run:
logger.info("Delete results %r (dry run)", key)
def _do_get_result(key):
# use "self" outside inner class
return _Hdf5GroupProxyObject(self._store[key])
class get_all_attrs_str:
def __str__(self):
return repr(_do_get_result(key).all_attrs())
logger.debug("with properties: %r -> %s", key, get_all_attrs_str())
else:
del self._store[key]
logger.info("Deleted results %r", key)
def update_keys(self, attribute_names, *, add_default_keys=None, dry_run=False):
"""
Checks that all result storage keys are up-to-date. If you introduce a new
kwarg attribute in the storage, we can set that attribute to all
existing results with the given value in `add_default_keys`.
- `attribute_names` is a list or tuple of attribute names to consider when
composing the storage key.
- `add_default_keys` is a dictionary of new attribute names and values
to set to records that don't have that attribute set
"""
rename_keys = [] # [ (oldkey,newkey), ... ]
set_attributes = {} # { newkey: {attribute1: value1 ...}, ... }
if add_default_keys is None:
add_default_keys = {}
grp_results = self._store[self.realm]
for key in grp_results.keys():
grp = grp_results[key]
these_attributes = {}
this_set_attributes = {}
for k in attribute_names:
att_value = None
if k in grp.attrs:
att_value = grp.attrs[k]
else:
if k in add_default_keys:
att_value = add_default_keys[k]
this_set_attributes[k] = att_value
else:
att_value = None
these_attributes[k] = att_value
# also take note of any default attributes to set that are not part
# of the results-identifying attributes
for k, v in ((akey, aval,)
for akey, aval in add_default_keys.items()
if akey not in attribute_names):
if k not in grp.attrs:
this_set_attributes[k] = v
newkey = self._store_key(these_attributes, hash_only=True)
if newkey != key:
logger.debug("Will rename {} -> {}".format(key, newkey))
rename_keys.append( (key, newkey) )
if this_set_attributes:
logger.debug("Will set attributes on newkey {}: {!r}"
.format(newkey, this_set_attributes))
set_attributes[newkey] = this_set_attributes
if not rename_keys and not set_attributes:
logger.debug("All keys and attributes are up-to-date.")
return
logger.debug("Finished inspecting keys, proceeding to updates ... ")
for oldkey, newkey in rename_keys:
if dry_run:
logger.info("\tgrp_results.move({!r}, {!r})".format(oldkey, newkey))
else:
grp_results.move(oldkey, newkey)
for newkey, attrib in set_attributes.items():
grp = grp_results[newkey] if not dry_run else None
for ak, av in attrib.items():
if dry_run:
logger.info("\tresults({!r}).attrs[{!r}] = {!r}".format(newkey, ak, av))
else:
grp.attrs[ak] = self._normalize_attribute_value(av)
logger.debug("Keys and attributes renamed successfully.")
def _normalize_attribute_value(self, value, **kwargs):
return _normalize_attribute_value_global(value, **kwargs)
def _store_key(self, attributes, *, hash_only=False):
m = hashlib.sha1()
stuff = "\n".join(
"{key}={value}\n".format(
key=k,
value=repr(self._normalize_attribute_value(attributes[k], keep_float=False))
)
for k in sorted(attributes.keys())
)
m.update( stuff.encode('ascii') )
the_hash = m.hexdigest()
if hash_only:
return the_hash
return '{}/{}'.format(self.realm, the_hash)
class NoResultException(Exception):
pass
class MultipleResults:
def __init__(self, results=None):
# results = [
# ({attrs1...}, {infoattrs1...}, <result1>),
# ({attrs2...}, {infoattrs2...}, <result2>),
# ...
# ]
# attrsN are merged with "global" attributes (items
# in attrsN take precedence)
if results is not None:
self.results = results #[ (attrs, info, result) for (attrs, info, result) in results ]
else:
self.results = []
def append_result(self, attrs, info, result):
# if result is itself a MultipleResults instance, merge results.
if isinstance(result, MultipleResults):
for res_attrs, res_info_v, res in result.results:
try:
the_res_attrs = dict(attrs)
the_res_attrs.update(**res_attrs)
the_res_info = dict(info)
if res_info_v:
the_res_info.update(**res_info_v)
self.results.append( (the_res_attrs, the_res_info, res,) )
except Exception as e:
logger.warning(
f"Couldn't save result {attrs}, {res_attrs}; "
f"[info {info}, {res_info_v}] [result {res}]: {e}"
)
else:
self.results.append( (attrs, info, result) )
class _ShowValueShort:
def __init__(self, value, process_value=None):
self.value = value
self.process_value = process_value
def _processed_value(self):
if self.process_value is not None:
return self.process_value(self.value)
else:
return self.value
def __str__(self):
return _showvalue(self._processed_value())
def __repr__(self):
return repr(self._processed_value())
def _showvalue(value, short=False):
if isinstance(value, dict) and not short:
return '{' + ",".join(
"{}={}".format(k, _showvalue(v, short=True))
for k,v in value.items()
) + '}'
if short and isinstance(value, (np.ndarray,)):
# print short version of ndarray
with np.printoptions(precision=4,threshold=8,linewidth=9999,):
return str(value)
if isinstance(value, (float,)) or np.issubdtype(type(value), np.floating):
return "%.4g"%(value)
if value is None or isinstance(value, (int, bool, str, bytes)):
return str(value)
return '<{}>'.format(value.__class__.__name__)
def _call_with_accepted_kwargs(fun, kwargs):
sig = inspect.signature(fun)
fun_args = set( sig.parameters.keys() )
return fun(**{k: v
for k, v in kwargs.items()
if k in fun_args})
class FnComputer:
decode_inputargs = None
fixed_attributes = None
multiple_attribute_values = None
info = None
force_recompute = False
skip_store = False
def __call__(self):
raise RuntimeError("You need to reimplement the __call__() function")
class ComputeAndStore:
"""
Wraps a function `fn` that computes something potentially expensive with the
necessary code to perform the computation only if it doesn't already exist
in the data storage described by `store_filename` and `realm` and designed
to be managed by a :py:class:`Hdf5StoreResultsAccessor`.
To determine whether the computation must be run, and to store the result
after the computation if it was carried out, the attributes that
characterize the associated result in the
:py:class:`Hdf5StoreResultsAccessor` are determined as follows (for use with
:py:meth:`Hdf5StoreResultsAccessor.has_result()` and
:py:meth:`Hdf5StoreResultsAccessor.store_result()`). The function's named
arguments are considered as attributes, and they are merged with the given
attribute dictionary `fixed_attributes`.
The return value of the function (usually a dictionary) is then stored using
a :py:class:`Hdf5StoreResultsAccessor` instance in the given filename and realm,
with the associated attributes. The function may also return an instance of
:py:class:`MultipleResults`—see more on this topic below.
The `info` argument can be a dictionary of values to store alongside with
the result, but that do not contribute to the identification of the result
instance (see :py:meth:`Hdf5StoreResultsAccessor.store_result()`'s `info=` keyword
argument).
It is possible to "decode" some arguments of `fn()` if you would like the
attribute value in the store file to have a different format or
representation as the value actually passed on to `fn()`. Use the
`decode_inputargs()` for this purpose. It is given the tuple of input
arguments as-is (without any 'multiple-attributes' arguments—see below), and
is supposed to return the arguments to send to `fn()` instead (either as a
tuple or as a kwargs dictionary). If a tuple is returned, it must preserve
the order and number of the arguments.
The results storage file `store_filename` is accessed with a
:py:class:`Hdf5StoreResultsAccessor` instance. The instance is only created
momentarily to check whether the results exist in the storage, and again if
necessary to store the result into the cache. In this way multiple
instances of this function can run in different processes without locking
out the results storage file.
Messages are logged to the given `logger` instance (see python's
:py:mod:`logging` mechanism), or to a default logger.
**Computing functions with multiple attribute values in one function
call:**
Sometimes we want to compute multiple result objects in one go, especially
if they share some common intermediate steps. In such cases, the function
should return a :py:class:`MultipleResults` instance that collects the
different result objects along with their different attributes values. The
attributes specified in each object in `MultipleResults` are merged with the
function's arguments and with the `fixed_attributes`.
When the function returns multiple result objects, then `ComputeAndStore`
needs additional information in order to determine if a computation needs to
run, and if so, which of those multiple results need to be computed. Use
the `multiple_attribute_values` field to this effect. This field should be
a list of dictionaries, or a dictionary containing a list in one of its
values, that specify any additional attribute(s) and the values associated
with the results that the function is expected to return. These values are
used to check the existence of the result objects in the store.
If the function accepts a keyword argument associated with a "multiple
result attributes", then a list of all the values that we need to compute
(i.e., that are not in the store) is provided to the function via that
keyword argument. If multiple such arguments are accepted, then all these
keyword arguments `kw1`, `kw2`, ... are given a list of the same length,
such that `{kw1=kw1[j], kw2=kw2[j], ...}` for `j=0,1,...` describe the
result objects that need to be computed.
"""
def __init__(self, fn, store_filename, *,
realm=None,
fixed_attributes=None,
info=None,
decode_inputargs=None,
multiple_attribute_values=None,
force_recompute=None,
skip_store=None,
logger=None):
self.fn = fn
if isinstance(fn, FnComputer):
self.fn_name = fn.__class__.__name__
fn_sig = inspect.signature(fn.__call__)
else:
self.fn_name = fn.__name__
fn_sig = inspect.signature(fn)
self.fn_arg_names = list( fn_sig.parameters.keys() )
self.store_filename = store_filename
self.realm = realm
self.fixed_attributes = {}
if getattr(fn, 'fixed_attributes', None) is not None:
self.fixed_attributes.update(fn.fixed_attributes)
if fixed_attributes is not None:
self.fixed_attributes.update(fixed_attributes)
self.info = {}
if getattr(fn, 'info', None) is not None:
self.info.update(fn.info)
if info is not None:
self.info.update(info)
self.decode_inputargs = None
if getattr(fn, 'decode_inputargs', None) is not None:
self.decode_inputargs = fn.decode_inputargs
if decode_inputargs is not None:
if self.decode_inputargs is not None:
raise ValueError("decode_inputargs=... specified both in FnComputer class "
"and as argument to ComputeAndStore()")
self.decode_inputargs = decode_inputargs
self.multiple_attribute_values = None
if getattr(fn, 'multiple_attribute_values', None) is not None:
self.multiple_attribute_values = fn.multiple_attribute_values
if multiple_attribute_values is not None:
if self.multiple_attribute_values is not None:
raise ValueError("multiple_attribute_values=... specified both in FnComputer "
"class and as argument to ComputeAndStore()")
self.multiple_attribute_values = multiple_attribute_values
if self.multiple_attribute_values is None:
self.multiple_attribute_values = []
# go through multiple_attribute_values, and replace dictionary-of-list
# by list-of-dictionaries, i.e. {'a': [1, 2]} -> [{'a': 1}, {'a': 2}]
self.multiple_attribute_values = \
flatten_attribute_value_lists(self.multiple_attribute_values)
self.multiple_attribute_all_keys = \
list(set( itertools.chain.from_iterable(
d.keys() for d in self.multiple_attribute_values
) ))
#print(f"{self.multiple_attribute_values=}")
self.fn_attribute_names = [k for k in self.fn_arg_names
if k not in self.multiple_attribute_all_keys ]
self.force_recompute = False
if hasattr(fn, 'force_recompute'):
self.force_recompute = fn.force_recompute
if force_recompute is not None:
self.force_recompute = self.force_recompute or force_recompute
self.skip_store = False
if hasattr(fn, 'skip_store'):
self.skip_store = fn.skip_store
if skip_store is not None:
self.skip_store = self.skip_store and skip_store
if logger is None:
self.logger = logging.getLogger(__name__ + '.ComputeAndStore')
else:
self.logger = logger
def _prepare_inputargs_as_kwargs(self, inputargs):
decoded_inputargs = inputargs
if self.decode_inputargs is not None:
decoded_inputargs = self.decode_inputargs(inputargs)
if isinstance(decoded_inputargs, dict):
kwargs = decoded_inputargs
else:
if len(decoded_inputargs) != len(self.fn_attribute_names):
raise ValueError("Can't match (decoded) input arguments %r to "
"function parameters %r"
% (decoded_inputargs, self.fn_attribute_names))
kwargs = dict(zip(self.fn_attribute_names, decoded_inputargs))
return kwargs
def __call__(self, inputargs):
return self.call_with_inputs( [inputargs] )
def call_with_inputs(self, list_of_inputargs):
logger = self.logger
import phfnbutils # TimeThis
if self.skip_store:
# offer friendly warning to make sure the user didn't forget to
# unset skip_store before a very long computation
logger.warning("`skip_store` is set to True, results will not be stored at the end!")
# we might have to decode the inputargs, in case they have attribute
# values encoded in some way (e.g. dependent attributes zipped together)
kwargs = None
list_of_kwargs = [ self._prepare_inputargs_as_kwargs(inputargs)
for inputargs in list_of_inputargs ]
list_of_kwargs_and_attributes = [
(kwargs, dict(self.fixed_attributes, **kwargs))
for kwargs in list_of_kwargs
]
#logger.debug("requested %s(%r)", self.fn_name,
# _ShowValueShort(list_of_kwargs_and_attributes, lambda x: [y[1] for y in x]))
with self._get_store() as store:
# def is_need_to_recompute(attributes):
# if self.force_recompute:
# return True
# return not store.has_result(attributes)
#
# def which_attributes_need_recompute
list_of_kwargs_and_attributes_and_multiattribs = []
for kwargs, attributes in list_of_kwargs_and_attributes:
multiple_attribute_values = self.multiple_attribute_values
if not multiple_attribute_values:
multiple_attribute_values = [ {} ]
# here we use multiple_attribute_values also for functions that
# don't explicitly have any multiple_attribute_values. In
# those cases an empty list means that there is nothing to
# compute, and a list containing only an empty dictionary means
# that we should compute that function.
if not self.force_recompute:
multiple_attribute_values = [
m
for m in multiple_attribute_values
if not store.has_result(dict(attributes, **m))
]
if not multiple_attribute_values:
# nothing to compute even for non-multiple-attributed
# functions, see comment above
logger.debug("Results for %s [%s] already present, not repeating computation",
_ShowValueShort(attributes),
_ShowValueShort(self.multiple_attribute_values))
continue
multiattribkwargs = {
k: [m.get(k, None) for m in multiple_attribute_values]
for k in self.multiple_attribute_all_keys
}
list_of_kwargs_and_attributes_and_multiattribs.append(
(kwargs, attributes, multiattribkwargs)
)
# if not self.multiple_attribute_values:
# if is_need_to_recompute(attributes):
# def have_all_necessary_results_in_store():
# if not self.multiple_attribute_values:
# return store.has_result(attributes)
# return
# if not self.force_recompute and have_all_necessary_results_in_store():
# logger.debug("Results for %s already present, not repeating computation",
# _ShowValueShort(attributes))
# else:
# new_list_of_kwargs_and_attributes.append( (kwargs,attributes,) )
if not list_of_kwargs_and_attributes_and_multiattribs:
logger.debug("There's nothing to compute.")
return
all_results = MultipleResults()
for kwargs, attributes, multiattribkwargs \
in list_of_kwargs_and_attributes_and_multiattribs:
logger.info("computing for attributes = %s [with multi-attributes = %s]",
_ShowValueShort(attributes), _ShowValueShort(multiattribkwargs))
run_kwargs = dict(kwargs, **{k: v for (k,v) in multiattribkwargs.items()
if k in self.fn_arg_names})
tr = {}
result = None
try:
with phfnbutils.TimeThis(tr, silent=True):
# call the function that actually computes the result
result = self.fn(**run_kwargs)
except NoResultException as e:
logger.warning(
"No result (NoResultException): %s [for %s after %s seconds]",
e, _ShowValueShort(attributes), tr['timethisresult'].dt,
)
return False
except Exception as e:
logger.error("Exception while computing result!", exc_info=True)
return False
dt = tr['timethisresult'].dt
if result is None:
logger.warning("No result (returned None) for %s, after %s seconds",
_ShowValueShort(attributes), dt)
return False
logger.debug("result: %s", _ShowValueShort(result))
logger.info("Got result for %s [runtime: %s seconds]",
_ShowValueShort(attributes), dt)
the_info = {}
for info_k, info_v in self.info.items():
if callable(info_v):
info_v = _call_with_accepted_kwargs(info_v, attributes)
the_info[info_k] = info_v
the_info.update(timethisresult=dt)
all_results.append_result(attributes, the_info, result)
# store results
if not self.skip_store:
with self._get_store() as store:
for attributes, the_info, result in all_results.results:
store.store_result(attributes, result, info=the_info)
# signal to caller that we've computed (a) new result(s) -- but this
# return value is probably ignored anyways
return True
def _get_store(self):
store_kwargs = {}
if self.realm is not None:
store_kwargs.update(realm=self.realm)
return Hdf5StoreResultsAccessor(self.store_filename, **store_kwargs)
def flatten_attribute_value_lists(alist):
# {'a': [1, 2]} -> [{'a': 1}, {'a': 2}] for all keys in all listed dictionaries
if isinstance(alist, dict):
alist = [alist]
need_another_loop = True
while need_another_loop:
#print(f"Looping to flatten attribute value lists, {alist=}")
newalist = []
need_another_loop = False
for a in alist:
#print(f"Inspecting {a=}")
assert isinstance(a, dict) # should be dict here
k, v = next( ((k, v) for (k,v) in a.items() if isinstance(v, list)),
(None,None) )
if k is not None:
#print(f"Expanding {k=}: {v=}")
need_another_loop = True
# expand list value into list of dictionaries with each value
def _updated_k_with_vitem(vitem):
d = dict(a)
d[k] = vitem
return d
expanded = [
_updated_k_with_vitem(vitem)
for vitem in v
]
#print(f"{expanded=}") # DEBUG
newalist += expanded
else:
newalist += [a] # ok, keep this dict as is
alist = newalist
return newalist
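# Worked example of the flattening above (added for illustration; the attribute names
# 'a' and 'b' are invented): a dict whose values contain lists is expanded into one
# plain dict per combination of list entries.
def _flatten_example():
    out = flatten_attribute_value_lists({'a': [1, 2], 'b': 'x'})
    assert out == [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'x'}]
    return out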
|
[
"h5py.File",
"numpy.void",
"hashlib.sha1",
"os.path.basename",
"filelock.FileLock",
"os.path.dirname",
"numpy.all",
"logging.getLogger",
"inspect.signature",
"phfnbutils.TimeThis",
"numpy.printoptions",
"numpy.issubdtype"
] |
[((61, 88), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (78, 88), False, 'import logging\n'), ((19202, 19224), 'inspect.signature', 'inspect.signature', (['fun'], {}), '(fun)\n', (19219, 19224), False, 'import inspect\n'), ((3862, 3890), 'numpy.issubdtype', 'np.issubdtype', (['t', 'np.integer'], {}), '(t, np.integer)\n', (3875, 3890), True, 'import numpy as np\n'), ((3953, 3982), 'numpy.issubdtype', 'np.issubdtype', (['t', 'np.floating'], {}), '(t, np.floating)\n', (3966, 3982), True, 'import numpy as np\n'), ((5160, 5199), 'filelock.FileLock', 'filelock.FileLock', (['self._lock_file_name'], {}), '(self._lock_file_name)\n', (5177, 5199), False, 'import filelock\n'), ((16130, 16144), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (16142, 16144), False, 'import hashlib\n'), ((2018, 2043), 'numpy.all', 'np.all', (['(val == test_value)'], {}), '(val == test_value)\n', (2024, 2043), True, 'import numpy as np\n'), ((4912, 4937), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (4927, 4937), False, 'import os\n'), ((5273, 5302), 'h5py.File', 'h5py.File', (['self.filename', '"""a"""'], {}), "(self.filename, 'a')\n", (5282, 5302), False, 'import h5py\n'), ((6029, 6057), 'inspect.signature', 'inspect.signature', (['predicate'], {}), '(predicate)\n', (6046, 6057), False, 'import inspect\n'), ((18802, 18859), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(4)', 'threshold': '(8)', 'linewidth': '(9999)'}), '(precision=4, threshold=8, linewidth=9999)\n', (18817, 18859), True, 'import numpy as np\n'), ((24346, 24376), 'inspect.signature', 'inspect.signature', (['fn.__call__'], {}), '(fn.__call__)\n', (24363, 24376), False, 'import inspect\n'), ((24451, 24472), 'inspect.signature', 'inspect.signature', (['fn'], {}), '(fn)\n', (24468, 24472), False, 'import inspect\n'), ((27331, 27379), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.ComputeAndStore')"], {}), "(__name__ + '.ComputeAndStore')\n", (27348, 27379), False, 'import logging\n'), ((4957, 4983), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (4973, 4983), False, 'import os\n'), ((32714, 32750), 'phfnbutils.TimeThis', 'phfnbutils.TimeThis', (['tr'], {'silent': '(True)'}), '(tr, silent=True)\n', (32733, 32750), False, 'import phfnbutils\n'), ((10631, 10641), 'numpy.void', 'np.void', (['v'], {}), '(v)\n', (10638, 10641), True, 'import numpy as np\n')]
|
'''
import numpy as np
import pandas as pd
import nltk
nltk.download('punkt') # one time execution
import re
we_df = pd.read_hdf('mini.h5', start = 0, stop = 100) # (362891, 300)
pi(we_df.shape)
words = we_df.index
pi(words)
pi(words[50000])
pi(we_df.iloc[50000])
mes = 'This is some demo text, which has some spe$hial charecters! And numbers 10, also mixed with text, like - numb3r and number34. Just for testing. #peace_out!'
def get_text_vector(text):
re.findall(r'[a-zA-Z]+', )
'''
# python textrank.py
# textrank (using conceptnet word ventors/embeddings and cosinesimilarity)
import numpy as np
import pandas as pd
'''
import time
from sklearn.metrics import confusion_matrix
import json
import re
'''
cnnb_df = pd.read_hdf('mini.h5')
# cnnb_df = cnnb_df/59 # not req. (takes ~1.3sec)
def pi(a, b = None):
if b:
print('\n', b, a, '\n', type(a))
else:
print('\n', a, '\n', type(a))
'''
mes = 'This is some demo text, which has some spe$hial characters! And numbers 10, also mixed with text, like - numb3r and number34. Just for testing. #peace_out!'
#words = ['This', 'is', 'some', 'demo', 'text', 'which', 'has', 'some', 'spe', 'hial', 'characters', 'And', 'numbers', '10', 'also', 'mixed', 'with', 'text', 'like', 'numb', 'r', 'and', 'number', 'Just', 'for', 'testing', 'peace_out']
mes2 = 'demo text, which only has plain characters and no numbers, also not mixed with text, like - numb3r and number34. Just for testing.'
#vec = text_to_vec(list(map(lambda x: x.lower(), words)))
words = re.findall(r'[a-zA-Z]+', mes.lower())
words2 = re.findall(r'[a-zA-Z]+', mes2.lower())
#pi(words)
vec = text_to_vec(words)
vec2 = text_to_vec(words2)
sim = get_cosine_similarity(vec, vec2)
pi(sim)
pi(keyerror_list)
'''
# Read data
df = pd.read_csv('demo_articles.csv')
df.head()
df['article_text'][0]
# Form sentences
from nltk.tokenize import sent_tokenize
sentences = []
for s in df['article_text']:
sentences.append(sent_tokenize(s))
sentences = [y for x in sentences for y in x] # flatten list / 2d to 1d / combine
# Text preprocessing
# remove punctuations, numbers and special characters
clean_sentences = pd.Series(sentences).str.replace("[^a-zA-Z]", " ", regex=True)
# make alphabets lowercase
clean_sentences = [s.lower() for s in clean_sentences]
import nltk
#nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
print(stop_words)
print(len(stop_words))
# function to remove stopwords
def remove_stopwords(sen):
sen_new = " ".join([i for i in sen if i not in stop_words])
return sen_new
# remove stopwords from the sentences
clean_sentences = [remove_stopwords(r.split()) for r in clean_sentences]
# Vector Representation of Sentences
# Form vector from text
keyerror_list = []
def word_to_vec(word):
vec = pd.Series(np.zeros(shape=(300)))
try:
wuri = '/c/en/' + word
vec = cnnb_df.loc[wuri]
except KeyError:
keyerror_list.append(wuri)
return vec
sentence_vectors = []
for i in clean_sentences:
if len(i) != 0:
#v = sum([word_embeddings.get(w, np.zeros((100,))) for w in i.split()]) / (len(i.split())+0.001)
v = sum([word_to_vec(word) for word in i.split()]) / (len(i.split())+0.001)
else:
v = pd.Series(np.zeros(shape=(300)))
sentence_vectors.append(v)
# Similarity Matrix Preparation
# similarity matrix
sim_mat = np.zeros([len(sentences), len(sentences)])
'''
from sklearn.metrics.pairwise import cosine_similarity
'''
# Vector comparison
def get_cosine_similarity(vec1, vec2):
# =a.b/|a||b| =dot_prod/vec_mag
try:
return sum(vec1 * vec2) / ( pow(sum(vec1*vec1), 0.5) * pow(sum(vec2*vec2), 0.5) )
except ZeroDivisionError:
return 0
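# Quick worked example of the cosine similarity above (illustrative numbers only): for
# vec1 = [1, 0] and vec2 = [1, 1], the dot product is 1 and |vec1||vec2| = 1 * sqrt(2),
# so the similarity is 1/sqrt(2) ~= 0.707. Note that with numpy/pandas inputs a
# zero-length vector yields nan with a RuntimeWarning rather than raising
# ZeroDivisionError, which is the warning reproduced in the comment block further down.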
for i in range(len(sentences)):
for j in range(len(sentences)):
if i != j:
#sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1,300), sentence_vectors[j].reshape(1,300))[0,0]
sim_mat[i][j] = get_cosine_similarity(sentence_vectors[i], sentence_vectors[j])
'''
sim_mat[i][j] = get_cosine_similarity(sentence_vectors[i], sentence_vectors[j])
__main__:3: RuntimeWarning: invalid value encountered in double_scalars
'''
# Applying PageRank Algorithm
import networkx as nx
nx_graph = nx.from_numpy_array(sim_mat)
scores = nx.pagerank(nx_graph, max_iter=100) # default max_iter is 100
# Summary Extraction
ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True)
# Extract top 10 sentences as the summary
for i in range(10):
print(ranked_sentences[i][1])
|
[
"pandas.read_hdf",
"networkx.pagerank",
"pandas.read_csv",
"numpy.zeros",
"networkx.from_numpy_array",
"nltk.tokenize.sent_tokenize",
"pandas.Series",
"nltk.corpus.stopwords.words"
] |
[((772, 794), 'pandas.read_hdf', 'pd.read_hdf', (['"""mini.h5"""'], {}), "('mini.h5')\n", (783, 794), True, 'import pandas as pd\n'), ((1847, 1879), 'pandas.read_csv', 'pd.read_csv', (['"""demo_articles.csv"""'], {}), "('demo_articles.csv')\n", (1858, 1879), True, 'import pandas as pd\n'), ((2488, 2514), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2503, 2514), False, 'from nltk.corpus import stopwords\n'), ((4468, 4496), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['sim_mat'], {}), '(sim_mat)\n', (4487, 4496), True, 'import networkx as nx\n'), ((4507, 4542), 'networkx.pagerank', 'nx.pagerank', (['nx_graph'], {'max_iter': '(100)'}), '(nx_graph, max_iter=100)\n', (4518, 4542), True, 'import networkx as nx\n'), ((2049, 2065), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['s'], {}), '(s)\n', (2062, 2065), False, 'from nltk.tokenize import sent_tokenize\n'), ((2956, 2975), 'numpy.zeros', 'np.zeros', ([], {'shape': '(300)'}), '(shape=300)\n', (2964, 2975), True, 'import numpy as np\n'), ((2254, 2274), 'pandas.Series', 'pd.Series', (['sentences'], {}), '(sentences)\n', (2263, 2274), True, 'import pandas as pd\n'), ((3426, 3445), 'numpy.zeros', 'np.zeros', ([], {'shape': '(300)'}), '(shape=300)\n', (3434, 3445), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn.init as init
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from .layer_norm import LayerNorm
def maybe_mask(attn, attn_mask):
if attn_mask is not None:
assert attn_mask.size() == attn.size(), \
'Attention mask shape {} mismatch ' \
'with Attention logit tensor shape ' \
'{}.'.format(attn_mask.size(), attn.size())
attn.data.masked_fill_(attn_mask, -float('inf'))
class DotProductAttention(nn.Module):
def __init__(self, num_units, num_mem_units, num_heads):
super(DotProductAttention, self).__init__()
self.linear_ins = [
nn.Linear(num_units, num_mem_units, bias=False) for _ in range(num_heads)]
self.linear_outs = [nn.Linear(
num_mem_units + 2 * num_units, num_units, bias=False) for _ in range(num_heads)]
for i, x in enumerate(self.linear_ins + self.linear_outs):
setattr(self, 'param_%s' % i, x)
self.num_heads = num_heads
def forward(self, query, context, attn_mask=None):
"""Apply attention.
query: batch x dim
context: batch x length x dim
"""
input_ = query
for i in range(self.num_heads):
query_proj = self.linear_ins[i](
input_).unsqueeze(2) # batch x dim x 1
attn = torch.bmm(context, query_proj).squeeze(2) # batch x length
maybe_mask(attn, attn_mask)
attn = F.softmax(attn, dim=1)
wc = torch.bmm(attn.unsqueeze(1), context).squeeze(1) # batch x dim
wc = torch.cat([wc, input_, query], 1) # batch x (num_mem_units + 2 * num_units)
wc = self.linear_outs[i](wc)
wc = torch.tanh(wc)
input_ = wc
return wc, attn
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, dim, attn_dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.temper = np.power(dim, 0.5)
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=-1)
def forward(self, q, k, v, attn_mask=None):
attn = torch.bmm(q, k.transpose(1, 2)) / self.temper
maybe_mask(attn, attn_mask)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class RepeatLinear(nn.Module):
def __init__(self, repeat, feature_dim, dim):
super(RepeatLinear, self).__init__()
self.repeat = repeat
self.layer = nn.Parameter(torch.FloatTensor(repeat, feature_dim, dim))
self.output_dim = dim
init.xavier_normal(self.layer)
def forward(self, x):
_, dim1, dim2 = x.size()
if self.repeat > 1:
out = x.repeat(self.repeat, 1, 1).view(self.repeat, -1, dim2)
else:
out = x.view(1, -1, dim2)
return torch.bmm(out, self.layer).view(-1, dim1, self.output_dim)
class MultiHeadAttention(nn.Module):
def __init__(
self, num_heads, num_units, query_dim, key_dim, value_dim,
dropout_p=0.1):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.num_units = num_units
assert query_dim == key_dim
self.query_dim = query_dim
self.key_dim = key_dim
self.value_dim = value_dim
self.query_layer = RepeatLinear(num_heads, num_units, query_dim)
self.key_layer = RepeatLinear(num_heads, num_units, key_dim)
self.value_layer = RepeatLinear(num_heads, num_units, value_dim)
self.attention = ScaledDotProductAttention(num_units)
self.proj = nn.Linear(num_heads * value_dim, num_units)
self.dropout = nn.Dropout(dropout_p)
self.layer_norm = LayerNorm(num_units)
def forward(self, query, keys, values, attn_mask=None):
# query shape: batch x num queries x num units
# keys shape: batch x num kv x num units
# values shape: batch x num kv x num units
# batch * heads x num queries x query_dim
Q = self.query_layer(query)
# batch * heads x num kv x key_dim (= query_dim)
K = self.key_layer(keys)
# batch * heads x num kv x value_dim
V = self.value_layer(values)
# outputs: batch * heads x num queries x value_dim
# attns: batch * heads x num queries x num kv
outputs, attns = self.attention(
Q, K, V, attn_mask=attn_mask.repeat(self.num_heads, 1, 1) if attn_mask is not None else None)
# TODO: transpose or unfold?
bsz = query.size(0)
# batch x num queries x num_heads * value_dim
outputs = torch.cat(torch.split(outputs, bsz, dim=0), dim=-1)
# batch x num queries x num_units
outputs = self.proj(outputs)
outputs = self.dropout(outputs)
return self.layer_norm(outputs + query), attns
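# Shape walkthrough of the forward pass above (the concrete sizes are invented for
# illustration): with batch=2, num_heads=4, num_units=16, query_dim=key_dim=value_dim=8,
# 3 queries and 5 key/value pairs:
#   query  (2, 3, 16) -> Q (2*4, 3, 8)
#   keys   (2, 5, 16) -> K (2*4, 5, 8)
#   values (2, 5, 16) -> V (2*4, 5, 8)
#   attention -> outputs (8, 3, 8), attns (8, 3, 5)
#   torch.split(outputs, bsz=2, dim=0) + cat along the last dim -> (2, 3, 4*8=32)
#   proj -> (2, 3, 16), residual + layer norm -> (2, 3, 16)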
class SimpleMultiHeadAttention(MultiHeadAttention):
def __init__(self, num_heads, num_units, dropout_p=0.1):
assert num_units % num_heads == 0
dim = num_units // num_heads  # integer division: nn.Linear dimensions must be ints
super(SimpleMultiHeadAttention, self).__init__(
num_heads, num_units, dim, dim, dim, dropout_p)
def forward(self, query, values, attn_mask=None):
if query.dim() == 2:
query = query.unsqueeze(1)
outputs, attns = super(SimpleMultiHeadAttention, self).forward(
query, values, values, attn_mask)
if query.dim() == 2:
outputs = outputs.squeeze(1)
return outputs, attns
class SimpleSDPAttention(ScaledDotProductAttention):
def __init__(self, query_dim, values_dim, dropout_p=0.0):
super(SimpleSDPAttention, self).__init__(values_dim, dropout_p)
self.query_proj = nn.Linear(query_dim, values_dim)
def forward(self, query, values, attn_mask=None):
# query shape: batch x query dim
# values shape: batch x num values x values dim
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
output, attn = super(SimpleSDPAttention, self).forward(
self.query_proj(query).unsqueeze(1), values, values, attn_mask)
output = output.squeeze(1)
return output, attn
|
[
"torch.nn.Dropout",
"torch.nn.init.xavier_normal",
"torch.bmm",
"numpy.power",
"torch.split",
"torch.FloatTensor",
"torch.cat",
"torch.nn.functional.softmax",
"torch.nn.Softmax",
"torch.nn.Linear",
"torch.tanh"
] |
[((2039, 2057), 'numpy.power', 'np.power', (['dim', '(0.5)'], {}), '(dim, 0.5)\n', (2047, 2057), True, 'import numpy as np\n'), ((2081, 2105), 'torch.nn.Dropout', 'nn.Dropout', (['attn_dropout'], {}), '(attn_dropout)\n', (2091, 2105), True, 'import torch.nn as nn\n'), ((2129, 2147), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (2139, 2147), True, 'import torch.nn as nn\n'), ((2381, 2399), 'torch.bmm', 'torch.bmm', (['attn', 'v'], {}), '(attn, v)\n', (2390, 2399), False, 'import torch\n'), ((2704, 2734), 'torch.nn.init.xavier_normal', 'init.xavier_normal', (['self.layer'], {}), '(self.layer)\n', (2722, 2734), True, 'import torch.nn.init as init\n'), ((3737, 3780), 'torch.nn.Linear', 'nn.Linear', (['(num_heads * value_dim)', 'num_units'], {}), '(num_heads * value_dim, num_units)\n', (3746, 3780), True, 'import torch.nn as nn\n'), ((3804, 3825), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_p'], {}), '(dropout_p)\n', (3814, 3825), True, 'import torch.nn as nn\n'), ((5842, 5874), 'torch.nn.Linear', 'nn.Linear', (['query_dim', 'values_dim'], {}), '(query_dim, values_dim)\n', (5851, 5874), True, 'import torch.nn as nn\n'), ((711, 758), 'torch.nn.Linear', 'nn.Linear', (['num_units', 'num_mem_units'], {'bias': '(False)'}), '(num_units, num_mem_units, bias=False)\n', (720, 758), True, 'import torch.nn as nn\n'), ((814, 877), 'torch.nn.Linear', 'nn.Linear', (['(num_mem_units + 2 * num_units)', 'num_units'], {'bias': '(False)'}), '(num_mem_units + 2 * num_units, num_units, bias=False)\n', (823, 877), True, 'import torch.nn as nn\n'), ((1533, 1555), 'torch.nn.functional.softmax', 'F.softmax', (['attn'], {'dim': '(1)'}), '(attn, dim=1)\n', (1542, 1555), True, 'import torch.nn.functional as F\n'), ((1654, 1687), 'torch.cat', 'torch.cat', (['[wc, input_, query]', '(1)'], {}), '([wc, input_, query], 1)\n', (1663, 1687), False, 'import torch\n'), ((1762, 1776), 'torch.tanh', 'torch.tanh', (['wc'], {}), '(wc)\n', (1772, 1776), False, 'import torch\n'), ((2621, 2664), 'torch.FloatTensor', 'torch.FloatTensor', (['repeat', 'feature_dim', 'dim'], {}), '(repeat, feature_dim, dim)\n', (2638, 2664), False, 'import torch\n'), ((4757, 4789), 'torch.split', 'torch.split', (['outputs', 'bsz'], {'dim': '(0)'}), '(outputs, bsz, dim=0)\n', (4768, 4789), False, 'import torch\n'), ((2964, 2990), 'torch.bmm', 'torch.bmm', (['out', 'self.layer'], {}), '(out, self.layer)\n', (2973, 2990), False, 'import torch\n'), ((1412, 1442), 'torch.bmm', 'torch.bmm', (['context', 'query_proj'], {}), '(context, query_proj)\n', (1421, 1442), False, 'import torch\n')]
|
# Compose a rhythm-synced photo-album video from images and music
from typing import Tuple, Union, Any
import moviepy.editor
from moviepy.video.fx.speedx import speedx
import os
import sys
import wave
import numpy as np
import re
from progressbar import *
from common import python_box
from common import gui
import psutil
import time
import math
import moviepy.audio.fx.all
class FfmpegPlugin:
def __init__(self):
self.t = time.time()
self.ffmpeg = "ffmpeg"
def __del__(self):
print("use time:", time.time() - self.t)
def video2audio(self, directory):
f_lst = python_box.dir_list(directory, "mp4$")
for file in f_lst:
wav = re.sub("mp4", "", file) + "wav"
print(file, wav)
cmd = "%s -y -i '%s' '%s'" % (self.ffmpeg, file, wav)
print(cmd)
os.system(cmd)
def audio_split(self, directory):
f_lst = python_box.dir_list(directory, "mp3$")
for file in f_lst:
seconds = 0
while 1:
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
start = ("%01d:%02d:%02d" % (h, m, s))
end = "0:0:07"
seconds += 7
print(file)
mp4 = file
mp4_split = re.sub(".mp3", "", file) + "_%d.pcm" % seconds
cmd = "{ffmpeg} -y -ss {start} -t {end} -i {mp4} -acodec pcm_s16le -f s16le -ac 1 -ar 16000 {mp4_split}".format(
ffmpeg=self.ffmpeg, start=start, end=end, mp4_split=mp4_split, mp4=mp4)
print(cmd)
os.system(cmd)
size = os.path.getsize(mp4_split)
if size == 0:
break
def video_split(self, file):
mp4 = file
mp4_split = re.sub(".mp4", "", file) + "_split.mp4"
start = "0:0:9"
end = "0:4:49"
print(file)
cmd = '''{ffmpeg} -y -ss {start} -t {end} -i "{mp4}" -vcodec copy -acodec copy "{mp4_split}"'''.format(
ffmpeg=self.ffmpeg, start=start, end=end, mp4_split=mp4_split, mp4=mp4)
print(cmd)
os.system(cmd)
def video_concat(self, dir):
os.chdir(dir)
f_lst = []
for file in python_box.dir_list(dir, "mp4"):
file = "file '{}'".format(file)
f_lst.append(file)
videoInfo = dir + "/videoInfo.txt"
python_box.write_file(f_lst, videoInfo)
cmd = '''{} -f concat -i {} -c copy {}output.mp4'''.format(self.ffmpeg, videoInfo, dir + "/")
print(cmd)
os.chdir(dir)
os.system(cmd)
os.remove(videoInfo)
def imageSequence(directory, target):
# only supports composing a video from images of identical dimensions
clip = moviepy.editor.ImageSequenceClip(directory, fps=10)
clip.write_videofile(target)
def movie_concat(directory): # the concatenated video stutters/repeats at the joins
outPath = directory + "/concatVideo.mp4"
f_lst = python_box.dir_list(directory, "mp4")
videoClips = []
for file in f_lst:
videoClip = moviepy.editor.VideoFileClip(file)
videoClips.append(videoClip)
videoClip = moviepy.editor.concatenate_videoclips(videoClips)
videoClip.write_videofile(outPath)
def clip_speed_change(clip, speed, ta, tb):
"""
Adjust the playback speed of the segment starting at ta.
The changed segment keeps its original playback duration: e.g. speed=2 with ta=0, tb=1
compresses the source interval [0, 2) into 1 second of output.
:param clip:
:param speed:
:param ta: start time
:param tb: end time
:return:
"""
tb = ta + (tb - ta) * speed
if tb <= clip.duration:
speed_lambda = lambda c: speedx(c, speed)
try:
clip = clip.subfx(speed_lambda, ta, tb)
# if this raises an error, closing all running Python processes or upgrading the library resolves it
except Exception as e:
print(e)
return clip
def num_speed(numpy_arr, n):
new_numpy_arr = np.array([])
for speed in numpy_arr:
if speed > 1:
new_speed = 1 + (speed - 1) * n
else:
if n <= 1:
new_speed = (1 - (1 - speed) * n)
if n > 1:
new_speed = speed / n
new_numpy_arr = np.append(new_numpy_arr, new_speed)
return new_numpy_arr
def get_current_index(np_array: np.ndarray, value):
"""
Get the index in the sorted array closest to (at or below) value.
:param np_array:
:param value:
:return:
"""
index = np.where(np_array <= value)
if len(index) > 0:
if len(index[0]) > 0:
return index[0][len(index[0]) - 1]
return len(np_array) - 1
def compute_time_line(np_time: np.ndarray, np_speed: np.ndarray, clips: list, audio_duration) -> list:
"""
Iteratively find a suitable duration for each clip so that the total duration approaches audio_duration.
:param np_time:
:param np_speed:
:param clips:
:param audio_duration:
:return:
"""
default_var = audio_duration / len(clips)
change_var = 0.01
durations = []
while True:
durations.clear()
for _ in clips:
like_index = get_current_index(np_time, sum(durations))
clip_duration = 1.0 / np_speed[like_index]
clip_duration = clip_duration * default_var
durations.append(clip_duration)
total = sum(durations)
if total > audio_duration:
default_var *= 1 - change_var
if total <= audio_duration:
default_var *= 1 + change_var
got = math.fabs(total - audio_duration) < 1
if got:
break
else:
change_var *= 0.8
if len(sys.argv) >= 3 and sys.argv[2] == "plot":
from common import tools
data = []
for i in durations:
data.append(1 / i)
tools.plot_list(data)
return durations
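# Illustrative sketch of the search above (values invented for exposition): with 4 clips
# and a 10 s audio track, default_var starts at 10/4 = 2.5 s per clip. Each pass assigns
# clip i the duration default_var / np_speed[k] for the speed sample k nearest the clip's
# start time, then nudges default_var down if the total overshoots the audio duration
# (or up if it undershoots) until the total is within 1 second of it.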
class MovieLib(FfmpegPlugin):
def __init__(self):
super().__init__()
self.image_list = []
self.audio_lst = []
self.imageVideo = None
self.audio_file = None
self.speed_video_file = None
self.temp_videos = []
# sensitivity to speed changes
self.sens = 0.6
self.change_speed_time = 0.8
self.audio_leader = True
def set_out(self, directory):
dir_ = os.path.split(directory)[0]
self.imageVideo = os.path.join(dir_, "pic2video.mp4")
self.audio_file = os.path.join(dir_, "pic2video.wav")
self.speed_video_file = os.path.join(dir_, f"{os.path.basename(dir_)}.mp4")
def add_bgm(self, audio_dir):
self.audio_lst.append(audio_dir)
def add_pic(self, pic_dir):
self.image_list.extend(sorted(python_box.dir_list(pic_dir, "jpg", walk=True)))
if not self.speed_video_file:
self.set_out(pic_dir)
def audio2data(self, audio):
f = wave.open(audio, 'rb')
params = f.getparams()
nchannels, sampwidth, self.framerate, nframes = params[:4]
strData = f.readframes(nframes)
f.close()
waveData = np.frombuffer(strData, dtype=np.short).copy()  # copy: frombuffer returns a read-only view
waveData.shape = -1, 2
waveData = waveData.T
waveData = waveData[0]
audioTime = np.arange(0, nframes) * (1.0 / self.framerate)
if len(sys.argv) >= 3 and sys.argv[2] == "plot":
from common import tools
tools.plot_list(waveData, audioTime)
np.abs(waveData, out=waveData)
return audioTime, waveData
def frame2speed(self, audioTime: list, wave_data: list, f_duration=None) -> Tuple[
np.ndarray, Union[Union[float, int], Any]]:
"""
Derive a speed curve from the audio frames.
:param f_duration:
:param audioTime:
:param wave_data:
:return:
"""
np_time = np.array([])
np_speed = np.array([])
# sample key frames
f = 0
if f_duration is None:
f_duration = int(self.framerate * 0.5)
while f <= len(audioTime) - 1:
t = audioTime[f]
speed = np.mean(wave_data[f:f + f_duration])
f += f_duration
np_time = np.append(np_time, t)
np_speed = np.append(np_speed, speed)
# adjust speed sensitivity
np_speed = np_speed / np.mean(np_speed)
np_speed = np.where(np_speed >= 8, 8, np_speed)
np_speed = np.where(np_speed <= 0.2, 0.2, np_speed)
np_speed = np.where(np_speed >= 1, np_speed * self.sens, np_speed)
np_speed = np.where(np_speed < 1, np_speed / self.sens, np_speed)
np_speed = np_speed / np.mean(np_speed)
return np_time, np_speed
def video_speed_with_audio(self):
        # Match the video speed to the audio rhythm; intended for videos of repeated images or constant pacing
sys.setrecursionlimit(10000000)
video = moviepy.editor.VideoFileClip(self.imageVideo)
video.audio.write_audiofile(self.audio_file)
audioTime, wave_data = self.audio2data(self.audio_file)
np_time, np_speed = self.frame2speed(audioTime, wave_data,
f_duration=int(self.framerate * self.change_speed_time))
        # process the video
bar_setting = ['change speed: ', Percentage(), Bar("#"), Timer(), ' ', ETA()]
speed_clip = moviepy.editor.VideoFileClip(self.imageVideo) # initial clip
audio_clip = speed_clip.audio
bar = ProgressBar(widgets=bar_setting, maxval=len(np_speed)).start()
bar_update_tie = 1
for i in range(len(np_speed)):
bar.update(bar_update_tie)
bar_update_tie += 1
speed = np_speed[i]
t = np_time[i]
            speed_clip = clip_speed_change(speed_clip, speed, t, t + self.change_speed_time)  # change speed segment by segment
np_time = np.append(np_time, t)
speed_clip.audio = audio_clip
print(self.speed_video_file)
video_without_audio = python_box.FileSys().get_outfile(self.speed_video_file, "no_audio")
speed_clip.write_videofile(video_without_audio, audio=False)
speed_clip = moviepy.editor.VideoFileClip(video_without_audio) # solve cant write audio
duration = speed_clip.duration
audio = moviepy.editor.AudioFileClip(self.audio_file)
        audio = audio.set_duration(duration)  # set_duration returns a copy, so keep the result
speed_clip.audio = audio
speed_clip.write_videofile(self.speed_video_file)
# destroy
del audio
del speed_clip
try:
os.remove(video_without_audio)
os.remove(self.audio_file)
os.remove(self.imageVideo)
except Exception as e:
print(e)
bar.finish()
def crop_clip(self, clip: moviepy.editor.ImageClip, width=1080 * 4 / 3, height=1080):
        w, h = clip.size  # clip width and height
w_h = w / h
        if w_h <= width / height:  # the clip is too narrow for the target aspect ratio
clip = clip.resize(width=width)
w, h = clip.size
clip = clip.crop(x_center=w / 2, y_center=h / 2, width=width, height=height)
if w_h > width / height:
clip = clip.resize(height=height)
w, h = clip.size
clip = clip.crop(x_center=w / 2, y_center=h / 2, width=width, height=height)
return clip
def image2speed_video(self, width=1080 * 4 / 3, height=1080):
"""
        Generate a speed-varying video directly from the images,
        skipping the separate image-to-video step.
:param width:
:param height:
:return:
"""
        # generate the audio data
if len(self.audio_lst) == 0:
            raise Exception("no music has been added")
audio_clips = []
for m in self.audio_lst:
clip = moviepy.editor.AudioFileClip(m)
audio_clips.append(clip)
audio_clip = moviepy.editor.concatenate_audioclips(audio_clips)
audio_clip.write_audiofile(self.audio_file)
audioTime, wave_data = self.audio2data(self.audio_file)
np_time, np_speed = self.frame2speed(audioTime, wave_data)
time_line = compute_time_line(np_time, np_speed, self.image_list, audio_clip.duration)
self.image_list.sort()
image_clips = []
for i in range(len(self.image_list)):
image_clip = moviepy.editor.ImageClip(self.image_list[i])
image_clip.start = sum(time_line[0:i])
image_clip.duration = time_line[i]
image_clip.fps = 1
image_clip = self.crop_clip(image_clip, width, height)
image_clips.append(image_clip)
video_clip = moviepy.editor.concatenate_videoclips(image_clips)
video_clip.audio = audio_clip
video_clip.write_videofile(self.speed_video_file, fps=5)
os.remove(self.audio_file)
def image2clip(self, width=1080 * 4 / 3, height=1080, duration=0.25):
fps = 1.0 / duration
width_height = width / height
if len(self.audio_lst) == 0:
            raise Exception("no music has been added")
audioClips = []
for m in self.audio_lst:
audioClip = moviepy.editor.AudioFileClip(m)
audioClips.append(audioClip)
audioClip = moviepy.editor.concatenate_audioclips(audioClips)
self.image_list.sort()
bar_setting = ['image2clip: ', Percentage(), Bar('#'), ' ', ETA()]
bar = ProgressBar(widgets=bar_setting, maxval=len(self.image_list)).start()
videoStartTime = 0
videoClips = []
fail_pic = []
bar_i = 0
for imageFileName in self.image_list:
bar_i += 1
try:
imageClip = moviepy.editor.ImageClip(imageFileName)
videoClip = imageClip.set_duration(duration)
videoClip = videoClip.set_start(videoStartTime)
videoClip = self.crop_clip(videoClip, width, height)
videoStartTime += duration
if 'video_clip' not in locals().keys():
video_clip = videoClip
else:
video_clip = moviepy.editor.concatenate_videoclips([video_clip, videoClip])
                # when memory runs low, write the accumulated clip out in chunks
if psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024 > 800:
i = 1
temp_video = python_box.FileSys().get_outfile(self.imageVideo, str(i))
while 1:
if os.path.exists(temp_video):
i += 1
temp_video = python_box.FileSys().get_outfile(self.imageVideo, str(i))
else:
self.temp_videos.append(temp_video)
break
video_clip.write_videofile(temp_video, fps=fps)
del video_clip
except Exception as e:
fail_pic.append(imageFileName)
print(e)
bar.update(bar_i)
if len(self.temp_videos) > 0:
videos = []
for temp_video in self.temp_videos:
video_clip = moviepy.editor.VideoFileClip(temp_video)
videos.append(video_clip)
video_clip = moviepy.editor.concatenate_videoclips(videos)
bar.finish()
        # set the audio track length
video_duration = video_clip.duration
audio_duration = audioClip.duration
if self.audio_leader:
video_clip = video_clip.subfx(lambda c: speedx(c, video_duration / audio_duration))
else:
while audioClip.duration < video_duration:
audioClip = moviepy.editor.concatenate_audioclips([audioClip, audioClip])
audioClip = audioClip.set_duration(video_duration)
video_clip.audio = audioClip
video_clip.write_videofile(self.imageVideo, fps=fps)
del video_clip
for temp in self.temp_videos:
try:
os.remove(temp)
except Exception as e:
print(e)
return self.imageVideo
def run(self):
"""
        Combine the batch of images into a clip.
        Detect the playback rhythm from the bgm and generate a new clip.
:return:
"""
self.image2speed_video()
if __name__ == "__main__":
"""
pic to video clip
"""
movie = MovieLib()
for i in range(6):
directory = gui.select_dir("多个图片目录,取消代表则选择完成")
if directory:
movie.add_pic(directory)
else:
break
for i in range(6):
file = gui.select_file("多个音乐文件,取消代表则选择完成")
if file:
movie.add_bgm(file)
else:
break
movie.run()
|
[
"wave.open",
"common.tools.plot_list",
"numpy.abs",
"math.fabs",
"common.python_box.write_file",
"moviepy.video.fx.speedx.speedx",
"common.python_box.FileSys",
"common.python_box.dir_list",
"time.time",
"numpy.append",
"common.gui.select_file",
"numpy.where",
"numpy.array",
"common.gui.select_dir",
"numpy.arange",
"numpy.mean",
"re.sub",
"numpy.fromstring"
] |
[((2845, 2882), 'common.python_box.dir_list', 'python_box.dir_list', (['directory', '"""mp4"""'], {}), "(directory, 'mp4')\n", (2864, 2882), False, 'from common import python_box\n'), ((3636, 3648), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3644, 3648), True, 'import numpy as np\n'), ((4129, 4156), 'numpy.where', 'np.where', (['(np_array <= value)'], {}), '(np_array <= value)\n', (4137, 4156), True, 'import numpy as np\n'), ((372, 383), 'time.time', 'time.time', ([], {}), '()\n', (381, 383), False, 'import time\n'), ((543, 581), 'common.python_box.dir_list', 'python_box.dir_list', (['directory', '"""mp4$"""'], {}), "(directory, 'mp4$')\n", (562, 581), False, 'from common import python_box\n'), ((859, 897), 'common.python_box.dir_list', 'python_box.dir_list', (['directory', '"""mp3$"""'], {}), "(directory, 'mp3$')\n", (878, 897), False, 'from common import python_box\n'), ((2193, 2224), 'common.python_box.dir_list', 'python_box.dir_list', (['dir', '"""mp4"""'], {}), "(dir, 'mp4')\n", (2212, 2224), False, 'from common import python_box\n'), ((2352, 2391), 'common.python_box.write_file', 'python_box.write_file', (['f_lst', 'videoInfo'], {}), '(f_lst, videoInfo)\n', (2373, 2391), False, 'from common import python_box\n'), ((3914, 3949), 'numpy.append', 'np.append', (['new_numpy_arr', 'new_speed'], {}), '(new_numpy_arr, new_speed)\n', (3923, 3949), True, 'import numpy as np\n'), ((5410, 5431), 'common.tools.plot_list', 'tools.plot_list', (['data'], {}), '(data)\n', (5425, 5431), False, 'from common import tools\n'), ((6434, 6456), 'wave.open', 'wave.open', (['audio', '"""rb"""'], {}), "(audio, 'rb')\n", (6443, 6456), False, 'import wave\n'), ((6632, 6670), 'numpy.fromstring', 'np.fromstring', (['strData'], {'dtype': 'np.short'}), '(strData, dtype=np.short)\n', (6645, 6670), True, 'import numpy as np\n'), ((6981, 7011), 'numpy.abs', 'np.abs', (['waveData'], {'out': 'waveData'}), '(waveData, out=waveData)\n', (6987, 7011), True, 'import numpy as np\n'), ((7344, 7356), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7352, 7356), True, 'import numpy as np\n'), ((7376, 7388), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7384, 7388), True, 'import numpy as np\n'), ((7833, 7869), 'numpy.where', 'np.where', (['(np_speed >= 8)', '(8)', 'np_speed'], {}), '(np_speed >= 8, 8, np_speed)\n', (7841, 7869), True, 'import numpy as np\n'), ((7889, 7929), 'numpy.where', 'np.where', (['(np_speed <= 0.2)', '(0.2)', 'np_speed'], {}), '(np_speed <= 0.2, 0.2, np_speed)\n', (7897, 7929), True, 'import numpy as np\n'), ((7949, 8004), 'numpy.where', 'np.where', (['(np_speed >= 1)', '(np_speed * self.sens)', 'np_speed'], {}), '(np_speed >= 1, np_speed * self.sens, np_speed)\n', (7957, 8004), True, 'import numpy as np\n'), ((8024, 8078), 'numpy.where', 'np.where', (['(np_speed < 1)', '(np_speed / self.sens)', 'np_speed'], {}), '(np_speed < 1, np_speed / self.sens, np_speed)\n', (8032, 8078), True, 'import numpy as np\n'), ((15636, 15670), 'common.gui.select_dir', 'gui.select_dir', (['"""多个图片目录,取消代表则选择完成"""'], {}), "('多个图片目录,取消代表则选择完成')\n", (15650, 15670), False, 'from common import gui\n'), ((15800, 15835), 'common.gui.select_file', 'gui.select_file', (['"""多个音乐文件,取消代表则选择完成"""'], {}), "('多个音乐文件,取消代表则选择完成')\n", (15815, 15835), False, 'from common import gui\n'), ((1753, 1777), 're.sub', 're.sub', (['""".mp4"""', '""""""', 'file'], {}), "('.mp4', '', file)\n", (1759, 1777), False, 'import re\n'), ((3398, 3414), 'moviepy.video.fx.speedx.speedx', 'speedx', (['c', 'speed'], {}), '(c, speed)\n', (3404, 
3414), False, 'from moviepy.video.fx.speedx import speedx\n'), ((5123, 5156), 'math.fabs', 'math.fabs', (['(total - audio_duration)'], {}), '(total - audio_duration)\n', (5132, 5156), False, 'import math\n'), ((6783, 6804), 'numpy.arange', 'np.arange', (['(0)', 'nframes'], {}), '(0, nframes)\n', (6792, 6804), True, 'import numpy as np\n'), ((6936, 6972), 'common.tools.plot_list', 'tools.plot_list', (['waveData', 'audioTime'], {}), '(waveData, audioTime)\n', (6951, 6972), False, 'from common import tools\n'), ((7589, 7625), 'numpy.mean', 'np.mean', (['wave_data[f:f + f_duration]'], {}), '(wave_data[f:f + f_duration])\n', (7596, 7625), True, 'import numpy as np\n'), ((7676, 7697), 'numpy.append', 'np.append', (['np_time', 't'], {}), '(np_time, t)\n', (7685, 7697), True, 'import numpy as np\n'), ((7721, 7747), 'numpy.append', 'np.append', (['np_speed', 'speed'], {}), '(np_speed, speed)\n', (7730, 7747), True, 'import numpy as np\n'), ((7796, 7813), 'numpy.mean', 'np.mean', (['np_speed'], {}), '(np_speed)\n', (7803, 7813), True, 'import numpy as np\n'), ((8109, 8126), 'numpy.mean', 'np.mean', (['np_speed'], {}), '(np_speed)\n', (8116, 8126), True, 'import numpy as np\n'), ((9243, 9264), 'numpy.append', 'np.append', (['np_time', 't'], {}), '(np_time, t)\n', (9252, 9264), True, 'import numpy as np\n'), ((466, 477), 'time.time', 'time.time', ([], {}), '()\n', (475, 477), False, 'import time\n'), ((627, 650), 're.sub', 're.sub', (['"""mp4"""', '""""""', 'file'], {}), "('mp4', '', file)\n", (633, 650), False, 'import re\n'), ((6267, 6313), 'common.python_box.dir_list', 'python_box.dir_list', (['pic_dir', '"""jpg"""'], {'walk': '(True)'}), "(pic_dir, 'jpg', walk=True)\n", (6286, 6313), False, 'from common import python_box\n'), ((9370, 9390), 'common.python_box.FileSys', 'python_box.FileSys', ([], {}), '()\n', (9388, 9390), False, 'from common import python_box\n'), ((1248, 1272), 're.sub', 're.sub', (['""".mp3"""', '""""""', 'file'], {}), "('.mp3', '', file)\n", (1254, 1272), False, 'import re\n'), ((14796, 14838), 'moviepy.video.fx.speedx.speedx', 'speedx', (['c', '(video_duration / audio_duration)'], {}), '(c, video_duration / audio_duration)\n', (14802, 14838), False, 'from moviepy.video.fx.speedx import speedx\n'), ((13614, 13634), 'common.python_box.FileSys', 'python_box.FileSys', ([], {}), '()\n', (13632, 13634), False, 'from common import python_box\n'), ((13848, 13868), 'common.python_box.FileSys', 'python_box.FileSys', ([], {}), '()\n', (13866, 13868), False, 'from common import python_box\n')]
|
import decimal
import math
import warnings
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from decimal import Decimal, localcontext
from itertools import repeat
from pathlib import Path
from time import time
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
from .config import get_global_config
from .types import FilenameType
def python_hash(SSN: int) -> int:
"""
A pythonic implementation of COBOL code using floating-point arithmetic. Note that
this will differ ever-so-slightly from the cobol_hash due to the differing rounding
conventions.
"""
# Constants determined by DoIT
L_SD = SSN
C_Q = 127773 # 3^2 * 14197
C_A = 16807 # 7^5
C_R = 2836 # 2^2 * 709
C_M = 2147483647 # prime (In fact, 2^{2^5 - 1} - 1, double Mersenne)
# Translated
W_HI = L_SD // C_Q
W_LO = L_SD % C_Q
# Recombine the quotient and remainder mod a medium-sized almost-prime with two
# coprime factors. (N.B. Not sure exactly why C_A is a power of 7 whereas C_R is
# almost prime. Would be curious to read the history of this algorithm.)
L_SD = C_A * W_LO - C_R * W_HI
# Note that C_M is _almost_ 2^31, but not quite. Also, note that
# C_A * W_LO - C_R * W_HI is maximized when SSN = C_Q - 1
# and it is minimized when SSN is the largest social security number which is
# exactly divisible by C_Q, i.e., (999_99_9999 // C_Q) * C_Q = 999_95_1498.
#
# In either case, C_A * W_LO - C_R * W_HI \in (-C_M, C_M) and so the following
# block guarantees that L_SD will be in [0, C_M).
#
# We also note that the _smallest negative_ value that C_A * W_LO - C_R * W_HI can
# achieve in theory is -1 (since C_A and C_R are coprime) but I haven't done the
# computation to determine whether it's actually possible in this range of numbers
if L_SD <= 0:
warnings.warn("L_SD is negative")
L_SD += C_M
# And so by the above comment, L_RAND is in [0, 1) and this rounding gives us the
# top 10 digits of the mantissa
L_RAND = math.floor(L_SD / C_M * 1e10) / 1e10
return L_RAND
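def _check_schrage_identity(ssn: int = 123_45_6789) -> bool:
    """
    Added sanity check, not part of the original DoIT translation: the update in
    python_hash is Schrage's factorization of the Lehmer ("minimal standard")
    generator, i.e. it computes (C_A * SSN) mod C_M without 64-bit overflow,
    which works because C_M = C_A * C_Q + C_R (16807 * 127773 + 2836 == 2147483647).
    The default SSN is an arbitrary made-up value with 0 < SSN < C_M.
    """
    direct = math.floor(16807 * ssn % 2147483647 / 2147483647 * 1e10) / 1e10
    return python_hash(ssn) == direct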
def cobol_hash(SSN: int) -> float:
"""
A python implementation of COBOL's fixed-point arithmetic
"""
with localcontext() as ctx:
# Constants determined by DoIT
ctx.prec = 10
ctx.rounding = decimal.ROUND_DOWN
L_SD = Decimal(SSN)
C_A = Decimal("0000016807")
C_M = Decimal("2147483647")
C_Q = Decimal("0000127773")
C_R = Decimal("0000002836")
# Translated
W_HI = (L_SD / C_Q).quantize(Decimal("1E0")) # L_SD // C_Q
W_LO = L_SD - C_Q * W_HI # L_SD % C_Q
L_SD = C_A * W_LO - C_R * W_HI
if L_SD <= 0:
L_SD += C_M
L_RAND = (L_SD / C_M).quantize(Decimal("1E-10"))
if L_RAND == 0:
warnings.warn("L_RAND is zero")
L_SD += C_M
return L_RAND
def generate_outcomes(
input_list: Optional[List[int]] = None,
process_type: str = "cobol",
low: Optional[int] = None,
high: Optional[int] = None,
size: Optional[int] = None,
all_values: Optional[bool] = False,
generate_rand_whole: Optional[bool] = False,
) -> pd.DataFrame:
"""
    Helper function that generates L_RAND outcomes with the option for pythonic or cobol implementations.
"""
# Generate a random sample of SSNs to test, and sort to verify monotonicity of relationship
if input_list is not None:
ssn_pool = input_list
elif not all_values:
# Setting seed to ensure replicability
np.random.seed(0)
ssn_pool = np.random.randint(low=low, high=high, size=size)
ssn_pool.sort()
elif all_values:
ssn_pool = np.arange(low, high)
# apply random number generator to SSN pool
if process_type == "python":
with ThreadPoolExecutor() as executor:
ssn_outcomes = list(
tqdm(executor.map(python_hash, ssn_pool), total=len(ssn_pool))
)
if process_type == "cobol":
with ThreadPoolExecutor() as executor:
ssn_outcomes = list(
tqdm(
executor.map(cobol_hash, ssn_pool.astype(str)), total=len(ssn_pool)
)
)
df = pd.DataFrame(ssn_outcomes, columns=["L_RAND"])
final_df = pd.concat([pd.Series(ssn_pool, name="SSN"), df], axis=1)
if generate_rand_whole:
final_df["L_RAND_WHOLE"] = final_df["L_RAND"] * 10_000_000_000
return final_df
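def _example_generate_outcomes() -> pd.DataFrame:
    """
    Added usage sketch (illustrative only; the SSNs below are made up). Note that
    the "python" path accepts a plain list, while the "cobol" path expects a
    NumPy array because it calls ssn_pool.astype(str).
    """
    return generate_outcomes(input_list=[1_01_0001, 123_45_6789], process_type="python")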
def chunk_using_generators(lst, n):
for i in range(0, len(lst), n):
yield lst[i : i + n]
def generate_all_L_RAND(
filepath: Optional[FilenameType] = None,
filename: FilenameType = "ssn_output.csv.gz",
ssn_min: int = 1_01_0001,
ssn_max: int = 899_99_9999,
chunksize: int = 10_0000,
):
"""
A function that calculates L_RAND values for all possible SSN from 001_01_0001 to 899_99_9999.
This exercise was necessary to ensure that the maximum value attainable from all reasonable SSNs
would result in an L_RAND value less than 9_999_999_999.
"""
if filepath is None:
# default to the DATA_DIR / reference
filepath = Path(get_global_config().DATA_DIR) / "reference"
# Total list of valid SSNs
list_of_ssn = np.arange(ssn_min, ssn_max)
# Divide the total list into manageable chunks
list_of_list_of_ssn = list(chunk_using_generators(list_of_ssn, chunksize))
# Process each list using COBOL
with ProcessPoolExecutor() as executor:
ssn_outcomes = list(
tqdm(
executor.map(generate_outcomes, list_of_list_of_ssn, repeat("cobol")),
total=len(list_of_list_of_ssn),
)
)
# Output data into a gzip dataframe.
pd.DataFrame(pd.concat(ssn_outcomes)).sort_values(
by="L_RAND", ascending=False
).reset_index(drop=True).to_csv(
filepath / filename, compression="gzip", index=False
)
def add_ms_to_seed(ssn: int, ms: int = None):
"""
A good-enough solution to resolve local-randomization issues with the current DoIT algorithm.
"""
if ms is None:
ms = int(round(time(), 6) * 1e6) % 1_000_000
return int(str(ssn + ms)[::-1])
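# Added worked example (the SSN and ms are made up): the seed is the decimal
# digits of ssn + ms reversed, e.g.
#     add_ms_to_seed(123_45_6789, ms=42)  # int("123456831"[::-1]) -> 138654321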
|
[
"pandas.DataFrame",
"numpy.random.seed",
"decimal.Decimal",
"concurrent.futures.ProcessPoolExecutor",
"math.floor",
"time.time",
"numpy.random.randint",
"numpy.arange",
"decimal.localcontext",
"pandas.Series",
"warnings.warn",
"concurrent.futures.ThreadPoolExecutor",
"pandas.concat",
"itertools.repeat"
] |
[((4358, 4404), 'pandas.DataFrame', 'pd.DataFrame', (['ssn_outcomes'], {'columns': "['L_RAND']"}), "(ssn_outcomes, columns=['L_RAND'])\n", (4370, 4404), True, 'import pandas as pd\n'), ((5383, 5410), 'numpy.arange', 'np.arange', (['ssn_min', 'ssn_max'], {}), '(ssn_min, ssn_max)\n', (5392, 5410), True, 'import numpy as np\n'), ((1939, 1972), 'warnings.warn', 'warnings.warn', (['"""L_SD is negative"""'], {}), "('L_SD is negative')\n", (1952, 1972), False, 'import warnings\n'), ((2129, 2167), 'math.floor', 'math.floor', (['(L_SD / C_M * 10000000000.0)'], {}), '(L_SD / C_M * 10000000000.0)\n', (2139, 2167), False, 'import math\n'), ((2309, 2323), 'decimal.localcontext', 'localcontext', ([], {}), '()\n', (2321, 2323), False, 'from decimal import Decimal, localcontext\n'), ((2451, 2463), 'decimal.Decimal', 'Decimal', (['SSN'], {}), '(SSN)\n', (2458, 2463), False, 'from decimal import Decimal, localcontext\n'), ((2478, 2499), 'decimal.Decimal', 'Decimal', (['"""0000016807"""'], {}), "('0000016807')\n", (2485, 2499), False, 'from decimal import Decimal, localcontext\n'), ((2514, 2535), 'decimal.Decimal', 'Decimal', (['"""2147483647"""'], {}), "('2147483647')\n", (2521, 2535), False, 'from decimal import Decimal, localcontext\n'), ((2550, 2571), 'decimal.Decimal', 'Decimal', (['"""0000127773"""'], {}), "('0000127773')\n", (2557, 2571), False, 'from decimal import Decimal, localcontext\n'), ((2586, 2607), 'decimal.Decimal', 'Decimal', (['"""0000002836"""'], {}), "('0000002836')\n", (2593, 2607), False, 'from decimal import Decimal, localcontext\n'), ((5586, 5607), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {}), '()\n', (5605, 5607), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n'), ((2667, 2681), 'decimal.Decimal', 'Decimal', (['"""1E0"""'], {}), "('1E0')\n", (2674, 2681), False, 'from decimal import Decimal, localcontext\n'), ((2870, 2886), 'decimal.Decimal', 'Decimal', (['"""1E-10"""'], {}), "('1E-10')\n", (2877, 2886), False, 'from decimal import Decimal, localcontext\n'), ((2925, 2956), 'warnings.warn', 'warnings.warn', (['"""L_RAND is zero"""'], {}), "('L_RAND is zero')\n", (2938, 2956), False, 'import warnings\n'), ((3667, 3684), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3681, 3684), True, 'import numpy as np\n'), ((3704, 3752), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'low', 'high': 'high', 'size': 'size'}), '(low=low, high=high, size=size)\n', (3721, 3752), True, 'import numpy as np\n'), ((3933, 3953), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (3951, 3953), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n'), ((4139, 4159), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (4157, 4159), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n'), ((4431, 4462), 'pandas.Series', 'pd.Series', (['ssn_pool'], {'name': '"""SSN"""'}), "(ssn_pool, name='SSN')\n", (4440, 4462), True, 'import pandas as pd\n'), ((3817, 3837), 'numpy.arange', 'np.arange', (['low', 'high'], {}), '(low, high)\n', (3826, 3837), True, 'import numpy as np\n'), ((5737, 5752), 'itertools.repeat', 'repeat', (['"""cobol"""'], {}), "('cobol')\n", (5743, 5752), False, 'from itertools import repeat\n'), ((6269, 6275), 'time.time', 'time', ([], {}), '()\n', (6273, 6275), False, 'from time import time\n'), ((5886, 5909), 'pandas.concat', 'pd.concat', (['ssn_outcomes'], {}), '(ssn_outcomes)\n', (5895, 5909), True, 
'import pandas as pd\n')]
|
# Once for All: Train One Network and Specialize it for Efficient Deployment
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2020.
import os
import torch
import argparse
from ofa.stereo_matching.data_providers.stereo import StereoDataProvider
from ofa.stereo_matching.run_manager import StereoRunConfig, RunManager
from ofa.stereo_matching.elastic_nn.networks.ofa_aanet import OFAAANet
from ofa.stereo_matching.elastic_nn.training.progressive_shrinking import load_models
import numpy as np
from ofa.utils.pytorch_utils import get_net_info
parser = argparse.ArgumentParser()
parser.add_argument(
'-g',
'--gpu',
help='The gpu(s) to use',
type=str,
default='0')
parser.add_argument(
'-n',
'--net',
metavar='OFAAANet',
default='ofa_aanet',
choices=['ofa_aanet_d234_e346_k357_w1.0',
'ofa_aanet'],
help='OFA AANet networks')
args = parser.parse_args()
if args.gpu == 'all':
device_list = range(torch.cuda.device_count())
args.gpu = ','.join(str(_) for _ in device_list)
else:
device_list = [int(_) for _ in args.gpu.split(',')]
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
ofa_network = OFAAANet(ks_list=[3,5,7], expand_ratio_list=[2,4,6,8], depth_list=[2,3,4], scale_list=[2,3,4])
model_file = 'ofa_stereo_checkpoints/ofa_stereo_D234_E2468_K357_S4'
init = torch.load(model_file, map_location='cpu')
model_dict = init['state_dict']
ofa_network.load_state_dict(model_dict)
""" Randomly sample a sub-network,
you can also manually set the sub-network using:
ofa_network.set_active_subnet(ks=7, e=6, d=4)
"""
#ofa_network.sample_active_subnet()
#ofa_network.set_max_net()
d = 4
e = 8
ks = 7
s = 4
ofa_network.set_active_subnet(ks=ks, d=d, e=e, s=s)
subnet = ofa_network.get_active_subnet(preserve_weight=True)
#subnet = ofa_network
save_path = "ofa_stereo_checkpoints/aanet_D%d_E%d_K%d_S%d" % (d, e, ks, s)
torch.save(subnet.state_dict(), save_path)
net = subnet
net.eval()
net = net.cuda()
#net = net.get_tensorrt_model()
#torch.save(net.state_dict(), 'models/mobilefadnet_trt.pth')
get_net_info(net, input_shape=(3, 540, 960))
# fake input data
dummy_left = torch.randn(1, 3, 576, 960, dtype=torch.float).cuda()
dummy_right = torch.randn(1, 3, 576, 960, dtype=torch.float).cuda()
# INIT LOGGERS
starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
repetitions = 30
timings=np.zeros((repetitions,1))
#GPU-WARM-UP
for _ in range(10):
_ = net(dummy_left, dummy_right)
# MEASURE PERFORMANCE
with torch.no_grad():
for rep in range(-3, repetitions):
starter.record()
_ = net(dummy_left, dummy_right)
ender.record()
# WAIT FOR GPU SYNC
torch.cuda.synchronize()
if rep >= 0:
curr_time = starter.elapsed_time(ender)
timings[rep] = curr_time
print(rep, curr_time)
mean_syn = np.sum(timings) / repetitions
std_syn = np.std(timings)
print(mean_syn)
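# Note (added): cuda Event.elapsed_time reports milliseconds, so mean_syn and
# std_syn above are the mean and standard deviation of per-inference latency in ms.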
|
[
"ofa.stereo_matching.elastic_nn.networks.ofa_aanet.OFAAANet",
"torch.cuda.synchronize",
"torch.cuda.Event",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.std",
"torch.load",
"numpy.zeros",
"torch.randn",
"torch.cuda.device_count",
"ofa.utils.pytorch_utils.get_net_info",
"torch.no_grad"
] |
[((608, 633), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (631, 633), False, 'import argparse\n'), ((1213, 1321), 'ofa.stereo_matching.elastic_nn.networks.ofa_aanet.OFAAANet', 'OFAAANet', ([], {'ks_list': '[3, 5, 7]', 'expand_ratio_list': '[2, 4, 6, 8]', 'depth_list': '[2, 3, 4]', 'scale_list': '[2, 3, 4]'}), '(ks_list=[3, 5, 7], expand_ratio_list=[2, 4, 6, 8], depth_list=[2, \n 3, 4], scale_list=[2, 3, 4])\n', (1221, 1321), False, 'from ofa.stereo_matching.elastic_nn.networks.ofa_aanet import OFAAANet\n'), ((1384, 1426), 'torch.load', 'torch.load', (['model_file'], {'map_location': '"""cpu"""'}), "(model_file, map_location='cpu')\n", (1394, 1426), False, 'import torch\n'), ((2125, 2169), 'ofa.utils.pytorch_utils.get_net_info', 'get_net_info', (['net'], {'input_shape': '(3, 540, 960)'}), '(net, input_shape=(3, 540, 960))\n', (2137, 2169), False, 'from ofa.utils.pytorch_utils import get_net_info\n'), ((2457, 2483), 'numpy.zeros', 'np.zeros', (['(repetitions, 1)'], {}), '((repetitions, 1))\n', (2465, 2483), True, 'import numpy as np\n'), ((2986, 3001), 'numpy.std', 'np.std', (['timings'], {}), '(timings)\n', (2992, 3001), True, 'import numpy as np\n'), ((2357, 2393), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (2373, 2393), False, 'import torch\n'), ((2395, 2431), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (2411, 2431), False, 'import torch\n'), ((2584, 2599), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2597, 2599), False, 'import torch\n'), ((2946, 2961), 'numpy.sum', 'np.sum', (['timings'], {}), '(timings)\n', (2952, 2961), True, 'import numpy as np\n'), ((1010, 1035), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1033, 1035), False, 'import torch\n'), ((2202, 2248), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(576)', '(960)'], {'dtype': 'torch.float'}), '(1, 3, 576, 960, dtype=torch.float)\n', (2213, 2248), False, 'import torch\n'), ((2270, 2316), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(576)', '(960)'], {'dtype': 'torch.float'}), '(1, 3, 576, 960, dtype=torch.float)\n', (2281, 2316), False, 'import torch\n'), ((2765, 2789), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2787, 2789), False, 'import torch\n')]
|
import os
import sys
from PIL import Image
import numpy as np
import random
import matplotlib.pyplot as plt
size_image = (256, 256)
class LSB:
# convert integer to 8-bit binary
def int2bin(self, image):
r, g, b = image
return (f'{r:08b}', f'{g:08b}', f'{b:08b}')
# convert 8-bit binary to integer
def bin2int(self, image):
r, g, b = image
return (int(r, 2), int(g, 2), int(b, 2))
# define the encryption function
def encryption(self, original, secret):
pixel_1 = original.load()
pixel_2 = secret.load()
outcome = Image.new(original.mode, original.size)
pixel_new = outcome.load()
for i in range(size_image[0]):
for j in range(size_image[1]):
r1, g1, b1 = self.int2bin(pixel_1[i, j])
r2, g2, b2 = self.int2bin(pixel_2[i, j])
pixel_new[i, j] = self.bin2int((r1[:4] + r2[:4], g1[:4] + g2[:4], b1[:4] + b2[:4]))
return outcome
# define the decryption function
def decryption(self, image):
pixel_merge = image.load()
secret = Image.new(image.mode, image.size)
pixel_secret = secret.load()
for i in range(size_image[0]):
for j in range(size_image[1]):
r, g, b = self.int2bin(pixel_merge[i, j])
pixel_secret[i, j] = self.bin2int((r[4:] + '0000', g[4:] + '0000', b[4:] + '0000'))
return secret
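# Added worked example for a single pixel (values are made up): with cover
# pixel (200, 100, 50) and secret pixel (255, 0, 128), encryption keeps the top
# four bits of each channel, e.g. for red:
#     cover 200 -> '11001000', secret 255 -> '11111111'
#     merged    = '1100' + '1111' = '11001111' = 207
# decryption shifts the stored bits back up ('1111' + '0000' -> 240), so the
# recovered secret pixel is (240, 0, 128) -- the original up to the low 4 bits.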
if __name__ == '__main__':
test_images = []
for imgnames in os.listdir("./images_test/"):
test_images.append(Image.open("./images_test/" + imgnames).resize(size_image, Image.ANTIALIAS))
np.random.shuffle(test_images)
lsb_implementation = LSB()
test_original = test_images[0:12]
test_secret = test_images[12:24]
test_merge = []
test_reveal = []
for i in range(12):
test_merge.append(lsb_implementation.encryption(test_original[i], test_secret[i]))
test_reveal.append(lsb_implementation.decryption(test_merge[-1]))
# Number of secret and cover pairs to show.
n = 12
def show_image(img, n_rows, n_col, idx, gray=False, first_row=False, title=None):
ax = plt.subplot(n_rows, n_col, idx)
plt.imshow(img)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if first_row:
plt.title(title)
plt.figure(figsize=(4, 12))
for i in range(12):
n_col = 4
show_image(test_original[i], n, n_col, i * n_col + 1, first_row=i == 0, title='Cover')
show_image(test_secret[i], n, n_col, i * n_col + 2, first_row=i == 0, title='Secret')
show_image(test_merge[i], n, n_col, i * n_col + 3, first_row=i == 0, title='Merge')
show_image(test_reveal[i], n, n_col, i * n_col + 4, first_row=i == 0, title='Reveal')
plt.savefig('./result_1.jpg')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"PIL.Image.new",
"matplotlib.pyplot.show",
"numpy.random.shuffle",
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"matplotlib.pyplot.figure",
"os.listdir",
"matplotlib.pyplot.savefig"
] |
[((1580, 1608), 'os.listdir', 'os.listdir', (['"""./images_test/"""'], {}), "('./images_test/')\n", (1590, 1608), False, 'import os\n'), ((1722, 1752), 'numpy.random.shuffle', 'np.random.shuffle', (['test_images'], {}), '(test_images)\n', (1739, 1752), True, 'import numpy as np\n'), ((2470, 2497), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 12)'}), '(figsize=(4, 12))\n', (2480, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2936, 2965), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./result_1.jpg"""'], {}), "('./result_1.jpg')\n", (2947, 2965), True, 'import matplotlib.pyplot as plt\n'), ((2971, 2981), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2979, 2981), True, 'import matplotlib.pyplot as plt\n'), ((626, 665), 'PIL.Image.new', 'Image.new', (['original.mode', 'original.size'], {}), '(original.mode, original.size)\n', (635, 665), False, 'from PIL import Image\n'), ((1159, 1192), 'PIL.Image.new', 'Image.new', (['image.mode', 'image.size'], {}), '(image.mode, image.size)\n', (1168, 1192), False, 'from PIL import Image\n'), ((2265, 2296), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', 'n_col', 'idx'], {}), '(n_rows, n_col, idx)\n', (2276, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2306, 2321), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2316, 2321), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2460), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2453, 2460), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1677), 'PIL.Image.open', 'Image.open', (["('./images_test/' + imgnames)"], {}), "('./images_test/' + imgnames)\n", (1648, 1677), False, 'from PIL import Image\n')]
|
import numpy as np
import torch
import torch.nn as nn
def conv(in_channels, out_channels, kernel_size, bias=True):
return nn.Conv2d(
in_channels, out_channels, kernel_size,
padding=(kernel_size//2), bias=bias)
class MappingNet(nn.Module):
def __init__(self, opt):
super().__init__()
latent_dim = opt.latent_dim
style_dim = opt.style_size**2
hidden_dim = opt.hidden_dim
depth = opt.depth
layers = []
layers += [nn.Linear(latent_dim, hidden_dim)]
layers += [nn.ReLU()]
for _ in range(depth):
layers += [nn.Linear(hidden_dim, hidden_dim)]
layers += [nn.ReLU()]
layers += [nn.Linear(hidden_dim, style_dim)]
self.net = nn.Sequential(*layers)
def forward(self, z):
out = self.net(z)
return out
class Net(nn.Module):
def __init__(self, opt):
super().__init__()
inp_ch=opt.input_nch
ndf=opt.ndf
out_ch=opt.output_nch
Nr=opt.Nr
num_ups=int(np.log2(opt.up_factor))
need_bias=opt.need_bias
upsample_mode=opt.upsample_mode
layers = [conv(inp_ch, ndf, 3, bias=need_bias),
nn.BatchNorm2d(ndf),
nn.ReLU(True)]
for _ in range(Nr):
layers += [conv(ndf, ndf, 3, bias=need_bias),
nn.BatchNorm2d(ndf),
nn.ReLU(True)]
for _ in range(num_ups):
layers += [nn.Upsample(scale_factor=2, mode=upsample_mode),
conv(ndf, ndf, 3, bias=need_bias),
nn.BatchNorm2d(ndf),
nn.ReLU(True)]
for _ in range(Nr):
layers += [conv(ndf, ndf, 3, bias=need_bias),
nn.BatchNorm2d(ndf),
nn.ReLU(True)]
layers += [conv(ndf, out_ch, 3, bias=need_bias)]
self.net = nn.Sequential(*layers)
def forward(self, z, s=None):
out = self.net(z)
return out
|
[
"torch.nn.ReLU",
"torch.nn.Sequential",
"numpy.log2",
"torch.nn.Conv2d",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.nn.Linear"
] |
[((129, 219), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'kernel_size'], {'padding': '(kernel_size // 2)', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size, padding=kernel_size // 2,\n bias=bias)\n', (138, 219), True, 'import torch.nn as nn\n'), ((782, 804), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (795, 804), True, 'import torch.nn as nn\n'), ((2044, 2066), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2057, 2066), True, 'import torch.nn as nn\n'), ((513, 546), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', 'hidden_dim'], {}), '(latent_dim, hidden_dim)\n', (522, 546), True, 'import torch.nn as nn\n'), ((567, 576), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (574, 576), True, 'import torch.nn as nn\n'), ((720, 752), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'style_dim'], {}), '(hidden_dim, style_dim)\n', (729, 752), True, 'import torch.nn as nn\n'), ((1083, 1105), 'numpy.log2', 'np.log2', (['opt.up_factor'], {}), '(opt.up_factor)\n', (1090, 1105), True, 'import numpy as np\n'), ((1263, 1282), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ndf'], {}), '(ndf)\n', (1277, 1282), True, 'import torch.nn as nn\n'), ((1302, 1315), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1309, 1315), True, 'import torch.nn as nn\n'), ((632, 665), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (641, 665), True, 'import torch.nn as nn\n'), ((690, 699), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (697, 699), True, 'import torch.nn as nn\n'), ((1435, 1454), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ndf'], {}), '(ndf)\n', (1449, 1454), True, 'import torch.nn as nn\n'), ((1479, 1492), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1486, 1492), True, 'import torch.nn as nn\n'), ((1551, 1598), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': 'upsample_mode'}), '(scale_factor=2, mode=upsample_mode)\n', (1562, 1598), True, 'import torch.nn as nn\n'), ((1722, 1741), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ndf'], {}), '(ndf)\n', (1736, 1741), True, 'import torch.nn as nn\n'), ((1766, 1779), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1773, 1779), True, 'import torch.nn as nn\n'), ((1902, 1921), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ndf'], {}), '(ndf)\n', (1916, 1921), True, 'import torch.nn as nn\n'), ((1950, 1963), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1957, 1963), True, 'import torch.nn as nn\n')]
|
import numpy as np
from copy import copy
from .utils.thresholdcurator import ThresholdCurator
from .quality_metric import QualityMetric
import spiketoolkit as st
import spikemetrics.metrics as metrics
from spikemetrics.utils import printProgressBar
from spikemetrics.metrics import find_neighboring_channels
from collections import OrderedDict
from sklearn.neighbors import NearestNeighbors
from .parameter_dictionaries import update_all_param_dicts_with_kwargs
class NoiseOverlap(QualityMetric):
installed = True # check at class level if installed or not
installation_mesg = "" # err
params = OrderedDict([('num_channels_to_compare', 13),
('max_spikes_per_unit_for_noise_overlap', 1000),
('num_features', 10),
('num_knn', 6)])
curator_name = "ThresholdNoiseOverlaps"
def __init__(self, metric_data):
QualityMetric.__init__(self, metric_data, metric_name="noise_overlap")
if not metric_data.has_recording():
raise ValueError("MetricData object must have a recording")
def compute_metric(self, num_channels_to_compare, max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs):
# Make sure max_spikes_per_unit_for_noise_overlap is not None
assert max_spikes_per_unit_for_noise_overlap is not None, "'max_spikes_per_unit_for_noise_overlap' must be an integer."
# update keyword arg in case it's already specified to something
kwargs['max_spikes_per_unit'] = max_spikes_per_unit_for_noise_overlap
params_dict = update_all_param_dicts_with_kwargs(kwargs)
save_property_or_features = params_dict['save_property_or_features']
seed = params_dict['seed']
# set random seed
if seed is not None:
np.random.seed(seed)
# first, get waveform snippets of every unit (at most n spikes)
# waveforms = List (units,) of np.array (n_spikes, n_channels, n_timepoints)
waveforms = st.postprocessing.get_unit_waveforms(
self._metric_data._recording,
self._metric_data._sorting,
unit_ids=self._metric_data._unit_ids,
**kwargs)
n_waveforms_per_unit = np.array([len(wf) for wf in waveforms])
n_spikes_per_unit = np.array([len(self._metric_data._sorting.get_unit_spike_train(u)) for u in self._metric_data._unit_ids])
if np.all(n_waveforms_per_unit < max_spikes_per_unit_for_noise_overlap):
# in this case it means that waveforms have been computed on
# less spikes than max_spikes_per_unit_for_noise_overlap --> recompute
kwargs['recompute_info'] = True
waveforms = st.postprocessing.get_unit_waveforms(
self._metric_data._recording,
self._metric_data._sorting,
unit_ids = self._metric_data._unit_ids,
# max_spikes_per_unit = max_spikes_per_unit_for_noise_overlap,
**kwargs)
elif np.all(n_waveforms_per_unit >= max_spikes_per_unit_for_noise_overlap):
# waveforms computed on more spikes than needed --> sample
for i_w, wfs in enumerate(waveforms):
if len(wfs) > max_spikes_per_unit_for_noise_overlap:
                    selected_idxs = np.random.permutation(len(wfs))[:max_spikes_per_unit_for_noise_overlap]
                    waveforms[i_w] = wfs[selected_idxs]
# get channel idx and locations
channel_idx = np.arange(self._metric_data._recording.get_num_channels())
channel_locations = self._metric_data._channel_locations
if num_channels_to_compare > len(channel_idx):
num_channels_to_compare = len(channel_idx)
# get noise snippets
min_time = min([self._metric_data._sorting.get_unit_spike_train(unit_id=unit)[0]
for unit in self._metric_data._sorting.get_unit_ids()])
max_time = max([self._metric_data._sorting.get_unit_spike_train(unit_id=unit)[-1]
for unit in self._metric_data._sorting.get_unit_ids()])
max_spikes = np.max([len(self._metric_data._sorting.get_unit_spike_train(u)) for u in self._metric_data._unit_ids])
if max_spikes < max_spikes_per_unit_for_noise_overlap:
max_spikes_per_unit_for_noise_overlap = max_spikes
times_control = np.random.choice(np.arange(min_time, max_time),
size=max_spikes_per_unit_for_noise_overlap, replace=False)
clip_size = waveforms[0].shape[-1]
# np.array, (n_spikes, n_channels, n_timepoints)
clips_control_max = np.stack(self._metric_data._recording.get_snippets(snippet_len=clip_size,
reference_frames=times_control))
noise_overlaps = []
for i_u, unit in enumerate(self._metric_data._unit_ids):
# show progress bar
if self._metric_data.verbose:
printProgressBar(i_u + 1, len(self._metric_data._unit_ids))
# get spike and noise snippets
# np.array, (n_spikes, n_channels, n_timepoints)
clips = waveforms[i_u]
clips_control = clips_control_max
# make noise snippets size equal to number of spikes
if len(clips) < max_spikes_per_unit_for_noise_overlap:
selected_idxs = np.random.choice(np.arange(max_spikes_per_unit_for_noise_overlap),
size=len(clips), replace=False)
clips_control = clips_control[selected_idxs]
else:
selected_idxs = np.random.choice(np.arange(len(clips)),
size=max_spikes_per_unit_for_noise_overlap,
replace=False)
clips = clips[selected_idxs]
num_clips = len(clips)
# compute weight for correcting noise snippets
template = np.median(clips, axis=0)
chmax, tmax = np.unravel_index(np.argmax(np.abs(template)), template.shape)
max_val = template[chmax, tmax]
weighted_clips_control = np.zeros(clips_control.shape)
weights = np.zeros(num_clips)
for j in range(num_clips):
clip0 = clips_control[j, :, :]
val0 = clip0[chmax, tmax]
weight0 = val0 * max_val
weights[j] = weight0
weighted_clips_control[j, :, :] = clip0 * weight0
noise_template = np.sum(weighted_clips_control, axis=0)
noise_template = noise_template / np.sum(np.abs(noise_template)) * np.sum(np.abs(template))
# subtract it out
for j in range(num_clips):
clips[j, :, :] = _subtract_clip_component(clips[j, :, :], noise_template)
clips_control[j, :, :] = _subtract_clip_component(clips_control[j, :, :], noise_template)
# use only subsets of channels that are closest to peak channel
channels_to_use = find_neighboring_channels(chmax, channel_idx,
num_channels_to_compare, channel_locations)
channels_to_use = np.sort(channels_to_use)
clips = clips[:,channels_to_use,:]
clips_control = clips_control[:,channels_to_use,:]
all_clips = np.concatenate([clips, clips_control], axis=0)
num_channels_wfs = all_clips.shape[1]
num_samples_wfs = all_clips.shape[2]
all_features = _compute_pca_features(all_clips.reshape((num_clips * 2,
num_channels_wfs * num_samples_wfs)), num_features)
num_all_clips=len(all_clips)
distances, indices = NearestNeighbors(n_neighbors=min(num_knn + 1, num_all_clips - 1), algorithm='auto').fit(
all_features.T).kneighbors()
group_id = np.zeros((num_clips * 2))
group_id[0:num_clips] = 1
group_id[num_clips:] = 2
num_match = 0
total = 0
for j in range(num_clips * 2):
for k in range(1, min(num_knn + 1, num_all_clips - 1)):
ind = indices[j][k]
if group_id[j] == group_id[ind]:
num_match = num_match + 1
total = total + 1
pct_match = num_match / total
noise_overlap = 1 - pct_match
noise_overlaps.append(noise_overlap)
noise_overlaps = np.asarray(noise_overlaps)
if save_property_or_features:
self.save_property_or_features(self._metric_data._sorting, noise_overlaps, self._metric_name)
return noise_overlaps
def threshold_metric(self, threshold, threshold_sign, num_channels_to_compare,
max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs):
noise_overlaps = self.compute_metric(num_channels_to_compare,
max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs)
threshold_curator = ThresholdCurator(sorting=self._metric_data._sorting, metric=noise_overlaps)
threshold_curator.threshold_sorting(threshold=threshold, threshold_sign=threshold_sign)
return threshold_curator
def _compute_pca_features(X, num_components):
u, s, vt = np.linalg.svd(X)
return u[:, :num_components].T
def _subtract_clip_component(clip1, component):
V1 = clip1.flatten()
V2 = component.flatten()
V1 = V1 - np.mean(V1)
V2 = V2 - np.mean(V2)
V1 = V1 - V2 * np.dot(V1, V2) / np.dot(V2, V2)
return V1.reshape(clip1.shape)
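# Added note (not in the original): after mean-centering both flattened clips,
# the update above removes the projection of V1 onto V2,
#     V1 <- V1 - (V1 . V2 / V2 . V2) * V2,
# so the returned clip has no component along the (centered) noise template.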
|
[
"spikemetrics.metrics.find_neighboring_channels",
"numpy.random.seed",
"numpy.sum",
"numpy.concatenate",
"numpy.abs",
"numpy.median",
"numpy.asarray",
"spiketoolkit.postprocessing.get_unit_waveforms",
"numpy.zeros",
"numpy.sort",
"numpy.linalg.svd",
"numpy.mean",
"numpy.arange",
"collections.OrderedDict",
"numpy.dot",
"numpy.all"
] |
[((611, 754), 'collections.OrderedDict', 'OrderedDict', (["[('num_channels_to_compare', 13), ('max_spikes_per_unit_for_noise_overlap',\n 1000), ('num_features', 10), ('num_knn', 6)]"], {}), "([('num_channels_to_compare', 13), (\n 'max_spikes_per_unit_for_noise_overlap', 1000), ('num_features', 10), (\n 'num_knn', 6)])\n", (622, 754), False, 'from collections import OrderedDict\n'), ((9602, 9618), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {}), '(X)\n', (9615, 9618), True, 'import numpy as np\n'), ((2048, 2195), 'spiketoolkit.postprocessing.get_unit_waveforms', 'st.postprocessing.get_unit_waveforms', (['self._metric_data._recording', 'self._metric_data._sorting'], {'unit_ids': 'self._metric_data._unit_ids'}), '(self._metric_data._recording, self.\n _metric_data._sorting, unit_ids=self._metric_data._unit_ids, **kwargs)\n', (2084, 2195), True, 'import spiketoolkit as st\n'), ((2457, 2525), 'numpy.all', 'np.all', (['(n_waveforms_per_unit < max_spikes_per_unit_for_noise_overlap)'], {}), '(n_waveforms_per_unit < max_spikes_per_unit_for_noise_overlap)\n', (2463, 2525), True, 'import numpy as np\n'), ((8666, 8692), 'numpy.asarray', 'np.asarray', (['noise_overlaps'], {}), '(noise_overlaps)\n', (8676, 8692), True, 'import numpy as np\n'), ((9772, 9783), 'numpy.mean', 'np.mean', (['V1'], {}), '(V1)\n', (9779, 9783), True, 'import numpy as np\n'), ((9798, 9809), 'numpy.mean', 'np.mean', (['V2'], {}), '(V2)\n', (9805, 9809), True, 'import numpy as np\n'), ((1849, 1869), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1863, 1869), True, 'import numpy as np\n'), ((2751, 2898), 'spiketoolkit.postprocessing.get_unit_waveforms', 'st.postprocessing.get_unit_waveforms', (['self._metric_data._recording', 'self._metric_data._sorting'], {'unit_ids': 'self._metric_data._unit_ids'}), '(self._metric_data._recording, self.\n _metric_data._sorting, unit_ids=self._metric_data._unit_ids, **kwargs)\n', (2787, 2898), True, 'import spiketoolkit as st\n'), ((3073, 3142), 'numpy.all', 'np.all', (['(n_waveforms_per_unit >= max_spikes_per_unit_for_noise_overlap)'], {}), '(n_waveforms_per_unit >= max_spikes_per_unit_for_noise_overlap)\n', (3079, 3142), True, 'import numpy as np\n'), ((4446, 4475), 'numpy.arange', 'np.arange', (['min_time', 'max_time'], {}), '(min_time, max_time)\n', (4455, 4475), True, 'import numpy as np\n'), ((6082, 6106), 'numpy.median', 'np.median', (['clips'], {'axis': '(0)'}), '(clips, axis=0)\n', (6091, 6106), True, 'import numpy as np\n'), ((6276, 6305), 'numpy.zeros', 'np.zeros', (['clips_control.shape'], {}), '(clips_control.shape)\n', (6284, 6305), True, 'import numpy as np\n'), ((6328, 6347), 'numpy.zeros', 'np.zeros', (['num_clips'], {}), '(num_clips)\n', (6336, 6347), True, 'import numpy as np\n'), ((6650, 6688), 'numpy.sum', 'np.sum', (['weighted_clips_control'], {'axis': '(0)'}), '(weighted_clips_control, axis=0)\n', (6656, 6688), True, 'import numpy as np\n'), ((7166, 7259), 'spikemetrics.metrics.find_neighboring_channels', 'find_neighboring_channels', (['chmax', 'channel_idx', 'num_channels_to_compare', 'channel_locations'], {}), '(chmax, channel_idx, num_channels_to_compare,\n channel_locations)\n', (7191, 7259), False, 'from spikemetrics.metrics import find_neighboring_channels\n'), ((7322, 7346), 'numpy.sort', 'np.sort', (['channels_to_use'], {}), '(channels_to_use)\n', (7329, 7346), True, 'import numpy as np\n'), ((7482, 7528), 'numpy.concatenate', 'np.concatenate', (['[clips, clips_control]'], {'axis': '(0)'}), '([clips, clips_control], axis=0)\n', (7496, 7528), 
True, 'import numpy as np\n'), ((8063, 8086), 'numpy.zeros', 'np.zeros', (['(num_clips * 2)'], {}), '(num_clips * 2)\n', (8071, 8086), True, 'import numpy as np\n'), ((9846, 9860), 'numpy.dot', 'np.dot', (['V2', 'V2'], {}), '(V2, V2)\n', (9852, 9860), True, 'import numpy as np\n'), ((9829, 9843), 'numpy.dot', 'np.dot', (['V1', 'V2'], {}), '(V1, V2)\n', (9835, 9843), True, 'import numpy as np\n'), ((5482, 5530), 'numpy.arange', 'np.arange', (['max_spikes_per_unit_for_noise_overlap'], {}), '(max_spikes_per_unit_for_noise_overlap)\n', (5491, 5530), True, 'import numpy as np\n'), ((6160, 6176), 'numpy.abs', 'np.abs', (['template'], {}), '(template)\n', (6166, 6176), True, 'import numpy as np\n'), ((6775, 6791), 'numpy.abs', 'np.abs', (['template'], {}), '(template)\n', (6781, 6791), True, 'import numpy as np\n'), ((6742, 6764), 'numpy.abs', 'np.abs', (['noise_template'], {}), '(noise_template)\n', (6748, 6764), True, 'import numpy as np\n')]
|
"""
FLAME - Fuzzy clustering by Local Approximation of MEmbership
"""
from __future__ import print_function
import numpy as np
from scipy import sparse
from scipy.sparse import csr_matrix, lil_matrix
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import normalize
from math import sqrt
# __author__ = "<NAME>"
'''
IMPORTANT!!!
I DID NOT DO THIS!! CREDIT GOES TO Matthew Billson github link: https://github.com/yclicc/FLAME-python
'''
class FLAME(BaseEstimator, ClusterMixin):
def __init__(self, metric="euclidean", cluster_neighbors=5, iteration_neighbors=5, max_iter=np.inf, eps=1e-10, thd=-2, verbose=0):
self.metric = metric
self.cluster_neighbors = cluster_neighbors
self.iteration_neighbors = iteration_neighbors
self.max_iter = max_iter
self.eps = eps
self.thd = thd
self.verbose = verbose
def _get_nearest(self, distances, n_neighbors, n_samples):
# Make a numpy arange for iteration purposes.
sample_range = np.arange(n_samples)[:, None]
# Do an introsort on each row of the distances matrix to put the nth smallest distance in the nth position and all
# smaller elements before it. Then keep only the first n+1 elements (including the element itself which will have
# distance 0 from itself and is removed later).
nearest_np = np.argpartition(distances, n_neighbors, axis=1)
nearest_np = nearest_np[:, :n_neighbors + 1]
# Find the largest distance of the kth closest points.
largest_distance = distances[sample_range, nearest_np[sample_range, -1]]
# Make two arrays of sets the first containing only the n nearest other elements to each element not
# including the element itself and the second containing the same plus any other elements tied for nth nearest
# again excluding the element itself (though if there are k other elements all 0 distance away other problems
# will result).
nearest = []
nearest_with_ties = []
for i in range(n_samples):
ties_for_largest_distance = np.where(distances[i] == largest_distance[i])
nearest.append(set(nearest_np[i, :].tolist()))
nearest[-1].remove(i)
ties_for_largest_distance = set(ties_for_largest_distance[0].tolist())
ties_for_largest_distance.discard(i)
nearest_with_ties.append(nearest[i] | ties_for_largest_distance)
return nearest, nearest_with_ties
def _get_densities(self, distances, nearest, n_samples):
# Make a numpy arange for iteration purposes.
sample_range = np.arange(n_samples)[:, None]
nearest_np = np.array([list(s) for s in nearest])
n_shortest_distances = distances[sample_range, nearest_np]
local_distance_sums = n_shortest_distances.sum(axis=1)
largest_local_sum = local_distance_sums.max(axis=0)
densities = np.asarray(largest_local_sum / local_distance_sums)
return densities
def _get_supports(self, densities, nearest_with_ties, n_samples):
density_sum = densities.sum()
density_mean = density_sum / n_samples
density_sum2 = (densities * densities).sum()
thd = density_mean + self.thd * sqrt(density_sum2 / n_samples - density_mean * density_mean)
csos = []
outliers = []
remaining = []
for i in range(n_samples):
if densities[i] < thd:
outliers.append(i)
elif densities[i] > densities[list(nearest_with_ties[i])].max():
csos.append(i)
else:
remaining.append(i)
return csos, outliers, remaining
def _get_weights(self, distances, nearest_with_ties, fixed, n_samples):
nearest_with_ties = [sorted(list(s)) for s in nearest_with_ties]
weights = lil_matrix((n_samples, n_samples))
for i in range(n_samples):
if i in fixed:
weights[i, i] = 1
else:
for j in nearest_with_ties[i]:
weights[i, j] = distances[i, j]
if self.verbose: print("Assigned weights {0}.".format(i))
weights = weights.tocsr()
weights = normalize(weights, norm='l1', axis=1, copy=False)
return weights
def _get_starting_membership(self, csos, outliers, fixed, n_samples):
M = len(csos) + 1
starting_membership = np.zeros(shape=(n_samples, M))
general_row = np.ndarray(shape=(1, M))
general_row.fill(1. / M)
for i in range(n_samples):
if i not in fixed:
starting_membership[i, :] = general_row
for index, value in enumerate(csos):
starting_membership[value, index] = 1
for i in outliers:
starting_membership[i, -1] = 1
return starting_membership
def _flame(self, X):
"""
Pass Numpy or Pandas array of data as X. As metric pass any string as in sklearn.metrics.pairwise.pairwise_distances
or a callable on pairs of members of X. FLAME is computed with n_neighbors until max_iter or convergence up to eps.
thd is the threshold for outliers: Any element which has less than mean(density) + thd * std(density) will be an outlier.
"""
if sparse.issparse(X) and self.metric not in {"precomputed", "cityblock", "cosine", "euclidean", "l1", "l2",
"manhattan"} and not callable(self.metric):
raise TypeError("The metric {0} does not support sparse data.".format(self.metric))
# Convert pandas objects to numpy arrays.
if 'pandas' in str(X.__class__):
X = X.values
X = check_array(X, accept_sparse="csr", dtype=None)
# Get the number of samples. We use this a lot.
n_samples, _ = X.shape
distances = pairwise_distances(X, metric=self.metric)
nearest, nearest_with_ties = self._get_nearest(distances, self.cluster_neighbors, n_samples)
if self.verbose: print("Got distances and nearest.")
densities = self._get_densities(distances, nearest, n_samples)
if self.verbose: print("Got densities.")
csos, outliers, remaining = self._get_supports(densities, nearest_with_ties, n_samples)
if self.verbose: print("Got suppports.")
if self.verbose: print("There are {0} clusters and {1} outliers.".format(len(csos), len(outliers)))
fixed = set(csos) | set(outliers)
_, nearest_with_ties_for_iteration = self._get_nearest(distances, self.iteration_neighbors, n_samples)
weights = self._get_weights(distances, nearest_with_ties_for_iteration, fixed, n_samples)
if self.verbose: print("Got weights.")
membership_proba = self._get_starting_membership(csos, outliers, fixed, n_samples)
if self.verbose: print("Got starting memberships.")
i = 0
while i < self.max_iter:
lastMembership = membership_proba.copy()
membership_proba = weights.dot(membership_proba)
delta = np.absolute(membership_proba - lastMembership).max()
i += 1
if self.verbose: print("Done iteration {0}.".format(i))
if delta < self.eps:
break
num_clusters = membership_proba.shape[1] - 1
# Get cluster assignment.
pred = np.argmax(membership_proba, axis=1)
# Replace predictions of the outlier group with -1.
pred[pred == num_clusters] = -1
return membership_proba, pred, csos, outliers, densities
def fit(self, X):
self.membership_proba_, self.labels_, self.csos_, self.outliers_, self.densities_ = \
self._flame(X)
return self
def fit_predict(self, X, y=None):
y = self.fit(X).labels_
return y
def fit_predict_proba(self, X, y=None):
y = self.fit(X).membership_proba_
return y
if __name__== "__main__":
X = np.array(
[[0, 0, 0], [1.1, 0, 0], [0, 0.8, 0], [0, 0, 1.3], [10, 10, 10], [11.1, 10, 10], [10, 10.8, 10], [10, 11, 12]])
print(X)
model = FLAME(cluster_neighbors=3, iteration_neighbors=3,verbose=1)
membership = model.fit_predict(X)
    print(membership)
|
[
"numpy.absolute",
"sklearn.metrics.pairwise.pairwise_distances",
"math.sqrt",
"numpy.argmax",
"sklearn.utils.check_array",
"scipy.sparse.issparse",
"numpy.asarray",
"numpy.zeros",
"numpy.argpartition",
"scipy.sparse.lil_matrix",
"numpy.where",
"numpy.array",
"sklearn.preprocessing.normalize",
"numpy.arange",
"numpy.ndarray"
] |
[((7213, 7338), 'numpy.array', 'np.array', (['[[0, 0, 0], [1.1, 0, 0], [0, 0.8, 0], [0, 0, 1.3], [10, 10, 10], [11.1, 10,\n 10], [10, 10.8, 10], [10, 11, 12]]'], {}), '([[0, 0, 0], [1.1, 0, 0], [0, 0.8, 0], [0, 0, 1.3], [10, 10, 10], [\n 11.1, 10, 10], [10, 10.8, 10], [10, 11, 12]])\n', (7221, 7338), True, 'import numpy as np\n'), ((1404, 1451), 'numpy.argpartition', 'np.argpartition', (['distances', 'n_neighbors'], {'axis': '(1)'}), '(distances, n_neighbors, axis=1)\n', (1419, 1451), True, 'import numpy as np\n'), ((2862, 2913), 'numpy.asarray', 'np.asarray', (['(largest_local_sum / local_distance_sums)'], {}), '(largest_local_sum / local_distance_sums)\n', (2872, 2913), True, 'import numpy as np\n'), ((3647, 3681), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(n_samples, n_samples)'], {}), '((n_samples, n_samples))\n', (3657, 3681), False, 'from scipy.sparse import csr_matrix, lil_matrix\n'), ((3933, 3982), 'sklearn.preprocessing.normalize', 'normalize', (['weights'], {'norm': '"""l1"""', 'axis': '(1)', 'copy': '(False)'}), "(weights, norm='l1', axis=1, copy=False)\n", (3942, 3982), False, 'from sklearn.preprocessing import normalize\n'), ((4116, 4146), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples, M)'}), '(shape=(n_samples, M))\n', (4124, 4146), True, 'import numpy as np\n'), ((4163, 4187), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(1, M)'}), '(shape=(1, M))\n', (4173, 4187), True, 'import numpy as np\n'), ((5228, 5275), 'sklearn.utils.check_array', 'check_array', (['X'], {'accept_sparse': '"""csr"""', 'dtype': 'None'}), "(X, accept_sparse='csr', dtype=None)\n", (5239, 5275), False, 'from sklearn.utils import check_array\n'), ((5365, 5406), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['X'], {'metric': 'self.metric'}), '(X, metric=self.metric)\n', (5383, 5406), False, 'from sklearn.metrics.pairwise import pairwise_distances\n'), ((6695, 6730), 'numpy.argmax', 'np.argmax', (['membership_proba'], {'axis': '(1)'}), '(membership_proba, axis=1)\n', (6704, 6730), True, 'import numpy as np\n'), ((1076, 1096), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (1085, 1096), True, 'import numpy as np\n'), ((2077, 2122), 'numpy.where', 'np.where', (['(distances[i] == largest_distance[i])'], {}), '(distances[i] == largest_distance[i])\n', (2085, 2122), True, 'import numpy as np\n'), ((2594, 2614), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (2603, 2614), True, 'import numpy as np\n'), ((4875, 4893), 'scipy.sparse.issparse', 'sparse.issparse', (['X'], {}), '(X)\n', (4890, 4893), False, 'from scipy import sparse\n'), ((3155, 3215), 'math.sqrt', 'sqrt', (['(density_sum2 / n_samples - density_mean * density_mean)'], {}), '(density_sum2 / n_samples - density_mean * density_mean)\n', (3159, 3215), False, 'from math import sqrt\n'), ((6455, 6501), 'numpy.absolute', 'np.absolute', (['(membership_proba - lastMembership)'], {}), '(membership_proba - lastMembership)\n', (6466, 6501), True, 'import numpy as np\n')]
|
import copy
import numpy as np
import tensorflow as tf
from ammf.utils.wavedata.tools.obj_detection import obj_utils
from ammf.utils.wavedata.tools.obj_detection import evaluation
from ammf.core import anchor_projector
from ammf.core import box_3d_encoder
COLOUR_SCHEME_PREDICTIONS = {
"Easy GT": (255, 255, 0), # Yellow
"Medium GT": (255, 128, 0), # Orange
"Hard GT": (255, 0, 0), # Red
"Prediction": (50, 255, 50), # Green
}
def get_gts_based_on_difficulty(dataset, img_idx):
"""Returns lists of ground-truth based on difficulty.
"""
# Get all ground truth labels
all_gt_objs = obj_utils.read_labels(dataset.label_dir, img_idx)
# Filter to dataset classes
gt_objs = dataset.kitti_utils.filter_labels(all_gt_objs)
# Filter objects to desired difficulty
easy_gt_objs = dataset.kitti_utils.filter_labels(
copy.deepcopy(gt_objs), difficulty=0)
medium_gt_objs = dataset.kitti_utils.filter_labels(
copy.deepcopy(gt_objs), difficulty=1)
hard_gt_objs = dataset.kitti_utils.filter_labels(
copy.deepcopy(gt_objs), difficulty=2)
for gt_obj in easy_gt_objs:
gt_obj.type = 'Easy GT'
for gt_obj in medium_gt_objs:
gt_obj.type = 'Medium GT'
for gt_obj in hard_gt_objs:
gt_obj.type = 'Hard GT'
return easy_gt_objs, medium_gt_objs, hard_gt_objs, all_gt_objs
def get_max_ious_3d(all_gt_boxes_3d, pred_boxes_3d):
"""Helper function to calculate 3D IoU for the given predictions.
Args:
        all_gt_boxes_3d: A list of all ground-truth boxes in box_3d
format.
pred_boxes_3d: A list of predictions in box_3d format.
"""
# Only calculate ious if there are predictions
if pred_boxes_3d:
# Convert to iou format
gt_objs_iou_fmt = box_3d_encoder.box_3d_to_3d_iou_format(
all_gt_boxes_3d)
pred_objs_iou_fmt = box_3d_encoder.box_3d_to_3d_iou_format(
pred_boxes_3d)
max_ious_3d = np.zeros(len(all_gt_boxes_3d))
for gt_obj_idx in range(len(all_gt_boxes_3d)):
gt_obj_iou_fmt = gt_objs_iou_fmt[gt_obj_idx]
ious_3d = evaluation.three_d_iou(gt_obj_iou_fmt,
pred_objs_iou_fmt)
max_ious_3d[gt_obj_idx] = np.amax(ious_3d)
else:
# No detections, all ious = 0
max_ious_3d = np.zeros(len(all_gt_boxes_3d))
return max_ious_3d
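# --- Illustrative sketch (editor addition, not part of the original module) ---
# Minimal usage of get_max_ious_3d. It assumes a box_3d entry is a 7-element
# array [x, y, z, l, w, h, ry]; the exact layout is defined by
# ammf.core.box_3d_encoder and may differ, so treat these numbers as placeholders.
def _example_get_max_ious_3d():
    gt_boxes_3d = [np.array([0.0, 1.0, 10.0, 3.9, 1.6, 1.5, 0.0])]
    pred_boxes_3d = [np.array([0.2, 1.0, 10.1, 3.8, 1.6, 1.5, 0.05])]
    # One max-IoU value per ground-truth box; all zeros when there are no predictions.
    return get_max_ious_3d(gt_boxes_3d, pred_boxes_3d)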
def tf_project_to_image_space(anchors, calib_p2, image_shape, img_idx):
"""Helper function to convert data to tensors and project
to image space using the tf projection function.
"""
anchors_tensor = tf.convert_to_tensor(anchors, tf.float32)
calib_p2_tensor = tf.convert_to_tensor(calib_p2, tf.float32)
image_shape_tensor = tf.convert_to_tensor(image_shape, tf.float32)
projected_boxes_tensor, _ = \
anchor_projector.tf_project_to_image_space(
anchors_tensor,
calib_p2_tensor,
image_shape_tensor)
sess = tf.Session()
with sess.as_default():
projected_boxes = projected_boxes_tensor.eval()
return projected_boxes
|
[
"ammf.core.box_3d_encoder.box_3d_to_3d_iou_format",
"copy.deepcopy",
"tensorflow.convert_to_tensor",
"ammf.core.anchor_projector.tf_project_to_image_space",
"tensorflow.Session",
"ammf.utils.wavedata.tools.obj_detection.evaluation.three_d_iou",
"numpy.amax",
"ammf.utils.wavedata.tools.obj_detection.obj_utils.read_labels"
] |
[((632, 681), 'ammf.utils.wavedata.tools.obj_detection.obj_utils.read_labels', 'obj_utils.read_labels', (['dataset.label_dir', 'img_idx'], {}), '(dataset.label_dir, img_idx)\n', (653, 681), False, 'from ammf.utils.wavedata.tools.obj_detection import obj_utils\n'), ((2679, 2720), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['anchors', 'tf.float32'], {}), '(anchors, tf.float32)\n', (2699, 2720), True, 'import tensorflow as tf\n'), ((2743, 2785), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['calib_p2', 'tf.float32'], {}), '(calib_p2, tf.float32)\n', (2763, 2785), True, 'import tensorflow as tf\n'), ((2811, 2856), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image_shape', 'tf.float32'], {}), '(image_shape, tf.float32)\n', (2831, 2856), True, 'import tensorflow as tf\n'), ((2900, 2999), 'ammf.core.anchor_projector.tf_project_to_image_space', 'anchor_projector.tf_project_to_image_space', (['anchors_tensor', 'calib_p2_tensor', 'image_shape_tensor'], {}), '(anchors_tensor, calib_p2_tensor,\n image_shape_tensor)\n', (2942, 2999), False, 'from ammf.core import anchor_projector\n'), ((3044, 3056), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3054, 3056), True, 'import tensorflow as tf\n'), ((882, 904), 'copy.deepcopy', 'copy.deepcopy', (['gt_objs'], {}), '(gt_objs)\n', (895, 904), False, 'import copy\n'), ((984, 1006), 'copy.deepcopy', 'copy.deepcopy', (['gt_objs'], {}), '(gt_objs)\n', (997, 1006), False, 'import copy\n'), ((1084, 1106), 'copy.deepcopy', 'copy.deepcopy', (['gt_objs'], {}), '(gt_objs)\n', (1097, 1106), False, 'import copy\n'), ((1819, 1874), 'ammf.core.box_3d_encoder.box_3d_to_3d_iou_format', 'box_3d_encoder.box_3d_to_3d_iou_format', (['all_gt_boxes_3d'], {}), '(all_gt_boxes_3d)\n', (1857, 1874), False, 'from ammf.core import box_3d_encoder\n'), ((1916, 1969), 'ammf.core.box_3d_encoder.box_3d_to_3d_iou_format', 'box_3d_encoder.box_3d_to_3d_iou_format', (['pred_boxes_3d'], {}), '(pred_boxes_3d)\n', (1954, 1969), False, 'from ammf.core import box_3d_encoder\n'), ((2173, 2230), 'ammf.utils.wavedata.tools.obj_detection.evaluation.three_d_iou', 'evaluation.three_d_iou', (['gt_obj_iou_fmt', 'pred_objs_iou_fmt'], {}), '(gt_obj_iou_fmt, pred_objs_iou_fmt)\n', (2195, 2230), False, 'from ammf.utils.wavedata.tools.obj_detection import evaluation\n'), ((2315, 2331), 'numpy.amax', 'np.amax', (['ious_3d'], {}), '(ious_3d)\n', (2322, 2331), True, 'import numpy as np\n')]
|
"""
Utilities based on building baseline machine learning models.
"""
from typing import Union, Optional
from pandas import DataFrame, Series
from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal
from scipy.stats import boxcox, normaltest, mode
from sklearn.compose import ColumnTransformer
from sklearn.exceptions import ConvergenceWarning, DataConversionWarning
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (FunctionTransformer, OneHotEncoder,
RobustScaler, StandardScaler,
label_binarize)
from sklearn.utils._testing import ignore_warnings
from .auxiliary import infer_dtypes
from .enum import PredictionTask
BASELINE_CLASSIFIER = Pipeline([
('imputer', SimpleImputer()),
('classifier', LogisticRegression())
])
BASELINE_REGRESSION = Pipeline([
('imputer', SimpleImputer()),
('classifier', LinearRegression())
])
NUMERIC_TRANSFORMER = Pipeline([
('imputer', SimpleImputer()),
('scaler', StandardScaler())])
CATEGORICAL_TRANSFORMER = Pipeline([
('imputer', SimpleImputer(strategy='most_frequent')),
('encoder', OneHotEncoder(handle_unknown='ignore'))])
ORDINAL_TRANSFORMER = None # Not implemented
def get_prediction_task(df: DataFrame, label: str):
"Heuristics to infer prediction task (classification/regression)."
return 'classification' if len(set(df[label])) == 2 else 'regression'
@ignore_warnings(category=ConvergenceWarning)
def baseline_predictions(df: DataFrame, label: str, task='classification'):
"Train a baseline model and predict for a test set"
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline model
model = BASELINE_CLASSIFIER if task == 'classification' else BASELINE_REGRESSION
# 2. Train overall model
x_orig, y_orig = df.drop(label, axis=1), label_binarize(df[label], classes=list(set(df[label])))
x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=42)
model.fit(x_train.select_dtypes('number'), y_train)
# 3. Predict
if task == 'regression':
y_pred = model.predict(x_test.select_dtypes('number'))
elif task == 'classification':
y_pred = model.predict_proba(x_test.select_dtypes('number'))[:, 1]
# 4. Return both the predictions and x_test, y_test to analyze the performances
return y_pred, x_test, y_test
@ignore_warnings(category=DataConversionWarning)
def baseline_performance(df: DataFrame, label: str,
task: PredictionTask = PredictionTask.CLASSIFICATION,
adjusted_metric: bool = False):
"""Train a baseline model, predict for a test set and return the performance.
Args:
- df (DataFrame): original dataset
- label (str): name of target feature column
- task (PredictionTask): classification, regression
- adjusted_metric (bool): if True, return metric as percentage of max achievable performance
"""
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline performance metric
metric = roc_auc_score if task == 'classification' else mean_squared_error
# 2. Get the baseline predictions
y_pred, _, y_test = baseline_predictions(df=df, label=label, task=task)
# 3. Get the performance
if adjusted_metric:
perf = adjusted_performance(y_test, y_pred, task=task, metric=metric)
else:
perf = metric(y_test, y_pred)
return perf
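# --- Illustrative sketch (editor addition) ---
# Minimal call of baseline_performance on a toy binary-classification frame.
# The column names below are invented for illustration only.
def _example_baseline_performance():
    toy = DataFrame({'feat_a': [0.1, 0.4, 0.35, 0.8] * 25,
                     'feat_b': [1.0, 0.2, 0.5, 0.9] * 25,
                     'target': [0, 0, 1, 1] * 25})
    # With two target classes the task is inferred as classification, so ROC AUC is returned.
    return baseline_performance(toy, label='target')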
def adjusted_performance(y_true, y_pred, task: PredictionTask, metric: callable):
"""Calculates the adjusted metric as ratio of real to maximum performance.
    Returns the fraction of the best achievable improvement over the baseline that the model attains.
"""
task = PredictionTask(task)
y_default = mean(y_true) if task == PredictionTask.CLASSIFICATION else mode(y_true).mode[0] # define the value
y_base = tile(y_default, (len(y_true), 1)) # create an array with default value
best_perf = metric(y_true, y_true)
base_perf = metric(y_true, y_base)
real_perf = metric(y_true, y_pred)
return (real_perf - base_perf) / (best_perf - base_perf)
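# Worked example (editor note): with ROC AUC as the metric, the constant baseline
# prediction scores base_perf = 0.5 while a perfect model scores best_perf = 1.0, so a
# real score of 0.8 maps to (0.8 - 0.5) / (1.0 - 0.5) = 0.6, i.e. the model recovers
# 60% of the achievable improvement over the baseline.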
@ignore_warnings(category=DataConversionWarning)
def performance_per_feature_values(df: DataFrame, feature: str, label: str, task='classification'):
"""Performance achieved per each value of a groupby feature."""
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline performance metric
metric = roc_auc_score if task == 'classification' else mean_squared_error
# 2. Get the baseline predictions
y_pred, x_test, y_test = baseline_predictions(df=df, label=label, task=task)
# 3. Get the performances per feature value
uniques = set(x_test[feature])
results = {}
for value in uniques: # for each category
y_pred_cat = y_pred[x_test[feature] == value]
y_true_cat = y_test[x_test[feature] == value]
try:
results[value] = metric(y_true_cat, y_pred_cat)
except ValueError as exc:
results[value] = f'[ERROR] Failed performance metric with message: {exc}'
return results
def performance_per_missing_value(df: DataFrame, feature: str, label: str, task='classification'):
"""Performance difference between valued and missing values in feature."""
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline performance metric
metric = roc_auc_score if task == 'classification' else mean_squared_error
# 2. Get the baseline predictions
y_pred, x_test, y_test = baseline_predictions(df=df, label=label, task=task)
# 3. Get the performance per valued vs missing feature
missing_mask = x_test[feature].isna()
results = {}
results['missing'] = metric(y_test[missing_mask], y_pred[missing_mask])
results['valued'] = metric(y_test[~missing_mask], y_pred[~missing_mask])
return results
@ignore_warnings(category=ConvergenceWarning)
def predict_missingness(df: DataFrame, feature: str):
"Train a baseline model to predict the missingness of a feature value."
# 0. Preprocessing
df = df.copy() # avoid altering the original DataFrame
target = f'is_missing_{feature}'
# 1. Define the baseline model
model = BASELINE_CLASSIFIER
# 2. Create the new target
df[target] = df[feature].isna()
# 3. Train overall model
x_orig, y_orig = df.drop([feature, target], axis=1), df[target]
x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=42)
model.fit(x_train.select_dtypes('number'), y_train)
# 4. Predict
y_pred = model.predict_proba(x_test.select_dtypes('number'))[:, 1]
# 5. Return the area under the roc curve
return roc_auc_score(y_test, y_pred)
def standard_transform(df, dtypes, skip: Optional[list] = None, robust=False):
"""Applies standard transformation to the dataset (imputation, centering and scaling), returns transformed data
and the fitted transformer.
Numerical data is imputed with mean, centered and scaled by 4 standard deviations.
Categorical data is imputed with mode. Encoding is not performed in this stage to preserve the same columns.
If robust is passed as True, will truncate numerical data before computing statistics.
[1]From 1997 <NAME>; Martinez, <NAME>. -
Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf
"""
skip = [] if skip is None else skip
numerical_features = [key for key, value in dtypes.items() if value == 'numerical' and key not in skip]
categorical_features = [key for key, value in dtypes.items() if value == 'categorical' and key not in skip]
assert len(numerical_features + categorical_features +
skip) == len(df.columns), 'the union of dtypes keys with skip should be the same as the df columns'
if robust:
numeric_transformer = Pipeline([
('imputer', SimpleImputer()),
('scaler', RobustScaler(quantile_range=(5.0, 95.0)))])
else:
numeric_transformer = NUMERIC_TRANSFORMER
preprocessor = ColumnTransformer(
transformers=[ # Numerical vars are scaled by 4sd so that most of the data are fit in the [-1, 1] range
('num', Pipeline(numeric_transformer.steps + \
[('divby4', FunctionTransformer(lambda x: x / 4))]), numerical_features),
('cat', Pipeline([('impute', SimpleImputer(strategy='most_frequent'))]), categorical_features)],
remainder='passthrough')
new_column_order = numerical_features + categorical_features + skip
tdf = DataFrame(preprocessor.fit_transform(df), index=df.index, columns=new_column_order)
return tdf, preprocessor
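# --- Illustrative sketch (editor addition) ---
# Minimal call of standard_transform with an explicit dtypes mapping; the
# 'numerical'/'categorical' labels follow the convention used by infer_dtypes,
# and the toy column names are invented for illustration.
def _example_standard_transform():
    toy = DataFrame({'age': [21.0, 35.0, None, 50.0], 'city': ['a', 'b', 'b', None]})
    dtypes = {'age': 'numerical', 'city': 'categorical'}
    tdf, preprocessor = standard_transform(toy, dtypes, robust=True)
    return tdf, preprocessor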
def performance_one_vs_rest(df: DataFrame, label_feat: str, _class: str, dtypes=None):
"""Train a classifier to predict a class in binary fashion against all other classes.
A normalized dataframe should be passed for best results"""
# 0. Preprocessing
df = df.copy() # avoid altering the original DataFrame
# 1. Define the baseline model
if not dtypes:
dtypes = infer_dtypes(df)
categorical_features = [key for key, value in dtypes.items() if value == 'categorical' and key != label_feat]
preprocessor = ColumnTransformer(
transformers=[('cat', CATEGORICAL_TRANSFORMER, categorical_features)]) # OHE categorical variables
model = Pipeline([('preprocessing', preprocessor), ('classifier', LogisticRegression())])
# 2. Train overall model
x_orig, y_orig = df.drop(label_feat, axis=1), label_binarize(df[label_feat], classes=[_class]).squeeze()
x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=24)
model.fit(x_train, y_train)
# 3. Predict
y_pred = model.predict_proba(x_test)[:, 1]
# 4. Return the area under the roc curve
return roc_auc_score(y_test, y_pred)
def center_of_mass_statistic(column: Series, col_dtype: str) -> Union[float, int, str]:
"Returns a center of mass statistic of a column based on its dtype."
return column.mean() if col_dtype == 'numerical' else column.mode()[0] # only first mode
def estimate_centroid(df: DataFrame, dtypes: dict = None):
"""Makes a centroid estimation for a given dataframe.
    Uses the provided dtypes, or infers them, to pick the most suitable statistic for each column."""
if dtypes:
if not all((col in dtypes for col in df.columns)):
dtypes = dtypes.update(infer_dtypes(df, skip=dtypes.columns))
else:
dtypes = infer_dtypes(df)
centroid = Series(df.iloc[0])
for col in centroid.index:
centroid[col] = center_of_mass_statistic(df[col], dtypes[col])
return centroid
def heom(x_df: DataFrame, y_df, dtypes):
"""Implements the Heterogeneous Euclidean-Overlap Metric between a sample x and a reference y.
The data is assumed to already be preprocessed (normalized and imputed).
[1]From 1997 <NAME>; <NAME>. -
Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf
"""
distances = DataFrame(empty(x_df.shape), index=x_df.index, columns=x_df.columns)
distance_funcs = {'categorical': lambda x, y: 0 if x == y else 1,
'numerical': lambda x, y: abs(x - y)} # Here we are assuming the data to be previously scaled
for col_idx, column in enumerate(distances.columns):
distances[column] = x_df[column].apply(distance_funcs[dtypes[column]], args=[y_df[col_idx]])
return distances
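# --- Illustrative sketch (editor addition) ---
# heom() compares every record in a frame against one reference row: categorical
# columns contribute 0/1 overlap distances and numerical columns contribute absolute
# differences (the data is assumed to be scaled already). Toy names are illustrative.
def _example_heom():
    sample = DataFrame({'height': [0.10, -0.25], 'color': ['red', 'blue']})
    reference = [0.0, 'red']
    dtypes = {'height': 'numerical', 'color': 'categorical'}
    # Expected distances: height -> [0.10, 0.25]; color -> [0, 1].
    return heom(sample, reference, dtypes)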
def estimate_sd(sample: DataFrame, reference=None, dtypes=None):
"""Estimates the standard deviation of a sample of records.
    A reference can be passed to avoid recomputing the centroid or to measure distances to a different reference point.
The reference is expected as a (1, N) array where N is the number of columns in the sample.
Returns:
std_dev: the standard deviation of the distance vectors of the sample to the reference point
std_distances: the distances of the sample points to the reference point scaled by std_dev
"""
if dtypes: # Ensure dtypes are compatible with sample
if not all((col in dtypes for col in sample.columns)):
dtypes = dtypes.update(infer_dtypes(sample, skip=dtypes.columns))
else:
dtypes = infer_dtypes(sample)
if reference is None:
reference = estimate_centroid(sample, dtypes)
else:
assert len(reference) == len(
sample.columns), "The provided reference point does not have the same dimension as the sample records"
distances = heom(x_df=sample, y_df=reference, dtypes=dtypes)
euclidean_distances = (distances.apply(square).sum(axis=1) / len(sample.columns)).apply(sqrt)
std_dev = std(euclidean_distances)
std_distances = euclidean_distances / std_dev
return std_dev, std_distances
def gmm_clustering(data, n_gaussians):
"""Produces a GMM model with n_gaussians to cluster provided data."""
gmm_ = GaussianMixture(n_components=n_gaussians).fit(data)
return gmm_.predict(data), gmm_.aic(data)
def normality_test(data, suite='full', p_th=5e-3):
"""Performs a normality test on the data. Null hypothesis, data comes from normal distribution.
A transformations taken from a suite is applied to the data before each run of the normal test.
The first transformation in the suite that passes the normalcy test is returned
Returns:
result: True if any transformation led to a positive normal test, False otherwise
test: The first test in the suite to lead to positive normal test"""
transforms = {None: lambda x: x,
'inverse': reciprocal,
'square root': sqrt,
'log': nplog,
'Box Cox': boxcox}
if suite == 'full':
suite = transforms.keys()
else:
suite = list(suite) if isinstance(suite, str) else suite
for transform in suite:
try:
transformed_data = transforms[transform](data)
_, p_stat = normaltest(transformed_data, nan_policy='raise')
except (AttributeError, TypeError, ZeroDivisionError, ValueError):
continue
if p_stat > p_th:
return True, transform, p_stat
return False, None, None
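# --- Illustrative sketch (editor addition) ---
# normality_test tries each transformation in the suite until one makes the data pass
# the scipy normality check; here the suite is restricted to two transformations.
def _example_normality_test():
    from numpy.random import default_rng
    data = default_rng(0).lognormal(size=500)
    passed, transform, p_value = normality_test(data, suite=['log', 'Box Cox'])
    return passed, transform, p_value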
|
[
"sklearn.preprocessing.FunctionTransformer",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"numpy.empty",
"sklearn.mixture.GaussianMixture",
"sklearn.compose.ColumnTransformer",
"numpy.mean",
"sklearn.impute.SimpleImputer",
"numpy.std",
"scipy.stats.normaltest",
"scipy.stats.mode",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.label_binarize",
"sklearn.metrics.roc_auc_score",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LogisticRegression",
"pandas.Series",
"sklearn.preprocessing.RobustScaler",
"sklearn.utils._testing.ignore_warnings"
] |
[((1737, 1781), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'ConvergenceWarning'}), '(category=ConvergenceWarning)\n', (1752, 1781), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((2755, 2802), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'DataConversionWarning'}), '(category=DataConversionWarning)\n', (2770, 2802), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((4551, 4598), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'DataConversionWarning'}), '(category=DataConversionWarning)\n', (4566, 4598), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((6380, 6424), 'sklearn.utils._testing.ignore_warnings', 'ignore_warnings', ([], {'category': 'ConvergenceWarning'}), '(category=ConvergenceWarning)\n', (6395, 6424), False, 'from sklearn.utils._testing import ignore_warnings\n'), ((2292, 2356), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_orig', 'y_orig'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(x_orig, y_orig, test_size=0.3, random_state=42)\n', (2308, 2356), False, 'from sklearn.model_selection import train_test_split\n'), ((6948, 7012), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_orig', 'y_orig'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(x_orig, y_orig, test_size=0.3, random_state=42)\n', (6964, 7012), False, 'from sklearn.model_selection import train_test_split\n'), ((7215, 7244), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7228, 7244), False, 'from sklearn.metrics import mean_squared_error, roc_auc_score\n'), ((9747, 9839), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', ([], {'transformers': "[('cat', CATEGORICAL_TRANSFORMER, categorical_features)]"}), "(transformers=[('cat', CATEGORICAL_TRANSFORMER,\n categorical_features)])\n", (9764, 9839), False, 'from sklearn.compose import ColumnTransformer\n'), ((10146, 10210), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_orig', 'y_orig'], {'test_size': '(0.3)', 'random_state': '(24)'}), '(x_orig, y_orig, test_size=0.3, random_state=24)\n', (10162, 10210), False, 'from sklearn.model_selection import train_test_split\n'), ((10365, 10394), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10378, 10394), False, 'from sklearn.metrics import mean_squared_error, roc_auc_score\n'), ((11061, 11079), 'pandas.Series', 'Series', (['df.iloc[0]'], {}), '(df.iloc[0])\n', (11067, 11079), False, 'from pandas import DataFrame, Series\n'), ((13238, 13262), 'numpy.std', 'std', (['euclidean_distances'], {}), '(euclidean_distances)\n', (13241, 13262), False, 'from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal\n'), ((4183, 4195), 'numpy.mean', 'mean', (['y_true'], {}), '(y_true)\n', (4187, 4195), False, 'from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal\n'), ((11577, 11594), 'numpy.empty', 'empty', (['x_df.shape'], {}), '(x_df.shape)\n', (11582, 11594), False, 'from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal\n'), ((1058, 1073), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (1071, 1073), False, 'from sklearn.impute import SimpleImputer\n'), ((1095, 1115), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1113, 1115), False, 'from sklearn.linear_model import 
LinearRegression, LogisticRegression\n'), ((1170, 1185), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (1183, 1185), False, 'from sklearn.impute import SimpleImputer\n'), ((1207, 1225), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1223, 1225), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1280, 1295), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (1293, 1295), False, 'from sklearn.impute import SimpleImputer\n'), ((1313, 1329), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1327, 1329), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n'), ((1387, 1426), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (1400, 1426), False, 'from sklearn.impute import SimpleImputer\n'), ((1445, 1483), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (1458, 1483), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n'), ((13473, 13514), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n_gaussians'}), '(n_components=n_gaussians)\n', (13488, 13514), False, 'from sklearn.mixture import GaussianMixture\n'), ((14531, 14579), 'scipy.stats.normaltest', 'normaltest', (['transformed_data'], {'nan_policy': '"""raise"""'}), "(transformed_data, nan_policy='raise')\n", (14541, 14579), False, 'from scipy.stats import boxcox, normaltest, mode\n'), ((4242, 4254), 'scipy.stats.mode', 'mode', (['y_true'], {}), '(y_true)\n', (4246, 4254), False, 'from scipy.stats import boxcox, normaltest, mode\n'), ((9944, 9964), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (9962, 9964), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((10048, 10096), 'sklearn.preprocessing.label_binarize', 'label_binarize', (['df[label_feat]'], {'classes': '[_class]'}), '(df[label_feat], classes=[_class])\n', (10062, 10096), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n'), ((8420, 8435), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (8433, 8435), False, 'from sklearn.impute import SimpleImputer\n'), ((8461, 8501), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {'quantile_range': '(5.0, 95.0)'}), '(quantile_range=(5.0, 95.0))\n', (8473, 8501), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n'), ((8903, 8942), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (8916, 8942), False, 'from sklearn.impute import SimpleImputer\n'), ((8800, 8836), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['(lambda x: x / 4)'], {}), '(lambda x: x / 4)\n', (8819, 8836), False, 'from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, RobustScaler, StandardScaler, label_binarize\n')]
|
from pathlib import Path
import numpy as np
from PIL import ImageFont
from scipy.ndimage import convolve
from scipy.spatial import cKDTree
resource_dir = (Path(__file__) / "../resources").absolute()
class Particle:
def __init__(self, x, y, color, ball_size=1):
self.pos = np.array([x, y]).astype(float)
self.vel = np.zeros(2)
self.acc = np.zeros(2)
self.target = self.pos
self.radius = ball_size
self.max_speed = 10
self.max_force = 0.6
self.color = np.array(color, dtype=np.uint8)
def update(self):
self.pos += self.vel
self.vel += self.acc
self.acc *= 0
def arrive(self):
# calculate the distance
dist = np.linalg.norm(self.target - self.pos)
# normalize it
desired = (self.target - self.pos) / dist
# if we are less than 100px away from our target, start to slow down
if dist < 100:
speed = dist / 100 * self.max_speed
else:
# otherwise go at full speed
speed = self.max_speed
# set the magnitude of our desired vector
desired *= speed
steer = desired - self.vel
steer_mag = np.linalg.norm(steer)
if steer_mag > self.max_force:
steer = steer / steer_mag * self.max_force
return steer
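# --- Illustrative sketch (editor addition, not part of the original script) ---
# A typical animation step for these particles: the steering force from arrive()
# is applied as acceleration, then update() integrates velocity and position.
def _example_particle_step():
    p = Particle(0.0, 0.0, color=(255, 255, 255), ball_size=2)
    p.target = np.array([50.0, 80.0])
    for _ in range(10):
        p.acc += p.arrive()
        p.update()
    return p.pos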
def render_text_perimeter_balls(
txt, pos=(0, 0), scale=16, color=(235, 64, 52), ball_size=4.5
):
# place particles on the text outline without overlapping them.
font = ImageFont.truetype(
(resource_dir / "VCR_OSD_MONO_1.001.ttf").as_posix(), scale
)
a = font.getmask(txt)
out = np.empty(a.size)
for y in range(a.size[0]):
for x in range(a.size[1]):
out[y, x] = a.getpixel((y, x))
out = out / 255
out = np.where(out > 0, 1, 0)
out = np.rot90(out)
kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
out = convolve(out, kernel, mode="constant")
outline = np.where(out == 5, 1, 0)
indices = np.transpose(outline.nonzero()) + np.array(pos)
particles = []
for xy in indices:
particles.append(Particle(xy[1], xy[0], color, ball_size))
quadTree = cKDTree([p.pos for p in particles])
# loop over particles. remove all touching particles
to_remove = set()
for particle in particles:
if particle in to_remove:
continue
colliding_particles = [
particles[i]
for i in quadTree.query_ball_point(particle.pos, particle.radius * 2)
]
for p in colliding_particles:
if p != particle:
to_remove.add(p)
for particle in to_remove:
particles.remove(particle)
out = np.array([p.pos for p in particles])
# out = out/np.linalg.norm(out)
return out
if __name__ == "__main__":
# generate the particles with their target position
render_text_perimeter_balls("Hey!", scale=300, pos=(75, 250), color=(226, 53, 31))
render_text_perimeter_balls(
"#show-your-projects",
scale=70,
pos=(10, 150),
color=(231, 201, 49),
ball_size=2,
)
|
[
"numpy.empty",
"numpy.zeros",
"scipy.ndimage.convolve",
"pathlib.Path",
"numpy.where",
"numpy.array",
"numpy.rot90",
"numpy.linalg.norm",
"scipy.spatial.cKDTree"
] |
[((1668, 1684), 'numpy.empty', 'np.empty', (['a.size'], {}), '(a.size)\n', (1676, 1684), True, 'import numpy as np\n'), ((1825, 1848), 'numpy.where', 'np.where', (['(out > 0)', '(1)', '(0)'], {}), '(out > 0, 1, 0)\n', (1833, 1848), True, 'import numpy as np\n'), ((1859, 1872), 'numpy.rot90', 'np.rot90', (['out'], {}), '(out)\n', (1867, 1872), True, 'import numpy as np\n'), ((1887, 1930), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 0, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n', (1895, 1930), True, 'import numpy as np\n'), ((1941, 1979), 'scipy.ndimage.convolve', 'convolve', (['out', 'kernel'], {'mode': '"""constant"""'}), "(out, kernel, mode='constant')\n", (1949, 1979), False, 'from scipy.ndimage import convolve\n'), ((1994, 2018), 'numpy.where', 'np.where', (['(out == 5)', '(1)', '(0)'], {}), '(out == 5, 1, 0)\n', (2002, 2018), True, 'import numpy as np\n'), ((2208, 2243), 'scipy.spatial.cKDTree', 'cKDTree', (['[p.pos for p in particles]'], {}), '([p.pos for p in particles])\n', (2215, 2243), False, 'from scipy.spatial import cKDTree\n'), ((2738, 2774), 'numpy.array', 'np.array', (['[p.pos for p in particles]'], {}), '([p.pos for p in particles])\n', (2746, 2774), True, 'import numpy as np\n'), ((339, 350), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (347, 350), True, 'import numpy as np\n'), ((370, 381), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (378, 381), True, 'import numpy as np\n'), ((527, 558), 'numpy.array', 'np.array', (['color'], {'dtype': 'np.uint8'}), '(color, dtype=np.uint8)\n', (535, 558), True, 'import numpy as np\n'), ((734, 772), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.target - self.pos)'], {}), '(self.target - self.pos)\n', (748, 772), True, 'import numpy as np\n'), ((1218, 1239), 'numpy.linalg.norm', 'np.linalg.norm', (['steer'], {}), '(steer)\n', (1232, 1239), True, 'import numpy as np\n'), ((2067, 2080), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (2075, 2080), True, 'import numpy as np\n'), ((158, 172), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (162, 172), False, 'from pathlib import Path\n'), ((289, 305), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (297, 305), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
"""Calculate IoU of part segmentation task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
import data_utils
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--folder_gt', '-g', help='Path to ground truth folder', required=True)
parser.add_argument('--folder_pred', '-p', help='Path to prediction folder', required=True)
parser.add_argument('--folder_data', '-d', help='Path to point cloud data folder')
parser.add_argument('--part_avg', '-a', action='store_true', help='Use part level average')
args = parser.parse_args()
print(args)
category_id_to_name = {
2691156: 'Airplane',
2773838: 'Bag',
2954340: 'Cap',
2958343: 'Car',
3001627: 'Chair',
3261776: 'Earphone',
3467517: 'Guitar',
3624134: 'Knife',
3636649: 'Lamp',
3642806: 'Laptop',
3790512: 'Motorbike',
3797390: 'Mug',
3948459: 'Pistol',
4099429: 'Rocket',
4225987: 'Skateboard',
4379243: 'Table'}
categories = sorted(os.listdir(args.folder_gt))
label_min = sys.maxsize
for category in categories:
category_folder_gt = os.path.join(args.folder_gt, category)
filenames = sorted(os.listdir(category_folder_gt))
for filename in filenames:
filepath_gt = os.path.join(category_folder_gt, filename)
label_gt = np.loadtxt(filepath_gt).astype(np.int32)
label_min = min(label_min, np.amin(label_gt))
IoU = 0.0
total_num = 0
for category in categories:
category_folder_gt = os.path.join(args.folder_gt, category)
category_folder_pred = os.path.join(args.folder_pred, category)
if args.folder_data:
category_folder_data = os.path.join(args.folder_data, category)
category_folder_err = os.path.join(args.folder_pred+'_err_ply', category)
IoU_category = 0.0
filenames = sorted(os.listdir(category_folder_gt))
for filename in filenames:
filepath_gt = os.path.join(category_folder_gt, filename)
filepath_pred = os.path.join(category_folder_pred, filename)
label_gt = np.loadtxt(filepath_gt).astype(np.int32) - label_min
label_pred = np.loadtxt(filepath_pred).astype(np.int32)
if args.folder_data:
filepath_data = os.path.join(category_folder_data, filename[:-3]+'pts')
filepath_err = os.path.join(category_folder_err, filename[:-3] + 'ply')
coordinates = [[float(value) for value in xyz.split(' ')]
for xyz in open(filepath_data, 'r') if len(xyz.split(' ')) == 3]
assert (label_gt.shape[0] == len(coordinates))
data_utils.save_ply_property(np.array(coordinates), (label_gt == label_pred), 6, filepath_err)
if args.part_avg:
label_max = np.amax(label_gt)
IoU_part = 0.0
for label_idx in range(label_max+1):
locations_gt = (label_gt == label_idx)
locations_pred = (label_pred == label_idx)
I_locations = np.logical_and(locations_gt, locations_pred)
U_locations = np.logical_or(locations_gt, locations_pred)
I = np.sum(I_locations) + np.finfo(np.float32).eps
U = np.sum(U_locations) + np.finfo(np.float32).eps
IoU_part = IoU_part + I/U
IoU_sample = IoU_part / (label_max+1)
else:
label_correct_locations = (label_gt == label_pred)
IoU_sample = np.sum(label_correct_locations) / label_gt.size
IoU_category = IoU_category + IoU_sample
IoU = IoU + IoU_category
IoU_category = IoU_category / len(filenames)
if category.isdigit():
print("IoU of %s: " % (category_id_to_name[int(category)]), IoU_category)
else:
print("IoU of %s: " % category, IoU_category)
total_num = total_num + len(filenames)
IoU = IoU / total_num
print("IoU: ", IoU)
if __name__ == '__main__':
main()
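# Worked example (editor note) for the --part_avg branch above: if a sample contains
# two part labels whose per-part I/U ratios are 0.75 and 1.0, then
# IoU_sample = (0.75 + 1.0) / 2 = 0.875; without --part_avg the sample simply scores
# the fraction of correctly labelled points.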
|
[
"numpy.sum",
"numpy.amin",
"argparse.ArgumentParser",
"numpy.logical_and",
"numpy.amax",
"numpy.finfo",
"numpy.array",
"numpy.loadtxt",
"numpy.logical_or",
"os.path.join",
"os.listdir"
] |
[((293, 318), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (316, 318), False, 'import argparse\n'), ((1247, 1273), 'os.listdir', 'os.listdir', (['args.folder_gt'], {}), '(args.folder_gt)\n', (1257, 1273), False, 'import os\n'), ((1369, 1407), 'os.path.join', 'os.path.join', (['args.folder_gt', 'category'], {}), '(args.folder_gt, category)\n', (1381, 1407), False, 'import os\n'), ((1797, 1835), 'os.path.join', 'os.path.join', (['args.folder_gt', 'category'], {}), '(args.folder_gt, category)\n', (1809, 1835), False, 'import os\n'), ((1868, 1908), 'os.path.join', 'os.path.join', (['args.folder_pred', 'category'], {}), '(args.folder_pred, category)\n', (1880, 1908), False, 'import os\n'), ((1436, 1466), 'os.listdir', 'os.listdir', (['category_folder_gt'], {}), '(category_folder_gt)\n', (1446, 1466), False, 'import os\n'), ((1531, 1573), 'os.path.join', 'os.path.join', (['category_folder_gt', 'filename'], {}), '(category_folder_gt, filename)\n', (1543, 1573), False, 'import os\n'), ((1975, 2015), 'os.path.join', 'os.path.join', (['args.folder_data', 'category'], {}), '(args.folder_data, category)\n', (1987, 2015), False, 'import os\n'), ((2051, 2104), 'os.path.join', 'os.path.join', (["(args.folder_pred + '_err_ply')", 'category'], {}), "(args.folder_pred + '_err_ply', category)\n", (2063, 2104), False, 'import os\n'), ((2161, 2191), 'os.listdir', 'os.listdir', (['category_folder_gt'], {}), '(category_folder_gt)\n', (2171, 2191), False, 'import os\n'), ((2256, 2298), 'os.path.join', 'os.path.join', (['category_folder_gt', 'filename'], {}), '(category_folder_gt, filename)\n', (2268, 2298), False, 'import os\n'), ((2328, 2372), 'os.path.join', 'os.path.join', (['category_folder_pred', 'filename'], {}), '(category_folder_pred, filename)\n', (2340, 2372), False, 'import os\n'), ((1679, 1696), 'numpy.amin', 'np.amin', (['label_gt'], {}), '(label_gt)\n', (1686, 1696), True, 'import numpy as np\n'), ((2588, 2645), 'os.path.join', 'os.path.join', (['category_folder_data', "(filename[:-3] + 'pts')"], {}), "(category_folder_data, filename[:-3] + 'pts')\n", (2600, 2645), False, 'import os\n'), ((2676, 2732), 'os.path.join', 'os.path.join', (['category_folder_err', "(filename[:-3] + 'ply')"], {}), "(category_folder_err, filename[:-3] + 'ply')\n", (2688, 2732), False, 'import os\n'), ((3143, 3160), 'numpy.amax', 'np.amax', (['label_gt'], {}), '(label_gt)\n', (3150, 3160), True, 'import numpy as np\n'), ((1598, 1621), 'numpy.loadtxt', 'np.loadtxt', (['filepath_gt'], {}), '(filepath_gt)\n', (1608, 1621), True, 'import numpy as np\n'), ((2476, 2501), 'numpy.loadtxt', 'np.loadtxt', (['filepath_pred'], {}), '(filepath_pred)\n', (2486, 2501), True, 'import numpy as np\n'), ((3015, 3036), 'numpy.array', 'np.array', (['coordinates'], {}), '(coordinates)\n', (3023, 3036), True, 'import numpy as np\n'), ((3406, 3450), 'numpy.logical_and', 'np.logical_and', (['locations_gt', 'locations_pred'], {}), '(locations_gt, locations_pred)\n', (3420, 3450), True, 'import numpy as np\n'), ((3486, 3529), 'numpy.logical_or', 'np.logical_or', (['locations_gt', 'locations_pred'], {}), '(locations_gt, locations_pred)\n', (3499, 3529), True, 'import numpy as np\n'), ((3894, 3925), 'numpy.sum', 'np.sum', (['label_correct_locations'], {}), '(label_correct_locations)\n', (3900, 3925), True, 'import numpy as np\n'), ((2397, 2420), 'numpy.loadtxt', 'np.loadtxt', (['filepath_gt'], {}), '(filepath_gt)\n', (2407, 2420), True, 'import numpy as np\n'), ((3555, 3574), 'numpy.sum', 'np.sum', (['I_locations'], {}), 
'(I_locations)\n', (3561, 3574), True, 'import numpy as np\n'), ((3627, 3646), 'numpy.sum', 'np.sum', (['U_locations'], {}), '(U_locations)\n', (3633, 3646), True, 'import numpy as np\n'), ((3577, 3597), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3585, 3597), True, 'import numpy as np\n'), ((3649, 3669), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3657, 3669), True, 'import numpy as np\n')]
|
from __future__ import print_function
import os
import numpy as np
import torch
from torchvision import datasets, transforms
from .smallnorb_dataset_helper import smallnorb, smallnorb_equivariance
from .utils import random_split, CustomDataset
def get_dataset(args):
if args.dataset == "cifar10":
train_transform = transforms.Compose([
transforms.ColorJitter(brightness=.2, contrast=.2),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
if args.valid_mode:
train_transform.transforms.insert(0, transforms.ToPILImage())
test_transform.transforms.insert(0, transforms.ToPILImage())
valid_transform = test_transform
train_dataset = datasets.CIFAR10('./data', train=True, download=True, transform=train_transform)
test_dataset = datasets.CIFAR10('./data', train=False, transform=test_transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=8, pin_memory=True, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, num_workers=8, pin_memory=True, shuffle=False)
        # For splitting the original training set into a new training set and a validation set.
# The new training set and validation set are named valid_mode_train and valid_mode_valid
# valid_mode_train + valid_mode_valid is the original training set
data, labels = random_split(data=train_dataset.data,
labels=np.array(train_dataset.targets),
n_classes=10,
n_samples_per_class=np.repeat(1000, 10).reshape(-1))
# make channels last and convert to np arrays
#data['valid_mode_train'] = np.moveaxis(np.array(data['valid_mode_train']), 1, -1)
#data['valid_mode_valid'] = np.moveaxis(np.array(data['valid_mode_valid']), 1, -1)
#print("data['valid_mode_train'].shape", data['valid_mode_train'].shape)
# dataloader
valid_mode_train_dataset = CustomDataset(data=data['valid_mode_train'], labels=labels['valid_mode_train'], transform=train_transform)
valid_mode_valid_dataset = CustomDataset(data=data['valid_mode_valid'], labels=labels['valid_mode_valid'], transform=valid_transform)
valid_mode_train_loader = torch.utils.data.DataLoader(valid_mode_train_dataset, batch_size=args.batch_size, shuffle=True)
valid_mode_valid_loader = torch.utils.data.DataLoader(valid_mode_valid_dataset, batch_size=args.test_batch_size, shuffle=False)
return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transform
elif args.dataset == "Fashion-MNIST":
train_transform = transforms.Compose([
transforms.ColorJitter(brightness=.2, contrast=.2),
transforms.RandomCrop(32, padding=4),
#transforms.RandomAffine(degrees=0, translate=[0.2, 0.2]),
#transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.2862,), (0.3529,))
])
test_transform = transforms.Compose([
transforms.Pad(padding=2),
transforms.ToTensor(),
transforms.Normalize((0.2862,), (0.3529,))
])
if args.valid_mode:
train_transform.transforms.insert(0, transforms.ToPILImage())
test_transform.transforms.insert(0, transforms.ToPILImage())
valid_transform = test_transform
train_dataset = datasets.FashionMNIST('./data', train=True, download=True, transform=train_transform)
test_dataset = datasets.FashionMNIST('./data', train=False, transform=test_transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False)
        # For splitting the original training set into a new training set and a validation set.
# The new training set and validation set are named valid_mode_train and valid_mode_valid
# valid_mode_train + valid_mode_valid is the original training set
data, labels = random_split(data=train_dataset.train_data, labels=train_dataset.train_labels,
n_classes=10, n_samples_per_class=np.repeat(1000, 10).reshape(-1))
# convert to np arrays
# data['valid_mode_train'] = np.array(data['valid_mode_train'])
# data['valid_mode_valid'] = np.array(data['valid_mode_valid'])
# data['valid_mode_train'] = np.moveaxis(np.array(data['valid_mode_train']), 1, -1)
# data['valid_mode_valid'] = np.moveaxis(np.array(data['valid_mode_valid']), 1, -1)
# dataloader
valid_mode_train_dataset = CustomDataset(data=data['valid_mode_train'], labels=labels['valid_mode_train'], transform=train_transform)
valid_mode_valid_dataset = CustomDataset(data=data['valid_mode_valid'], labels=labels['valid_mode_valid'], transform=valid_transform)
valid_mode_train_loader = torch.utils.data.DataLoader(valid_mode_train_dataset, batch_size=args.batch_size, shuffle=True)
valid_mode_valid_loader = torch.utils.data.DataLoader(valid_mode_valid_dataset, batch_size=args.test_batch_size, shuffle=False)
return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transform
elif args.dataset == "svhn":
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
#transforms.ColorJitter(brightness=.2, contrast=.2),
transforms.ToTensor(),
transforms.Normalize((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614))
])
print("train_transform", train_transform)
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614))
])
if args.valid_mode:
train_transform.transforms.insert(0, transforms.ToPILImage())
test_transform.transforms.insert(0, transforms.ToPILImage())
valid_transform = test_transform
# extra_dataset = datasets.SVHN(
# './data', split='extra', transform=train_transform, download=True)
# # Combine both training splits (https://arxiv.org/pdf/1605.07146.pdf)
# data = np.concatenate([train_dataset.data, extra_dataset.data], axis=0)
# labels = np.concatenate([train_dataset.labels, extra_dataset.labels], axis=0)
# train_dataset.data = data
# train_dataset.labels = labels
train_dataset = datasets.SVHN(
'./data', split='train', transform=train_transform, download=True)
test_dataset = datasets.SVHN(
'./data', split='test', transform=test_transform, download=True)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, num_workers=8, pin_memory=True,
batch_size=args.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset, num_workers=8, pin_memory=True,
batch_size=args.test_batch_size, shuffle=True)
        # For splitting the original training set into a new training set and a validation set.
# The new training set and validation set are named valid_mode_train and valid_mode_valid
# valid_mode_train + valid_mode_valid is the original training set
data, labels = random_split(data=train_dataset.data,
labels=train_dataset.labels,
n_classes=10,
n_samples_per_class=np.repeat(1000, 10).reshape(-1))
# make channels last and convert to np arrays
data['valid_mode_train'] = np.moveaxis(np.array(data['valid_mode_train']), 1, -1)
data['valid_mode_valid'] = np.moveaxis(np.array(data['valid_mode_valid']), 1, -1)
print("data['valid_mode_train'].shape", data['valid_mode_train'].shape)
# dataloader
valid_mode_train_dataset = CustomDataset(data=data['valid_mode_train'], labels=labels['valid_mode_train'], transform=train_transform)
valid_mode_valid_dataset = CustomDataset(data=data['valid_mode_valid'], labels=labels['valid_mode_valid'], transform=valid_transform)
valid_mode_train_loader = torch.utils.data.DataLoader(valid_mode_train_dataset, batch_size=args.batch_size, shuffle=True)
valid_mode_valid_loader = torch.utils.data.DataLoader(valid_mode_valid_dataset, batch_size=args.test_batch_size, shuffle=False)
return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transform
elif args.dataset == "smallnorb":
working_dir = args.working_dir
dataset_paths = {'train': os.path.join(working_dir, 'train'),
'test': os.path.join(working_dir, 'test')}
dataloaders, train_transf = smallnorb(args, dataset_paths)
train_loader = dataloaders['train_valid']
test_loader = dataloaders['test']
valid_mode_train_loader = dataloaders['train']
valid_mode_valid_loader = dataloaders['valid']
# print("len(train_loader.dataset)", len(train_loader.dataset))
# print("len(train_loader.dataset)", len(train_loader.dataset))
# print("len(test_loader.dataset)", len(test_loader.dataset))
# print("len(valid_mode_train_loader.dataset)", len(valid_mode_train_loader.dataset))
# print("len(valid_mode_valid_loader.dataset)", len(valid_mode_valid_loader.dataset))
return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transf
elif args.dataset == "smallNORB_48_azimuth" or args.dataset == "smallNORB_48_elevation":
working_dir = args.working_dir
dataset_paths = {'train': os.path.join(working_dir, 'train'),
'test_novel': os.path.join(working_dir, 'test_novel'),
'test_familiar': os.path.join(working_dir, 'test_familiar')}
dataloaders, train_transform = smallnorb_equivariance(args, dataset_paths)
train_loader = dataloaders['train']
test_novel_loader = dataloaders['test_novel']
test_familiar_loader = dataloaders['test_familiar']
print("len(train_loader.dataset)", len(train_loader.dataset))
print("len(test_novel_loader.dataset)", len(test_novel_loader.dataset))
print("len(test_familiar_loader.dataset)", len(test_familiar_loader.dataset))
return train_loader, test_novel_loader, test_familiar_loader, train_transform
else:
print("Unsupported dataset.")
quit()
return train_loader, test_loader
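# --- Illustrative sketch (editor addition) ---
# get_dataset expects an argparse-style namespace; the attribute names below mirror
# the ones read in this function (dataset, batch_size, test_batch_size, valid_mode,
# working_dir), while the values themselves are only illustrative.
def _example_get_dataset():
    from argparse import Namespace
    args = Namespace(dataset="cifar10", batch_size=128, test_batch_size=256,
                     valid_mode=True, working_dir="./data/smallnorb")
    return get_dataset(args)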
|
[
"torchvision.transforms.ColorJitter",
"torchvision.datasets.FashionMNIST",
"torch.utils.data.DataLoader",
"torchvision.transforms.RandomHorizontalFlip",
"os.path.join",
"torchvision.transforms.ToPILImage",
"torchvision.datasets.CIFAR10",
"torchvision.transforms.Pad",
"numpy.array",
"numpy.repeat",
"torchvision.transforms.Normalize",
"torchvision.datasets.SVHN",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.ToTensor"
] |
[((1265, 1350), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./data"""'], {'train': '(True)', 'download': '(True)', 'transform': 'train_transform'}), "('./data', train=True, download=True, transform=train_transform\n )\n", (1281, 1350), False, 'from torchvision import datasets, transforms\n'), ((1369, 1434), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./data"""'], {'train': '(False)', 'transform': 'test_transform'}), "('./data', train=False, transform=test_transform)\n", (1385, 1434), False, 'from torchvision import datasets, transforms\n'), ((1458, 1578), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'num_workers': '(8)', 'pin_memory': '(True)', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size,\n num_workers=8, pin_memory=True, shuffle=True)\n', (1485, 1578), False, 'import torch\n'), ((1597, 1722), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.test_batch_size', 'num_workers': '(8)', 'pin_memory': '(True)', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.test_batch_size,\n num_workers=8, pin_memory=True, shuffle=False)\n', (1624, 1722), False, 'import torch\n'), ((2904, 3004), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(valid_mode_train_dataset, batch_size=args.\n batch_size, shuffle=True)\n', (2931, 3004), False, 'import torch\n'), ((3034, 3140), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_valid_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)'}), '(valid_mode_valid_dataset, batch_size=args.\n test_batch_size, shuffle=False)\n', (3061, 3140), False, 'import torch\n'), ((4324, 4414), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""./data"""'], {'train': '(True)', 'download': '(True)', 'transform': 'train_transform'}), "('./data', train=True, download=True, transform=\n train_transform)\n", (4345, 4414), False, 'from torchvision import datasets, transforms\n'), ((4434, 4504), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""./data"""'], {'train': '(False)', 'transform': 'test_transform'}), "('./data', train=False, transform=test_transform)\n", (4455, 4504), False, 'from torchvision import datasets, transforms\n'), ((4528, 4616), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=True)\n', (4555, 4616), False, 'import torch\n'), ((4635, 4728), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)'}), '(test_dataset, batch_size=args.test_batch_size,\n shuffle=False)\n', (4662, 4728), False, 'import torch\n'), ((5906, 6006), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(valid_mode_train_dataset, batch_size=args.\n batch_size, shuffle=True)\n', (5933, 6006), False, 'import torch\n'), ((6036, 6142), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_valid_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)'}), '(valid_mode_valid_dataset, batch_size=args.\n test_batch_size, shuffle=False)\n', (6063, 6142), False, 'import torch\n'), ((400, 452), 
'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.2)', 'contrast': '(0.2)'}), '(brightness=0.2, contrast=0.2)\n', (422, 452), False, 'from torchvision import datasets, transforms\n'), ((485, 521), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (506, 521), False, 'from torchvision import datasets, transforms\n'), ((556, 589), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (587, 589), False, 'from torchvision import datasets, transforms\n'), ((624, 645), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (643, 645), False, 'from torchvision import datasets, transforms\n'), ((680, 751), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (700, 751), False, 'from torchvision import datasets, transforms\n'), ((864, 885), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (883, 885), False, 'from torchvision import datasets, transforms\n'), ((920, 991), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (940, 991), False, 'from torchvision import datasets, transforms\n'), ((1102, 1125), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1123, 1125), False, 'from torchvision import datasets, transforms\n'), ((1175, 1198), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1196, 1198), False, 'from torchvision import datasets, transforms\n'), ((2084, 2115), 'numpy.array', 'np.array', (['train_dataset.targets'], {}), '(train_dataset.targets)\n', (2092, 2115), True, 'import numpy as np\n'), ((7546, 7631), 'torchvision.datasets.SVHN', 'datasets.SVHN', (['"""./data"""'], {'split': '"""train"""', 'transform': 'train_transform', 'download': '(True)'}), "('./data', split='train', transform=train_transform, download=True\n )\n", (7559, 7631), False, 'from torchvision import datasets, transforms\n'), ((7663, 7741), 'torchvision.datasets.SVHN', 'datasets.SVHN', (['"""./data"""'], {'split': '"""test"""', 'transform': 'test_transform', 'download': '(True)'}), "('./data', split='test', transform=test_transform, download=True)\n", (7676, 7741), False, 'from torchvision import datasets, transforms\n'), ((7778, 7906), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'num_workers': '(8)', 'pin_memory': '(True)', 'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(dataset=train_dataset, num_workers=8,\n pin_memory=True, batch_size=args.batch_size, shuffle=True)\n', (7805, 7906), False, 'import torch\n'), ((7950, 8083), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_dataset', 'num_workers': '(8)', 'pin_memory': '(True)', 'batch_size': 'args.test_batch_size', 'shuffle': '(True)'}), '(dataset=test_dataset, num_workers=8, pin_memory\n =True, batch_size=args.test_batch_size, shuffle=True)\n', (7977, 8083), False, 'import torch\n'), ((9288, 9388), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(valid_mode_train_dataset, batch_size=args.\n batch_size, shuffle=True)\n', (9315, 9388), False, 'import torch\n'), 
((9418, 9524), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_mode_valid_dataset'], {'batch_size': 'args.test_batch_size', 'shuffle': '(False)'}), '(valid_mode_valid_dataset, batch_size=args.\n test_batch_size, shuffle=False)\n', (9445, 9524), False, 'import torch\n'), ((3366, 3418), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.2)', 'contrast': '(0.2)'}), '(brightness=0.2, contrast=0.2)\n', (3388, 3418), False, 'from torchvision import datasets, transforms\n'), ((3451, 3487), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (3472, 3487), False, 'from torchvision import datasets, transforms\n'), ((3683, 3704), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3702, 3704), False, 'from torchvision import datasets, transforms\n'), ((3739, 3781), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.2862,)', '(0.3529,)'], {}), '((0.2862,), (0.3529,))\n', (3759, 3781), False, 'from torchvision import datasets, transforms\n'), ((3893, 3918), 'torchvision.transforms.Pad', 'transforms.Pad', ([], {'padding': '(2)'}), '(padding=2)\n', (3907, 3918), False, 'from torchvision import datasets, transforms\n'), ((3953, 3974), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3972, 3974), False, 'from torchvision import datasets, transforms\n'), ((4009, 4051), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.2862,)', '(0.3529,)'], {}), '((0.2862,), (0.3529,))\n', (4029, 4051), False, 'from torchvision import datasets, transforms\n'), ((4161, 4184), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (4182, 4184), False, 'from torchvision import datasets, transforms\n'), ((4234, 4257), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (4255, 4257), False, 'from torchvision import datasets, transforms\n'), ((8724, 8758), 'numpy.array', 'np.array', (["data['valid_mode_train']"], {}), "(data['valid_mode_train'])\n", (8732, 8758), True, 'import numpy as np\n'), ((8814, 8848), 'numpy.array', 'np.array', (["data['valid_mode_valid']"], {}), "(data['valid_mode_valid'])\n", (8822, 8848), True, 'import numpy as np\n'), ((2215, 2234), 'numpy.repeat', 'np.repeat', (['(1000)', '(10)'], {}), '(1000, 10)\n', (2224, 2234), True, 'import numpy as np\n'), ((6347, 6383), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (6368, 6383), False, 'from torchvision import datasets, transforms\n'), ((6462, 6483), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6481, 6483), False, 'from torchvision import datasets, transforms\n'), ((6497, 6596), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4376821, 0.4437697, 0.47280442)', '(0.19803012, 0.20101562, 0.19703614)'], {}), '((0.4376821, 0.4437697, 0.47280442), (0.19803012, \n 0.20101562, 0.19703614))\n', (6517, 6596), False, 'from torchvision import datasets, transforms\n'), ((6711, 6732), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6730, 6732), False, 'from torchvision import datasets, transforms\n'), ((6746, 6845), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4376821, 0.4437697, 0.47280442)', '(0.19803012, 0.20101562, 0.19703614)'], {}), '((0.4376821, 0.4437697, 0.47280442), (0.19803012, \n 0.20101562, 0.19703614))\n', (6766, 6845), False, 'from 
torchvision import datasets, transforms\n'), ((6929, 6952), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6950, 6952), False, 'from torchvision import datasets, transforms\n'), ((7002, 7025), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (7023, 7025), False, 'from torchvision import datasets, transforms\n'), ((9739, 9773), 'os.path.join', 'os.path.join', (['working_dir', '"""train"""'], {}), "(working_dir, 'train')\n", (9751, 9773), False, 'import os\n'), ((9808, 9841), 'os.path.join', 'os.path.join', (['working_dir', '"""test"""'], {}), "(working_dir, 'test')\n", (9820, 9841), False, 'import os\n'), ((5163, 5182), 'numpy.repeat', 'np.repeat', (['(1000)', '(10)'], {}), '(1000, 10)\n', (5172, 5182), True, 'import numpy as np\n'), ((10785, 10819), 'os.path.join', 'os.path.join', (['working_dir', '"""train"""'], {}), "(working_dir, 'train')\n", (10797, 10819), False, 'import os\n'), ((10860, 10899), 'os.path.join', 'os.path.join', (['working_dir', '"""test_novel"""'], {}), "(working_dir, 'test_novel')\n", (10872, 10899), False, 'import os\n'), ((10943, 10985), 'os.path.join', 'os.path.join', (['working_dir', '"""test_familiar"""'], {}), "(working_dir, 'test_familiar')\n", (10955, 10985), False, 'import os\n'), ((8590, 8609), 'numpy.repeat', 'np.repeat', (['(1000)', '(10)'], {}), '(1000, 10)\n', (8599, 8609), True, 'import numpy as np\n')]
|
import datetime
import logging
import tushare as ts
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow import keras
import tensorflow as tf
from ts.build_model import BuildModel
from ts.db_utils import get_daily_by_trade_date
from ts.simulation_history import SimulationHistory
from ts.st_history_data import x_train_col_index
class Change(SimulationHistory):
model_cache = {}
t1_predictions = None
t0_predictions = 0
t0_index = ''
def is_sell(self, index, row):
logging.debug('index: %s, date: %s', index, row['date'])
today = datetime.datetime.strptime(row['date'], '%Y-%m-%d %H:%M:%S')
df = get_daily_by_trade_date(self.get_code(), today.strftime('%Y%m%d'))
change_predictions, true_predictions = self.predictions(df, ['open', 'high', 'low', 'close'], 'pct_chg',
self.get_code() + '_pct_chg_model.h5')
logging.debug('change_predictions:%s, true_predictions:%s', change_predictions, true_predictions)
if len(df) == 0:
return False
        if self.t0_predictions is None:
return False
if self.t0_predictions <= 0:
return False
logging.debug('row[ma5] * (1+self.t0_predictions/100) :%s, ma5: %s, price:%s', row['ma5'] * (1+self.t0_predictions/100), row['ma5'], row['close'])
return row['close'] > row['ma5'] * (1+self.t0_predictions/100)
def is_buy(self, index, row):
logging.debug('index: %s, date: %s', index, row['date'])
today = datetime.datetime.strptime(row['date'], '%Y-%m-%d %H:%M:%S')
df = get_daily_by_trade_date(self.get_code(), today.strftime('%Y%m%d'))
change_predictions, true_predictions = self.predictions(df, ['open', 'high', 'low', 'close'], 'pct_chg',
self.get_code() + '_pct_chg_model.h5')
self.t0_predictions = change_predictions
logging.debug('change_predictions:%s, true_predictions:%s', change_predictions, true_predictions)
if self.t0_index != index:
self.t1_predictions = self.t0_predictions
self.t0_index = index
if len(df) == 0:
return False
if self.t0_predictions <= 0:
return False
logging.debug('row[ma5] * (1-change_predictions/100) :%s, ma5: %s, price:%s', row['ma5'] * (1-self.t0_predictions/100), row['ma5'], row['close'])
return row['close'] < row['ma5'] * (1-self.t0_predictions/100)
def predictions(self, df, column_names, label_name, module_name):
columns = df.columns.values.tolist()
stock_data = np.array(df)
x_train_col = x_train_col_index(columns, column_names)
y_train_col = x_train_col_index(columns, [label_name])[0]
x = np.array(stock_data[:, x_train_col])
y = np.array(stock_data[:, y_train_col])
if len(x) == 0:
return 0, 0
model = self.model_cache.get(module_name)
        if model is None:
model = keras.models.load_model(module_name)
optimizer = tf.train.RMSPropOptimizer(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae'])
self.model_cache[module_name] = model
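        # scale the raw model output to a percentage change; the /10 and +1.5 constants appear to be empirical calibration values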
predictions = model.predict(x).flatten()[0]/10 + 1.5
return predictions, y[0]
|
[
"logging.debug",
"tensorflow.keras.models.load_model",
"tensorflow.train.RMSPropOptimizer",
"datetime.datetime.strptime",
"ts.st_history_data.x_train_col_index",
"numpy.array"
] |
[((532, 588), 'logging.debug', 'logging.debug', (['"""index: %s, date: %s"""', 'index', "row['date']"], {}), "('index: %s, date: %s', index, row['date'])\n", (545, 588), False, 'import logging\n'), ((605, 665), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["row['date']", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(row['date'], '%Y-%m-%d %H:%M:%S')\n", (631, 665), False, 'import datetime\n'), ((966, 1067), 'logging.debug', 'logging.debug', (['"""change_predictions:%s, true_predictions:%s"""', 'change_predictions', 'true_predictions'], {}), "('change_predictions:%s, true_predictions:%s',\n change_predictions, true_predictions)\n", (979, 1067), False, 'import logging\n'), ((1253, 1407), 'logging.debug', 'logging.debug', (['"""row[ma5] * (1+self.t0_predictions/100) :%s, ma5: %s, price:%s"""', "(row['ma5'] * (1 + self.t0_predictions / 100))", "row['ma5']", "row['close']"], {}), "('row[ma5] * (1+self.t0_predictions/100) :%s, ma5: %s, price:%s',\n row['ma5'] * (1 + self.t0_predictions / 100), row['ma5'], row['close'])\n", (1266, 1407), False, 'import logging\n'), ((1515, 1571), 'logging.debug', 'logging.debug', (['"""index: %s, date: %s"""', 'index', "row['date']"], {}), "('index: %s, date: %s', index, row['date'])\n", (1528, 1571), False, 'import logging\n'), ((1588, 1648), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["row['date']", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(row['date'], '%Y-%m-%d %H:%M:%S')\n", (1614, 1648), False, 'import datetime\n'), ((1999, 2100), 'logging.debug', 'logging.debug', (['"""change_predictions:%s, true_predictions:%s"""', 'change_predictions', 'true_predictions'], {}), "('change_predictions:%s, true_predictions:%s',\n change_predictions, true_predictions)\n", (2012, 2100), False, 'import logging\n'), ((2344, 2497), 'logging.debug', 'logging.debug', (['"""row[ma5] * (1-change_predictions/100) :%s, ma5: %s, price:%s"""', "(row['ma5'] * (1 - self.t0_predictions / 100))", "row['ma5']", "row['close']"], {}), "('row[ma5] * (1-change_predictions/100) :%s, ma5: %s, price:%s',\n row['ma5'] * (1 - self.t0_predictions / 100), row['ma5'], row['close'])\n", (2357, 2497), False, 'import logging\n'), ((2699, 2711), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (2707, 2711), True, 'import numpy as np\n'), ((2735, 2775), 'ts.st_history_data.x_train_col_index', 'x_train_col_index', (['columns', 'column_names'], {}), '(columns, column_names)\n', (2752, 2775), False, 'from ts.st_history_data import x_train_col_index\n'), ((2855, 2891), 'numpy.array', 'np.array', (['stock_data[:, x_train_col]'], {}), '(stock_data[:, x_train_col])\n', (2863, 2891), True, 'import numpy as np\n'), ((2904, 2940), 'numpy.array', 'np.array', (['stock_data[:, y_train_col]'], {}), '(stock_data[:, y_train_col])\n', (2912, 2940), True, 'import numpy as np\n'), ((2798, 2838), 'ts.st_history_data.x_train_col_index', 'x_train_col_index', (['columns', '[label_name]'], {}), '(columns, [label_name])\n', (2815, 2838), False, 'from ts.st_history_data import x_train_col_index\n'), ((3088, 3124), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['module_name'], {}), '(module_name)\n', (3111, 3124), False, 'from tensorflow import keras\n'), ((3150, 3182), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['(0.001)'], {}), '(0.001)\n', (3175, 3182), True, 'import tensorflow as tf\n')]
|
import numpy as np
from tensorflow.keras.models import load_model
from buffer import (
ReplayBuffer,
build
)
class DQNAgent:
def __init__(self, alpha, gamma, n_actions, epsilon,
batch_size, input_dims, fc1_dims, fc2_dims, epsilon_dec=0.996,
epsilon_end=0.01, mem_size=1000000,
fname='dqn_model.h5'):
self.action_space = [i for i in range(n_actions)]
self.n_actions = n_actions
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_dec = epsilon_dec
self.epsilon_min = epsilon_end
self.batch_size = batch_size
self.model_file = fname
self.memory = ReplayBuffer(mem_size, input_dims, n_actions, discrete=True)
self.q_eval = build(alpha, n_actions, input_dims, fc1_dims, fc2_dims)
def remember(self, state, action, reward, new_state, done):
self.memory.store_transition(state, action, reward, new_state, done)
def choose_action(self, state):
"""
Choose action given state of the game.
"""
state = state[np.newaxis, :]
# epsilon greedy
rand = np.random.random()
if rand < self.epsilon:
action = np.random.choice(self.action_space)
else:
actions = self.q_eval.predict(state)
action = np.argmax(actions)
return action
def learn(self):
if self.memory.mem_cntr < self.batch_size:
return
state, action, reward, new_state, done = self.memory.sample_buffer(self.batch_size)
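        # with discrete=True the buffer returns one-hot actions; dotting with [0..n_actions-1] recovers integer action indices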
action_values = np.array(self.action_space, dtype=np.int8)
action_indices = np.dot(action, action_values)
q_eval = self.q_eval.predict(state)
q_next = self.q_eval.predict(new_state)
q_target = q_eval.copy()
batch_index = np.arange(self.batch_size, dtype=np.int32)
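        # Q-learning target: reward + gamma * max_a' Q(s', a'); the done term is assumed to be a 0/1 mask that zeroes the bootstrap at terminal states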
q_target[batch_index, action_indices] = reward + self.gamma * np.max(q_next, axis=1) * done
_ = self.q_eval.fit(state, q_target, verbose=0)
self.epsilon = self.epsilon*self.epsilon_dec if self.epsilon > self.epsilon_min else self.epsilon_min
def save_model(self):
self.q_eval.save(self.model_file)
def load_model(self):
self.q_eval = load_model(self.model_file)
|
[
"tensorflow.keras.models.load_model",
"numpy.argmax",
"buffer.build",
"numpy.max",
"numpy.random.random",
"numpy.array",
"numpy.arange",
"numpy.random.choice",
"numpy.dot",
"buffer.ReplayBuffer"
] |
[((717, 777), 'buffer.ReplayBuffer', 'ReplayBuffer', (['mem_size', 'input_dims', 'n_actions'], {'discrete': '(True)'}), '(mem_size, input_dims, n_actions, discrete=True)\n', (729, 777), False, 'from buffer import ReplayBuffer, build\n'), ((809, 864), 'buffer.build', 'build', (['alpha', 'n_actions', 'input_dims', 'fc1_dims', 'fc2_dims'], {}), '(alpha, n_actions, input_dims, fc1_dims, fc2_dims)\n', (814, 864), False, 'from buffer import ReplayBuffer, build\n'), ((1222, 1240), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1238, 1240), True, 'import numpy as np\n'), ((1707, 1749), 'numpy.array', 'np.array', (['self.action_space'], {'dtype': 'np.int8'}), '(self.action_space, dtype=np.int8)\n', (1715, 1749), True, 'import numpy as np\n'), ((1775, 1804), 'numpy.dot', 'np.dot', (['action', 'action_values'], {}), '(action, action_values)\n', (1781, 1804), True, 'import numpy as np\n'), ((1970, 2012), 'numpy.arange', 'np.arange', (['self.batch_size'], {'dtype': 'np.int32'}), '(self.batch_size, dtype=np.int32)\n', (1979, 2012), True, 'import numpy as np\n'), ((2436, 2463), 'tensorflow.keras.models.load_model', 'load_model', (['self.model_file'], {}), '(self.model_file)\n', (2446, 2463), False, 'from tensorflow.keras.models import load_model\n'), ((1294, 1329), 'numpy.random.choice', 'np.random.choice', (['self.action_space'], {}), '(self.action_space)\n', (1310, 1329), True, 'import numpy as np\n'), ((1414, 1432), 'numpy.argmax', 'np.argmax', (['actions'], {}), '(actions)\n', (1423, 1432), True, 'import numpy as np\n'), ((2083, 2105), 'numpy.max', 'np.max', (['q_next'], {'axis': '(1)'}), '(q_next, axis=1)\n', (2089, 2105), True, 'import numpy as np\n')]
|
'''H5 data prep'''
## External modules.
import csv
import numpy as np
import os
import tables
## Internal modules.
from mml.config import dir_data_toread
from mml.config import dir_data_towrite
from mml.utils import makedir_safe
###############################################################################
## Clerical setup.
data_name = "adult"
toread_tr = os.path.join(dir_data_toread, data_name, "adult.data")
toread_te = os.path.join(dir_data_toread, data_name, "adult.test")
newdir = os.path.join(dir_data_towrite, data_name)
towrite = os.path.join(newdir, "adult.h5")
attribute_names = [
"age", "workclass", "fnlwgt", "education", "education-num",
"marital-status", "occupation", "relationship", "race", "sex",
"capital-gain", "capital-loss", "hours-per-week", "native-country"
] # order is important.
attribute_dict = {
"age": ["continuous"],
"workclass": ["Private", "Self-emp-not-inc", "Self-emp-inc",
"Federal-gov", "Local-gov", "State-gov",
"Without-pay", "Never-worked"],
"fnlwgt": ["continuous"],
"education": ["Bachelors", "Some-college", "11th", "HS-grad",
"Prof-school", "Assoc-acdm", "Assoc-voc", "9th",
"7th-8th", "12th", "Masters", "1st-4th", "10th",
"Doctorate", "5th-6th", "Preschool"],
"education-num": ["continuous"],
"marital-status": ["Married-civ-spouse", "Divorced",
"Never-married", "Separated", "Widowed",
"Married-spouse-absent", "Married-AF-spouse"],
"occupation": ["Tech-support", "Craft-repair", "Other-service",
"Sales", "Exec-managerial", "Prof-specialty",
"Handlers-cleaners", "Machine-op-inspct",
"Adm-clerical", "Farming-fishing",
"Transport-moving", "Priv-house-serv",
"Protective-serv", "Armed-Forces"],
"relationship": ["Wife", "Own-child", "Husband", "Not-in-family",
"Other-relative", "Unmarried"],
"race": ["White", "Asian-Pac-Islander", "Amer-Indian-Eskimo",
"Other", "Black"],
"sex": ["Female", "Male"],
"capital-gain": ["continuous"],
"capital-loss": ["continuous"],
"hours-per-week": ["continuous"],
"native-country": ["United-States", "Cambodia", "England",
"Puerto-Rico", "Canada", "Germany",
"Outlying-US(Guam-USVI-etc)", "India",
"Japan", "Greece", "South", "China", "Cuba",
"Iran", "Honduras", "Philippines", "Italy",
"Poland", "Jamaica", "Vietnam", "Mexico",
"Portugal", "Ireland", "France",
"Dominican-Republic", "Laos", "Ecuador",
"Taiwan", "Haiti", "Columbia", "Hungary",
"Guatemala", "Nicaragua", "Scotland",
"Thailand", "Yugoslavia", "El-Salvador",
"Trinadad&Tobago", "Peru", "Hong",
"Holand-Netherlands"]
}
label_dict = {"<=50K": 0,
">50K": 1}
n_tr = 30162 # number of clean instances.
n_te = 15060 # number of clean instances.
n_all = n_tr+n_te
num_features = np.array(
[ len(attribute_dict[key]) for key in attribute_dict.keys() ]
).sum() # number of features after a one-hot encoding.
num_classes = 2
num_labels = 1
title = data_name+": Full dataset"
title_X = data_name+": Features"
title_y = data_name+": Labels"
dtype_X = np.float32
atom_X = tables.Float32Atom()
dtype_y = np.uint8
atom_y = tables.UInt8Atom()
def parse_line(x, y):
## Inputs are a bit complicated.
x_out_list = []
for j in range(len(x)):
value = x[j]
attribute = attribute_names[j]
num_distinct = len(attribute_dict[attribute])
## Ignore all points with missing entries.
if value == "?":
return (None, None)
else:
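            ## Categorical attributes (several listed values) are one-hot encoded; continuous ones stay as single floats.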
if num_distinct > 1:
idx_hot = attribute_dict[attribute].index(value)
onehot = np.zeros(num_distinct, dtype=dtype_X)
onehot[idx_hot] = 1.0
x_out_list.append(onehot)
else:
x_out_list.append(np.array([value], dtype=dtype_X))
x_out = np.concatenate(x_out_list)
if len(x_out) != num_features:
raise ValueError("Something is wrong with the feature vec parser.")
## Labels are easy.
y_out = np.array([label_dict[y]], dtype=dtype_y)
return x_out, y_out
def raw_to_h5():
'''
Transform the raw dataset into one of HDF5 type.
'''
X_raw_tr = np.zeros((n_tr,num_features), dtype=dtype_X)
y_raw_tr = np.zeros((n_tr,num_labels), dtype=dtype_y)
X_raw_te = np.zeros((n_te,num_features), dtype=dtype_X)
y_raw_te = np.zeros((n_te,num_labels), dtype=dtype_y)
print("Preparation: {}".format(data_name))
## Read in the raw training data.
with open(toread_tr, newline="") as f_table:
print("Read {}.".format(toread_tr))
f_reader = csv.reader(f_table, delimiter=",",
skipinitialspace=True)
## Populate the placeholder numpy arrays.
idx = 0
for line in f_reader:
if len(line) == 0:
continue # do nothing for blank lines.
## Numpy arrays for individual instance.
x, y = parse_line(x=line[0:-1], y=line[-1])
if x is None:
continue # skip instances with missing values.
else:
X_raw_tr[idx,:] = x
y_raw_tr[idx,0] = y
## Update the index (also counts the clean data points).
idx += 1
## Check that number of *clean* instances is as expected.
print(
"Number of clean guys (tr): {}. Note n_tr = {}".format(idx,n_tr)
)
## Read in the raw test data.
with open(toread_te, newline="") as f_table:
print("Read {}.".format(toread_te))
f_reader = csv.reader(f_table, delimiter=",",
skipinitialspace=True)
## Populate the placeholder numpy arrays.
idx = 0
for i, line in enumerate(f_reader):
if i == 0:
continue # skip the first line, only for TEST data.
if len(line) == 0:
continue # do nothing for blank lines.
## Numpy arrays for individual instance.
x, y = parse_line(x=line[0:-1], y=line[-1][0:-1])
            # Note: for test data, we strip the trailing "." from labels.
if x is None:
continue # skip instances with missing values.
else:
X_raw_te[idx,:] = x
y_raw_te[idx,0] = y
## Update the index (also counts the clean data points).
idx += 1
## Check that number of *clean* instances is as expected.
print(
"Number of clean guys (te): {}. Note n_te = {}".format(idx,n_te)
)
## Concatenate.
X_raw = np.vstack((X_raw_tr, X_raw_te))
y_raw = np.vstack((y_raw_tr, y_raw_te))
## Create and populate the HDF5 file.
makedir_safe(newdir)
with tables.open_file(towrite, mode="w", title=title) as myh5:
myh5.create_array(where=myh5.root,
name="X",
obj=X_raw,
atom=atom_X,
title=title_X)
myh5.create_array(where=myh5.root,
name="y",
obj=y_raw,
atom=atom_y,
title=title_y)
print(myh5)
print("Wrote {}.".format(towrite))
## Exit all context managers before returning.
print("Done ({}).".format(data_name))
return None
if __name__ == "__main__":
raw_to_h5()
###############################################################################
|
[
"csv.reader",
"numpy.zeros",
"tables.Float32Atom",
"numpy.vstack",
"mml.utils.makedir_safe",
"numpy.array",
"tables.UInt8Atom",
"tables.open_file",
"os.path.join",
"numpy.concatenate"
] |
[((368, 422), 'os.path.join', 'os.path.join', (['dir_data_toread', 'data_name', '"""adult.data"""'], {}), "(dir_data_toread, data_name, 'adult.data')\n", (380, 422), False, 'import os\n'), ((435, 489), 'os.path.join', 'os.path.join', (['dir_data_toread', 'data_name', '"""adult.test"""'], {}), "(dir_data_toread, data_name, 'adult.test')\n", (447, 489), False, 'import os\n'), ((499, 540), 'os.path.join', 'os.path.join', (['dir_data_towrite', 'data_name'], {}), '(dir_data_towrite, data_name)\n', (511, 540), False, 'import os\n'), ((551, 583), 'os.path.join', 'os.path.join', (['newdir', '"""adult.h5"""'], {}), "(newdir, 'adult.h5')\n", (563, 583), False, 'import os\n'), ((3551, 3571), 'tables.Float32Atom', 'tables.Float32Atom', ([], {}), '()\n', (3569, 3571), False, 'import tables\n'), ((3600, 3618), 'tables.UInt8Atom', 'tables.UInt8Atom', ([], {}), '()\n', (3616, 3618), False, 'import tables\n'), ((4331, 4357), 'numpy.concatenate', 'np.concatenate', (['x_out_list'], {}), '(x_out_list)\n', (4345, 4357), True, 'import numpy as np\n'), ((4506, 4546), 'numpy.array', 'np.array', (['[label_dict[y]]'], {'dtype': 'dtype_y'}), '([label_dict[y]], dtype=dtype_y)\n', (4514, 4546), True, 'import numpy as np\n'), ((4680, 4725), 'numpy.zeros', 'np.zeros', (['(n_tr, num_features)'], {'dtype': 'dtype_X'}), '((n_tr, num_features), dtype=dtype_X)\n', (4688, 4725), True, 'import numpy as np\n'), ((4740, 4783), 'numpy.zeros', 'np.zeros', (['(n_tr, num_labels)'], {'dtype': 'dtype_y'}), '((n_tr, num_labels), dtype=dtype_y)\n', (4748, 4783), True, 'import numpy as np\n'), ((4798, 4843), 'numpy.zeros', 'np.zeros', (['(n_te, num_features)'], {'dtype': 'dtype_X'}), '((n_te, num_features), dtype=dtype_X)\n', (4806, 4843), True, 'import numpy as np\n'), ((4858, 4901), 'numpy.zeros', 'np.zeros', (['(n_te, num_labels)'], {'dtype': 'dtype_y'}), '((n_te, num_labels), dtype=dtype_y)\n', (4866, 4901), True, 'import numpy as np\n'), ((7196, 7227), 'numpy.vstack', 'np.vstack', (['(X_raw_tr, X_raw_te)'], {}), '((X_raw_tr, X_raw_te))\n', (7205, 7227), True, 'import numpy as np\n'), ((7240, 7271), 'numpy.vstack', 'np.vstack', (['(y_raw_tr, y_raw_te)'], {}), '((y_raw_tr, y_raw_te))\n', (7249, 7271), True, 'import numpy as np\n'), ((7319, 7339), 'mml.utils.makedir_safe', 'makedir_safe', (['newdir'], {}), '(newdir)\n', (7331, 7339), False, 'from mml.utils import makedir_safe\n'), ((5118, 5175), 'csv.reader', 'csv.reader', (['f_table'], {'delimiter': '""","""', 'skipinitialspace': '(True)'}), "(f_table, delimiter=',', skipinitialspace=True)\n", (5128, 5175), False, 'import csv\n'), ((6118, 6175), 'csv.reader', 'csv.reader', (['f_table'], {'delimiter': '""","""', 'skipinitialspace': '(True)'}), "(f_table, delimiter=',', skipinitialspace=True)\n", (6128, 6175), False, 'import csv\n'), ((7349, 7397), 'tables.open_file', 'tables.open_file', (['towrite'], {'mode': '"""w"""', 'title': 'title'}), "(towrite, mode='w', title=title)\n", (7365, 7397), False, 'import tables\n'), ((4098, 4135), 'numpy.zeros', 'np.zeros', (['num_distinct'], {'dtype': 'dtype_X'}), '(num_distinct, dtype=dtype_X)\n', (4106, 4135), True, 'import numpy as np\n'), ((4268, 4300), 'numpy.array', 'np.array', (['[value]'], {'dtype': 'dtype_X'}), '([value], dtype=dtype_X)\n', (4276, 4300), True, 'import numpy as np\n')]
|
import numpy as np
import random as rd
import tensorflow as tf
from tensorflow import keras
class Brain():
def __init__(self,brain_spec, random = True, weights = None):
self.brain_spec = brain_spec
##INIT
        # This is a new brain
self.neurones = keras.Sequential()
for i in range(len(brain_spec)-2):
#init the weights between two layers, with matrix [layer_i,layer_i+1] and the bias
self.neurones.add(keras.layers.Dense(brain_spec[i+1],activation= "elu",input_shape=(brain_spec[i],)))
#output layer
self.neurones.add(keras.layers.Dense(brain_spec[-1], activation="softmax"))
        # In case specific initial weights are provided
        if not random:
            assert weights is not None
self.neurones.set_weights(weights)
#self.brain.compile(optimizer="adam", loss =t.tanh_custom_loss,metrics=[t.tanh_custom_loss])
self.optimizer = keras.optimizers.Adam(learning_rate=0.01)
def think(self, x):
return(self.neurones(np.expand_dims(x,axis=0))).numpy()[0]
def mutate(self,mutation_factor = 0.1):
weights = self.neurones.get_weights()
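        # multiplicative mutation: each entry is perturbed by up to ±mutation_factor of its own value, on a random 0/1 mask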
for layer in weights:
layer += layer*rd.uniform(-1*mutation_factor,1*mutation_factor)*np.random.randint(2,size=layer.shape)
self.neurones.set_weights(weights)
def expand(self):
pass
def learn(self,memory):
pass
if __name__ == "__main__":
TEST = True
if TEST:
test_input = np.array([1,1,1,1])
output_size = 4
brain_spec = [test_input.shape[0],5,output_size]
print("#################### RANDOM INIT ######################################")
head = Brain(brain_spec,random = True)
print(head.neurones.get_weights())
print("#################### DEFINE INIT ######################################")
head = Brain(brain_spec,random = False, weights=head.neurones.get_weights())
print(head.neurones.get_weights())
print(head.neurones.summary())
print("#################### MUTATING ###########################################")
head.mutate()
print(head.neurones.get_weights())
##THINK
print("#################### THINKING ############################################")
print(head.think(test_input))
##LEARN
print(head.neurones.trainable_variables)
print("#################### LEARNING ############################################")
memory = [np.array([[1.0,1.0,10.0,10.0]]),np.array([2.0])]
head.learn(memory)
|
[
"tensorflow.keras.layers.Dense",
"random.uniform",
"numpy.expand_dims",
"numpy.random.randint",
"numpy.array",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.Sequential"
] |
[((284, 302), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (300, 302), False, 'from tensorflow import keras\n'), ((941, 982), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (962, 982), False, 'from tensorflow import keras\n'), ((1545, 1567), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (1553, 1567), True, 'import numpy as np\n'), ((603, 659), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['brain_spec[-1]'], {'activation': '"""softmax"""'}), "(brain_spec[-1], activation='softmax')\n", (621, 659), False, 'from tensorflow import keras\n'), ((2561, 2595), 'numpy.array', 'np.array', (['[[1.0, 1.0, 10.0, 10.0]]'], {}), '([[1.0, 1.0, 10.0, 10.0]])\n', (2569, 2595), True, 'import numpy as np\n'), ((2593, 2608), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (2601, 2608), True, 'import numpy as np\n'), ((471, 561), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['brain_spec[i + 1]'], {'activation': '"""elu"""', 'input_shape': '(brain_spec[i],)'}), "(brain_spec[i + 1], activation='elu', input_shape=(\n brain_spec[i],))\n", (489, 561), False, 'from tensorflow import keras\n'), ((1279, 1317), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'layer.shape'}), '(2, size=layer.shape)\n', (1296, 1317), True, 'import numpy as np\n'), ((1230, 1283), 'random.uniform', 'rd.uniform', (['(-1 * mutation_factor)', '(1 * mutation_factor)'], {}), '(-1 * mutation_factor, 1 * mutation_factor)\n', (1240, 1283), True, 'import random as rd\n'), ((1039, 1064), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1053, 1064), True, 'import numpy as np\n')]
|
import time
from abc import abstractmethod
from typing import List
from alfi.models import LFM
import torch
import numpy as np
import gpytorch
from torch.utils.data.dataloader import DataLoader
from alfi.utilities.torch import is_cuda
from alfi.datasets import LFMDataset
class Trainer:
"""
An abstract LFM trainer. Subclasses must implement the `single_epoch` function.
Parameters
----------
lfm: The Latent Force Model.
    optimizers: list of `torch.optim.Optimizer`s; more than one is passed when natural gradients are used for variational models.
    dataset: Dataset where t_observed (D, T), m_observed (J, T).
    give_output: whether the trainer should give the first output (y_0) as initial value to the model `forward()`
track_parameters: the keys into `named_parameters()` of parameters that the trainer should track. The
tracked parameters can be accessed from `parameter_trace`
train_mask: boolean mask
"""
def __init__(self,
lfm: LFM,
optimizers: List[torch.optim.Optimizer],
dataset: LFMDataset,
batch_size=1,
give_output=False,
track_parameters=None,
train_mask=None,
checkpoint_dir=None):
self.lfm = lfm
self.num_epochs = 0
self.optimizers = optimizers
self.use_natural_gradient = len(self.optimizers) > 1
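        # more than one optimizer is passed only when natural gradients are used for the variational parameters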
self.batch_size = batch_size
self.data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
self.losses = None
self.give_output = give_output
self.train_mask = train_mask
self.checkpoint_dir = checkpoint_dir
self.parameter_trace = None
if track_parameters is not None:
named_params = dict(lfm.named_parameters())
self.parameter_trace = {key: [named_params[key].detach()] for key in track_parameters}
def train(self, epochs=20, report_interval=1, reporter_callback=None, **kwargs):
"""
Parameters:
reporter_callback: function called every report_interval
"""
self.lfm.train()
losses = list()
times = list()
end_epoch = self.num_epochs+epochs
for epoch in range(epochs):
epoch_loss, split_loss = self.single_epoch(epoch=self.num_epochs, **kwargs)
t = time.time()
times.append((t, epoch_loss))
if (epoch % report_interval) == 0:
if reporter_callback is not None:
reporter_callback(self.num_epochs)
print('Epoch %03d/%03d - Loss: %.2f (' % (
self.num_epochs + 1, end_epoch, epoch_loss), end='')
print(' '.join(map(lambda l: '%.2f' % l, split_loss)), end='')
if isinstance(self.lfm, gpytorch.models.GP):
kernel = self.lfm.covar_module
print(f') λ: {str(kernel.lengthscale.view(-1).detach().numpy())}', end='')
elif hasattr(self.lfm, 'gp_model'):
print(f') kernel: {self.lfm.summarise_gp_hyp()}', end='')
else:
print(')', end='')
self.print_extra()
if self.checkpoint_dir is not None:
self.lfm.save(self.checkpoint_dir / f'epoch{epoch}')
losses.append(split_loss)
self.after_epoch()
self.num_epochs += 1
losses = torch.tensor(losses).cpu().numpy()
if self.losses is None:
self.losses = np.empty((0, losses.shape[1]))
self.losses = np.concatenate([self.losses, losses], axis=0)
return times
@abstractmethod
def single_epoch(self, epoch=0, **kwargs):
raise NotImplementedError
def set_optimizers(self, optimizers):
self.optimizers = optimizers
def print_extra(self):
print('')
def after_epoch(self):
if self.parameter_trace is not None:
params = dict(self.lfm.named_parameters())
for key in params:
if key in self.parameter_trace:
self.parameter_trace[key].append(params[key].detach().clone())
|
[
"numpy.concatenate",
"numpy.empty",
"time.time",
"torch.utils.data.dataloader.DataLoader",
"torch.tensor"
] |
[((1499, 1556), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(dataset, batch_size=batch_size, shuffle=False)\n', (1509, 1556), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((3636, 3681), 'numpy.concatenate', 'np.concatenate', (['[self.losses, losses]'], {'axis': '(0)'}), '([self.losses, losses], axis=0)\n', (3650, 3681), True, 'import numpy as np\n'), ((2393, 2404), 'time.time', 'time.time', ([], {}), '()\n', (2402, 2404), False, 'import time\n'), ((3583, 3613), 'numpy.empty', 'np.empty', (['(0, losses.shape[1])'], {}), '((0, losses.shape[1]))\n', (3591, 3613), True, 'import numpy as np\n'), ((3490, 3510), 'torch.tensor', 'torch.tensor', (['losses'], {}), '(losses)\n', (3502, 3510), False, 'import torch\n')]
|
import os
import torch
import pickle
import numpy as np
from lib import inteutil
from lib import posematcher
from lib.models import networkinte
from tqdm import tqdm
from TorchSUL import Model as M
from collections import defaultdict
if __name__=='__main__':
## step 1: match the poses
print('Matching poses from two branches...')
matcher = posematcher.PoseMatcher(top_down_path='./mupots/pred/',
btm_up_path='./mupots/MUPOTS_Preds_btmup_transformed.pkl')
matcher.match(pts_out_path='./mupots/pred_bu/', dep_out_path='./mupots/pred_dep_bu/',
gt_dep_path='./mupots/depths/')
## step 2: infer the integrated results
print('Inferring the integrated poses...')
# create data loader
data = inteutil.InteDataset(bu_path='./mupots/pred_bu/', bu_dep_path='./mupots/pred_dep_bu/',
td_path='./mupots/pred/', td_dep_path='./mupots/pred_dep/')
# initialize the network
net = networkinte.IntegrationNet()
pts_dumb = torch.zeros(2, 102)
dep_dumb = torch.zeros(2, 2)
net(pts_dumb, dep_dumb)
M.Saver(net).restore('./ckpts/model_inte/')
net.cuda()
# create paths
if not os.path.exists('./mupots/pred_inte/'):
os.makedirs('./mupots/pred_inte/')
if not os.path.exists('./mupots/pred_dep_inte/'):
os.makedirs('./mupots/pred_dep_inte/')
with torch.no_grad():
all_pts = defaultdict(list)
for src_pts,src_dep,vid_inst in tqdm(data):
src_pts = torch.from_numpy(src_pts).cuda()
src_dep = torch.from_numpy(src_dep).cuda()
res_pts, res_dep = net(src_pts, src_dep)
res_pts = res_pts.cpu().numpy()
res_dep = res_dep.squeeze().cpu().numpy() * 1000 # the depth is scaled 1000
# save results
i,j = vid_inst
all_pts[i].insert(j, res_pts)
pickle.dump(res_dep, open('./mupots/pred_dep_inte/%02d_%02d.pkl'%(i,j), 'wb'))
for k in all_pts:
result = np.stack(all_pts[k], axis=1)
pickle.dump(result, open('./mupots/pred_inte/%d.pkl'%(k+1), 'wb'))
|
[
"numpy.stack",
"tqdm.tqdm",
"torch.from_numpy",
"os.makedirs",
"lib.models.networkinte.IntegrationNet",
"os.path.exists",
"lib.posematcher.PoseMatcher",
"collections.defaultdict",
"TorchSUL.Model.Saver",
"torch.zeros",
"torch.no_grad",
"lib.inteutil.InteDataset"
] |
[((354, 473), 'lib.posematcher.PoseMatcher', 'posematcher.PoseMatcher', ([], {'top_down_path': '"""./mupots/pred/"""', 'btm_up_path': '"""./mupots/MUPOTS_Preds_btmup_transformed.pkl"""'}), "(top_down_path='./mupots/pred/', btm_up_path=\n './mupots/MUPOTS_Preds_btmup_transformed.pkl')\n", (377, 473), False, 'from lib import posematcher\n'), ((720, 876), 'lib.inteutil.InteDataset', 'inteutil.InteDataset', ([], {'bu_path': '"""./mupots/pred_bu/"""', 'bu_dep_path': '"""./mupots/pred_dep_bu/"""', 'td_path': '"""./mupots/pred/"""', 'td_dep_path': '"""./mupots/pred_dep/"""'}), "(bu_path='./mupots/pred_bu/', bu_dep_path=\n './mupots/pred_dep_bu/', td_path='./mupots/pred/', td_dep_path=\n './mupots/pred_dep/')\n", (740, 876), False, 'from lib import inteutil\n'), ((908, 936), 'lib.models.networkinte.IntegrationNet', 'networkinte.IntegrationNet', ([], {}), '()\n', (934, 936), False, 'from lib.models import networkinte\n'), ((949, 968), 'torch.zeros', 'torch.zeros', (['(2)', '(102)'], {}), '(2, 102)\n', (960, 968), False, 'import torch\n'), ((981, 998), 'torch.zeros', 'torch.zeros', (['(2)', '(2)'], {}), '(2, 2)\n', (992, 998), False, 'import torch\n'), ((1107, 1144), 'os.path.exists', 'os.path.exists', (['"""./mupots/pred_inte/"""'], {}), "('./mupots/pred_inte/')\n", (1121, 1144), False, 'import os\n'), ((1148, 1182), 'os.makedirs', 'os.makedirs', (['"""./mupots/pred_inte/"""'], {}), "('./mupots/pred_inte/')\n", (1159, 1182), False, 'import os\n'), ((1191, 1232), 'os.path.exists', 'os.path.exists', (['"""./mupots/pred_dep_inte/"""'], {}), "('./mupots/pred_dep_inte/')\n", (1205, 1232), False, 'import os\n'), ((1236, 1274), 'os.makedirs', 'os.makedirs', (['"""./mupots/pred_dep_inte/"""'], {}), "('./mupots/pred_dep_inte/')\n", (1247, 1274), False, 'import os\n'), ((1282, 1297), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1295, 1297), False, 'import torch\n'), ((1311, 1328), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1322, 1328), False, 'from collections import defaultdict\n'), ((1363, 1373), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (1367, 1373), False, 'from tqdm import tqdm\n'), ((1025, 1037), 'TorchSUL.Model.Saver', 'M.Saver', (['net'], {}), '(net)\n', (1032, 1037), True, 'from TorchSUL import Model as M\n'), ((1812, 1840), 'numpy.stack', 'np.stack', (['all_pts[k]'], {'axis': '(1)'}), '(all_pts[k], axis=1)\n', (1820, 1840), True, 'import numpy as np\n'), ((1388, 1413), 'torch.from_numpy', 'torch.from_numpy', (['src_pts'], {}), '(src_pts)\n', (1404, 1413), False, 'import torch\n'), ((1434, 1459), 'torch.from_numpy', 'torch.from_numpy', (['src_dep'], {}), '(src_dep)\n', (1450, 1459), False, 'import torch\n')]
|
import numpy as np
from PIL import Image
from src.data.rand_augment import RandAugmentMC
import torchvision.transforms as transforms
def pad(x, border=4):
return np.pad(x, [(0, 0), (border, border), (border, border)], mode='reflect')
class RandomPadandCrop(object):
"""Crop randomly the image.
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, width=4, output_size=None):
self.width = width
if output_size is None:
self.output_size = output_size
# assert isinstance(output_size, (int, tuple))
elif isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, x):
old_h, old_w = x.size[:2]
x = np.transpose(x, (2, 0, 1))
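        # channel-first layout so the reflection padding below is applied to the spatial dimensions only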
x = pad(x, self.width)
h, w = x.shape[1:]
if self.output_size is None:
new_h, new_w = old_h, old_w
else:
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
x = x[:, top: top + new_h, left: left + new_w]
return Image.fromarray(np.transpose(x, (1, 2, 0)))
# TODO Implement TransformKTimes
class TransformTwice:
def __init__(self, transform):
self.transform = transform
def __call__(self, inp):
out1 = self.transform(inp)
out2 = self.transform(inp)
return out1, out2
class TransformFix(object):
def __init__(self, base_transform):
self.weak = base_transform
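        # weak/strong augmentation pair (FixMatch-style): the strong branch adds RandAugment just before ToTensor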
# Inserting strong augmentation
self.strong = []
for transform in base_transform.transforms:
if isinstance(transform, transforms.ToTensor):
self.strong.append(RandAugmentMC(n=2, m=10))
self.strong.append(transform)
self.strong = transforms.Compose(self.strong)
def __call__(self, inp):
weak = self.weak(inp)
strong = self.strong(inp)
return weak, strong
def build_transforms(normalize=None, center_crop=None, image_size=None,
random_crop=None, flip=None, random_resize_crop=None):
"""
Args:
normalize (tuple or transforms.Normalize): Parameters for data normalization.
center_crop (int): Size for center crop.
image_size (int): Size for image size.
random_crop (int): Size for image random crop.
flip (bool): Randomly flip the data horizontally.
random_resize_crop (dict): Random resize crop the image.
Returns:
Transforms
"""
transform_ = []
if image_size:
if isinstance(image_size, int):
image_size = (image_size, image_size)
transform_.append(transforms.Resize(image_size))
if random_resize_crop:
transform_.append(transforms.RandomResizedCrop(random_resize_crop['size'], random_resize_crop['scale']))
elif random_crop:
transform_.append(transforms.RandomCrop(random_crop))
elif center_crop:
transform_.append(transforms.CenterCrop(center_crop))
if flip:
transform_.append(transforms.RandomHorizontalFlip())
transform_.append(transforms.ToTensor())
if normalize:
if isinstance(normalize, transforms.Normalize):
transform_.append(normalize)
else:
transform_.append(transforms.Normalize(*normalize))
transform = transforms.Compose(transform_)
return transform
|
[
"numpy.pad",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.Normalize",
"numpy.transpose",
"torchvision.transforms.RandomResizedCrop",
"numpy.random.randint",
"torchvision.transforms.Compose",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Resize",
"src.data.rand_augment.RandAugmentMC",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.ToTensor"
] |
[((168, 239), 'numpy.pad', 'np.pad', (['x', '[(0, 0), (border, border), (border, border)]'], {'mode': '"""reflect"""'}), "(x, [(0, 0), (border, border), (border, border)], mode='reflect')\n", (174, 239), True, 'import numpy as np\n'), ((3549, 3579), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_'], {}), '(transform_)\n', (3567, 3579), True, 'import torchvision.transforms as transforms\n'), ((905, 931), 'numpy.transpose', 'np.transpose', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (917, 931), True, 'import numpy as np\n'), ((1141, 1172), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - new_h)'], {}), '(0, h - new_h)\n', (1158, 1172), True, 'import numpy as np\n'), ((1188, 1219), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - new_w)'], {}), '(0, w - new_w)\n', (1205, 1219), True, 'import numpy as np\n'), ((1996, 2027), 'torchvision.transforms.Compose', 'transforms.Compose', (['self.strong'], {}), '(self.strong)\n', (2014, 2027), True, 'import torchvision.transforms as transforms\n'), ((3316, 3337), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3335, 3337), True, 'import torchvision.transforms as transforms\n'), ((1308, 1334), 'numpy.transpose', 'np.transpose', (['x', '(1, 2, 0)'], {}), '(x, (1, 2, 0))\n', (1320, 1334), True, 'import numpy as np\n'), ((2878, 2907), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_size'], {}), '(image_size)\n', (2895, 2907), True, 'import torchvision.transforms as transforms\n'), ((2963, 3053), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (["random_resize_crop['size']", "random_resize_crop['scale']"], {}), "(random_resize_crop['size'], random_resize_crop\n ['scale'])\n", (2991, 3053), True, 'import torchvision.transforms as transforms\n'), ((3258, 3291), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3289, 3291), True, 'import torchvision.transforms as transforms\n'), ((3098, 3132), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['random_crop'], {}), '(random_crop)\n', (3119, 3132), True, 'import torchvision.transforms as transforms\n'), ((3499, 3531), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['*normalize'], {}), '(*normalize)\n', (3519, 3531), True, 'import torchvision.transforms as transforms\n'), ((1906, 1930), 'src.data.rand_augment.RandAugmentMC', 'RandAugmentMC', ([], {'n': '(2)', 'm': '(10)'}), '(n=2, m=10)\n', (1919, 1930), False, 'from src.data.rand_augment import RandAugmentMC\n'), ((3182, 3216), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['center_crop'], {}), '(center_crop)\n', (3203, 3216), True, 'import torchvision.transforms as transforms\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 14:13:10 2018
@author: antony
"""
import numpy as np
import pandas as pd
import sys
import matplotlib
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import libplot
import matplotlib.gridspec as gridspec
# http://arep.med.harvard.edu/N-Regulation/Tolonen2006/GSEA/index.html
class ExtGSEA(object):
def __init__(self, ranked_gene_list, ranked_score, permutations=1000, w=1):
self.__w = w
self.__np = permutations
l = len(ranked_gene_list)
rk = np.concatenate((ranked_gene_list, ranked_gene_list))
rsc = np.concatenate((ranked_score, -ranked_score), axis=0)
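        # duplicate the ranked list with sign-flipped scores so both gene sets can be scored on a single combined ranking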
ix = np.argsort(rsc)[::-1]
print(np.sort(rsc)[::-1])
pn = np.concatenate((np.ones(l), -np.ones(l)), axis=0)
self.__rk = ranked_gene_list
self.__rs = ranked_score
self.__rkc = rk[ix]
self.__rsc = rsc[ix]
self.__pn = pn[ix]
# Defaults if nothing found
self.__es = -1
self.__nes = -1
self.__pv = -1
self.__ledge = []
self.__bg = {}
self.__gsn1 = 'n1'
self.__gsn2 = 'n2'
self.__run = False
def enrichment_score(self, gs1):
l = len(self.__rk)
hits = np.zeros(l)
for i in range(0, l):
if self.__rk[i] in gs1:
hits[i] = 1
# Compute ES
if self.__w != 1:
score_hit = np.cumsum(np.abs(self.__rs * hits) ** self.__w)
else:
score_hit = np.cumsum(np.abs(self.__rs * hits))
score_hit = score_hit / score_hit[-1]
score_miss = np.cumsum(1 - hits)
score_miss = score_miss / score_miss[-1]
es_all = score_hit - score_miss
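        # running enrichment statistic; the reported ES sums the largest positive and largest negative excursions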
es = np.max(es_all) + np.min(es_all)
isen = np.zeros(l)
if es < 0:
ixpk = np.where(es_all == np.min(es_all))[0][0]
isen[ixpk:] = 1
ledge = self.__rk[(isen == 1) & (hits == 1)]
ledge = ledge[::-1]
else:
ixpk = np.where(es_all == np.max(es_all))[0][0]
print(ixpk)
isen[0:(ixpk + 1)] = 1
ledge = self.__rk[(isen == 1) & (hits == 1)]
return es, es_all, hits, ledge
def ext_gsea(self, gs1, gs2, name1='Gene set 1', name2='Gene set 2'):
self.__gs1 = gs1
self.__gs2 = gs2
self.__gsn1 = name1
self.__gsn2 = name2
l = len(self.__rk)
self.__hits1 = np.zeros(l)
self.__hits2 = np.zeros(l)
for i in range(0, l):
if self.__rk[i] in gs1:
self.__hits1[i] = 1
if self.__rk[i] in gs2:
self.__hits2[i] = 1
l = len(self.__rkc)
self.__isgs = np.zeros(l)
for i in range(0, l):
if (self.__pn[i] > 0 and self.__rkc[i] in gs1) or (self.__pn[i] < 0 and self.__rkc[i] in gs2):
self.__isgs[i] = 1
# Compute ES
if self.__w != 1:
self.__score_hit = np.cumsum(np.abs(self.__rsc * self.__isgs) ** self.__w)
else:
self.__score_hit = np.cumsum(np.abs(self.__rsc * self.__isgs))
self.__score_hit = self.__score_hit / self.__score_hit[-1]
self.__score_miss = np.cumsum(1 - self.__isgs)
self.__score_miss = self.__score_miss / self.__score_miss[-1]
self.__es_all = self.__score_hit - self.__score_miss
self.__es = np.max(self.__es_all) + np.min(self.__es_all)
isen = np.zeros(l)
if self.__es < 0:
ixpk = np.where(self.__es_all == np.min(self.__es_all))[0][0]
isen[ixpk:] = 1
self.__ledge = self.__rkc[(isen == 1) & (self.__isgs == 1)]
self.__ledge = self.__ledge[::-1]
else:
ixpk = np.where(self.__es_all == np.max(self.__es_all))[0][0]
isen[0:(ixpk + 1)] = 1
self.__ledge = self.__rkc[(isen == 1) & (self.__isgs == 1)]
if self.__np > 0:
self.__bg['es'] = np.zeros(self.__np)
for i in range(0, self.__np):
self.__bg['isgs'] = self.__isgs[np.random.permutation(l)];
if self.__w != 1:
self.__bg['hit'] = np.cumsum((np.abs(self.__rsc * self.__bg['isgs'])) ** self.__w)
else:
self.__bg['hit'] = np.cumsum(np.abs(self.__rsc * self.__bg['isgs']))
self.__bg['hit'] = self.__bg['hit'] / self.__bg['hit'][-1]
self.__bg['miss'] = np.cumsum(1 - self.__bg['isgs']);
self.__bg['miss'] = self.__bg['miss'] / self.__bg['miss'][-1]
self.__bg['all'] = self.__bg['hit'] - self.__bg['miss'];
self.__bg['es'][i] = max(self.__bg['all']) + min(self.__bg['all']);
if self.__es < 0:
self.__pv = np.sum(self.__bg['es'] <= self.__es) / self.__np
self.__nes = self.__es / np.abs(np.mean(self.__bg['es'][self.__bg['es'] < 0]))
else:
self.__pv = np.sum(self.__bg['es'] >= self.__es) / self.__np
self.__nes = self.__es / np.abs(np.mean(self.__bg['es'][self.__bg['es'] > 0]))
else:
self.__pv = -1
self.__nes = -1
self.__run = True
return self.__es, self.__nes, self.__pv, self.__ledge
@property
def bg(self):
return self.__bg
@property
def score_hit(self):
return self.__score_hit
@property
def isgs(self):
return self.__isgs
@property
def es(self):
return self.__es
@property
def es_all(self):
return self.__es_all
@property
def score_miss(self):
return self.__score_miss
def plot(self, title=None, out=None):
"""
Replot existing GSEA plot to make it better for publications
"""
if not self.__run:
return
libplot.setup()
# output truetype
#plt.rcParams.update({'pdf.fonttype':42,'ps.fonttype':42})
        # in most cases, we will have many plots, so do not display them
        # It's also convenient to run this script on the command line.
fig = libplot.new_base_fig(w=10, h=7)
# GSEA Plots
gs = gridspec.GridSpec(16, 1)
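        # 16-row grid: ES curves in rows 0-7, the two gene-set hit rasters in rows 8-9, ranked metric below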
x = np.array(list(range(0, len(self.__rk))))
es1, es_all1, hits1, ledge1 = self.enrichment_score(self.__gs1)
es2, es_all2, hits2, ledge2 = self.enrichment_score(self.__gs2)
# Ranked Metric Scores Plot
ix = list(range(0, len(x), 100))
print(ix)
x1 = x[ix]
y1 = self.__rs[ix]
print(hits1)
ax1 = fig.add_subplot(gs[10:])
ax1.fill_between(x1, y1=y1, y2=0, color='#2c5aa0')
ax1.set_ylabel("Ranked list metric", fontsize=14)
ax1.text(.05, .9, self.__gsn1, color='black', horizontalalignment='left', verticalalignment='top',
transform=ax1.transAxes)
ax1.text(.95, .05, self.__gsn2, color='red', horizontalalignment='right', verticalalignment='bottom',
transform=ax1.transAxes)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.set_xlim((0, len(x)))
#
# Hits
#
# gene hits
ax2 = fig.add_subplot(gs[8:9], sharex=ax1)
# the x coords of this transformation are data, and the y coord are axes
trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)
ax2.vlines(np.where(hits1 == 1)[0], 0, 1, linewidth=.5, transform=trans2, color ='black')
libplot.invisible_axes(ax2)
ax3 = fig.add_subplot(gs[9:10], sharex=ax1)
# the x coords of this transformation are data, and the y coord are axes
trans3 = transforms.blended_transform_factory(ax3.transData, ax3.transAxes)
ax3.vlines(np.where(hits2 == 1)[0], 0, 1, linewidth=.5,transform=trans3, color ='red')
libplot.invisible_axes(ax3)
#
# Enrichment score plot
#
ax4 = fig.add_subplot(gs[:8], sharex=ax1)
# max es
y2 = np.max(es_all1)
x1 = np.where(es_all1 == y2)[0]
print(x1, y2)
ax4.vlines(x1, 0, y2, linewidth=.5, color='grey')
y2 = np.min(es_all2)
x1 = np.where(es_all2 == y2)[0]
print(x1, y2)
ax4.vlines(x1, 0, y2, linewidth=.5, color='grey')
y1 = es_all1
y2 = es_all2
ax4.plot(x, y1, linewidth=3, color ='black')
ax4.plot(x, y2, linewidth=3, color ='red')
ax4.tick_params(axis='both', which='both', color='dimgray')
#ax4.spines['left'].set_color('dimgray')
ax4.spines['bottom'].set_visible(False) #set_color('dimgray')
# the y coords of this transformation are data, and the x coord are axes
trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)
ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')
ax4.set_ylabel("Enrichment score (ES)", fontsize=14)
ax4.set_xlim(min(x), max(x))
ax4.spines['top'].set_visible(False)
ax4.spines['right'].set_visible(False)
ax4.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off')
ax4.locator_params(axis='y', nbins=5)
        # FuncFormatter needs two arguments; this lambda is used to format the y-axis tick labels.
ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc)) )
if title is not None:
fig.suptitle(title)
fig.tight_layout(pad=2) #rect=[o, o, w, w])
if out is not None:
plt.savefig(out, dpi=600)
|
[
"numpy.abs",
"numpy.sum",
"numpy.ones",
"numpy.argsort",
"numpy.mean",
"libplot.setup",
"numpy.cumsum",
"numpy.max",
"matplotlib.transforms.blended_transform_factory",
"libplot.new_base_fig",
"numpy.sort",
"libplot.invisible_axes",
"numpy.min",
"numpy.random.permutation",
"numpy.concatenate",
"numpy.zeros",
"numpy.where",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.savefig"
] |
[((643, 695), 'numpy.concatenate', 'np.concatenate', (['(ranked_gene_list, ranked_gene_list)'], {}), '((ranked_gene_list, ranked_gene_list))\n', (657, 695), True, 'import numpy as np\n'), ((710, 763), 'numpy.concatenate', 'np.concatenate', (['(ranked_score, -ranked_score)'], {'axis': '(0)'}), '((ranked_score, -ranked_score), axis=0)\n', (724, 763), True, 'import numpy as np\n'), ((1446, 1457), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (1454, 1457), True, 'import numpy as np\n'), ((1848, 1867), 'numpy.cumsum', 'np.cumsum', (['(1 - hits)'], {}), '(1 - hits)\n', (1857, 1867), True, 'import numpy as np\n'), ((2035, 2046), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (2043, 2046), True, 'import numpy as np\n'), ((2751, 2762), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (2759, 2762), True, 'import numpy as np\n'), ((2786, 2797), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (2794, 2797), True, 'import numpy as np\n'), ((3071, 3082), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (3079, 3082), True, 'import numpy as np\n'), ((3647, 3673), 'numpy.cumsum', 'np.cumsum', (['(1 - self.__isgs)'], {}), '(1 - self.__isgs)\n', (3656, 3673), True, 'import numpy as np\n'), ((3904, 3915), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (3912, 3915), True, 'import numpy as np\n'), ((6479, 6494), 'libplot.setup', 'libplot.setup', ([], {}), '()\n', (6492, 6494), False, 'import libplot\n'), ((6761, 6792), 'libplot.new_base_fig', 'libplot.new_base_fig', ([], {'w': '(10)', 'h': '(7)'}), '(w=10, h=7)\n', (6781, 6792), False, 'import libplot\n'), ((6836, 6860), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(16)', '(1)'], {}), '(16, 1)\n', (6853, 6860), True, 'import matplotlib.gridspec as gridspec\n'), ((8139, 8205), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax2.transData', 'ax2.transAxes'], {}), '(ax2.transData, ax2.transAxes)\n', (8175, 8205), True, 'import matplotlib.transforms as transforms\n'), ((8312, 8339), 'libplot.invisible_axes', 'libplot.invisible_axes', (['ax2'], {}), '(ax2)\n', (8334, 8339), False, 'import libplot\n'), ((8508, 8574), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax3.transData', 'ax3.transAxes'], {}), '(ax3.transData, ax3.transAxes)\n', (8544, 8574), True, 'import matplotlib.transforms as transforms\n'), ((8678, 8705), 'libplot.invisible_axes', 'libplot.invisible_axes', (['ax3'], {}), '(ax3)\n', (8700, 8705), False, 'import libplot\n'), ((8874, 8889), 'numpy.max', 'np.max', (['es_all1'], {}), '(es_all1)\n', (8880, 8889), True, 'import numpy as np\n'), ((9032, 9047), 'numpy.min', 'np.min', (['es_all2'], {}), '(es_all2)\n', (9038, 9047), True, 'import numpy as np\n'), ((9662, 9728), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax4.transAxes', 'ax4.transData'], {}), '(ax4.transAxes, ax4.transData)\n', (9698, 9728), True, 'import matplotlib.transforms as transforms\n'), ((777, 792), 'numpy.argsort', 'np.argsort', (['rsc'], {}), '(rsc)\n', (787, 792), True, 'import numpy as np\n'), ((1979, 1993), 'numpy.max', 'np.max', (['es_all'], {}), '(es_all)\n', (1985, 1993), True, 'import numpy as np\n'), ((1996, 2010), 'numpy.min', 'np.min', (['es_all'], {}), '(es_all)\n', (2002, 2010), True, 'import numpy as np\n'), ((3834, 3855), 'numpy.max', 'np.max', (['self.__es_all'], {}), '(self.__es_all)\n', (3840, 3855), True, 'import numpy as np\n'), ((3858, 3879), 'numpy.min', 'np.min', (['self.__es_all'], {}), 
'(self.__es_all)\n', (3864, 3879), True, 'import numpy as np\n'), ((4435, 4454), 'numpy.zeros', 'np.zeros', (['self.__np'], {}), '(self.__np)\n', (4443, 4454), True, 'import numpy as np\n'), ((8903, 8926), 'numpy.where', 'np.where', (['(es_all1 == y2)'], {}), '(es_all1 == y2)\n', (8911, 8926), True, 'import numpy as np\n'), ((9061, 9084), 'numpy.where', 'np.where', (['(es_all2 == y2)'], {}), '(es_all2 == y2)\n', (9069, 9084), True, 'import numpy as np\n'), ((10608, 10633), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out'], {'dpi': '(600)'}), '(out, dpi=600)\n', (10619, 10633), True, 'import matplotlib.pyplot as plt\n'), ((822, 834), 'numpy.sort', 'np.sort', (['rsc'], {}), '(rsc)\n', (829, 834), True, 'import numpy as np\n'), ((880, 890), 'numpy.ones', 'np.ones', (['l'], {}), '(l)\n', (887, 890), True, 'import numpy as np\n'), ((1738, 1762), 'numpy.abs', 'np.abs', (['(self.__rs * hits)'], {}), '(self.__rs * hits)\n', (1744, 1762), True, 'import numpy as np\n'), ((3501, 3533), 'numpy.abs', 'np.abs', (['(self.__rsc * self.__isgs)'], {}), '(self.__rsc * self.__isgs)\n', (3507, 3533), True, 'import numpy as np\n'), ((4985, 5017), 'numpy.cumsum', 'np.cumsum', (["(1 - self.__bg['isgs'])"], {}), "(1 - self.__bg['isgs'])\n", (4994, 5017), True, 'import numpy as np\n'), ((8225, 8245), 'numpy.where', 'np.where', (['(hits1 == 1)'], {}), '(hits1 == 1)\n', (8233, 8245), True, 'import numpy as np\n'), ((8594, 8614), 'numpy.where', 'np.where', (['(hits2 == 1)'], {}), '(hits2 == 1)\n', (8602, 8614), True, 'import numpy as np\n'), ((893, 903), 'numpy.ones', 'np.ones', (['l'], {}), '(l)\n', (900, 903), True, 'import numpy as np\n'), ((1652, 1676), 'numpy.abs', 'np.abs', (['(self.__rs * hits)'], {}), '(self.__rs * hits)\n', (1658, 1676), True, 'import numpy as np\n'), ((3400, 3432), 'numpy.abs', 'np.abs', (['(self.__rsc * self.__isgs)'], {}), '(self.__rsc * self.__isgs)\n', (3406, 3432), True, 'import numpy as np\n'), ((4559, 4583), 'numpy.random.permutation', 'np.random.permutation', (['l'], {}), '(l)\n', (4580, 4583), True, 'import numpy as np\n'), ((5314, 5350), 'numpy.sum', 'np.sum', (["(self.__bg['es'] <= self.__es)"], {}), "(self.__bg['es'] <= self.__es)\n", (5320, 5350), True, 'import numpy as np\n'), ((5505, 5541), 'numpy.sum', 'np.sum', (["(self.__bg['es'] >= self.__es)"], {}), "(self.__bg['es'] >= self.__es)\n", (5511, 5541), True, 'import numpy as np\n'), ((4813, 4851), 'numpy.abs', 'np.abs', (["(self.__rsc * self.__bg['isgs'])"], {}), "(self.__rsc * self.__bg['isgs'])\n", (4819, 4851), True, 'import numpy as np\n'), ((5411, 5456), 'numpy.mean', 'np.mean', (["self.__bg['es'][self.__bg['es'] < 0]"], {}), "(self.__bg['es'][self.__bg['es'] < 0])\n", (5418, 5456), True, 'import numpy as np\n'), ((5602, 5647), 'numpy.mean', 'np.mean', (["self.__bg['es'][self.__bg['es'] > 0]"], {}), "(self.__bg['es'][self.__bg['es'] > 0])\n", (5609, 5647), True, 'import numpy as np\n'), ((2113, 2127), 'numpy.min', 'np.min', (['es_all'], {}), '(es_all)\n', (2119, 2127), True, 'import numpy as np\n'), ((2304, 2318), 'numpy.max', 'np.max', (['es_all'], {}), '(es_all)\n', (2310, 2318), True, 'import numpy as np\n'), ((3996, 4017), 'numpy.min', 'np.min', (['self.__es_all'], {}), '(self.__es_all)\n', (4002, 4017), True, 'import numpy as np\n'), ((4230, 4251), 'numpy.max', 'np.max', (['self.__es_all'], {}), '(self.__es_all)\n', (4236, 4251), True, 'import numpy as np\n'), ((4689, 4727), 'numpy.abs', 'np.abs', (["(self.__rsc * self.__bg['isgs'])"], {}), "(self.__rsc * self.__bg['isgs'])\n", (4695, 4727), True, 'import numpy 
as np\n')]
|
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler
import numpy as np
import torch
import os
from elbo_functions import deviance_upper_bound, elbo, KL_closed, minibatch_KLD_upper_bound, minibatch_KLD_upper_bound_iter
from model_test import MSE_test_GPapprox, MSE_test
from utils import SubjectSampler, VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader
from predict_HealthMNIST import recon_complete_gen, gen_rotated_mnist_plot, variational_complete_gen
from validation import validate
def hensman_training(nnet_model, type_nnet, epochs, dataset, optimiser, type_KL, num_samples, latent_dim, covar_module0,
covar_module1, likelihoods, m, H, zt_list, P, T, varying_T, Q, weight, id_covariate, loss_function,
natural_gradient=False, natural_gradient_lr=0.01, subjects_per_batch=20, memory_dbg=False,
eps=1e-6, results_path=None, validation_dataset=None, generation_dataset=None,
prediction_dataset=None, gp_model=None, csv_file_test_data=None, csv_file_test_label=None,
test_mask_file=None, data_source_path=None):
"""
    Perform training with minibatching and Stochastic Variational Inference [Hensman et al., 2013].
    See the L-VAE supplementary materials.
:param nnet_model: encoder/decoder neural network model
:param type_nnet: type of encoder/decoder
    :param epochs: number of epochs
:param dataset: dataset to use in training
:param optimiser: optimiser to be used
    :param type_KL: type of KL divergence computation to use
:param num_samples: number of samples to use
:param latent_dim: number of latent dimensions
:param covar_module0: additive kernel (sum of cross-covariances) without id covariate
:param covar_module1: additive kernel (sum of cross-covariances) with id covariate
:param likelihoods: GPyTorch likelihood model
:param m: variational mean
:param H: variational variance
:param zt_list: list of inducing points
:param P: number of unique instances
    :param T: number of longitudinal samples per individual
    :param varying_T: whether individuals may have a varying number of longitudinal samples
    :param Q: number of covariates
:param weight: value for the weight
:param id_covariate: covariate number of the id
:param loss_function: selected loss function
:param natural_gradient: use of natural gradients
:param natural_gradient_lr: natural gradients learning rate
    :param subjects_per_batch: number of subjects per batch (vectorisation)
:param memory_dbg: enable debugging
:param eps: jitter
:param results_path: path to results
    :param validation_dataset: dataset for the validation set
:param generation_dataset: dataset to help with sample image generation
    :param prediction_dataset: dataset with subjects for prediction
    :param gp_model: GPyTorch GP model
:param csv_file_test_data: path to test data
:param csv_file_test_label: path to test label
:param test_mask_file: path to test mask
:param data_source_path: path to data source
    :return: trained models and resulting losses
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
N = len(dataset)
assert type_KL == 'GPapprox_closed'
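    # Minibatches are formed per subject: with varying sequence lengths whole subjects are
    # sampled; otherwise each batch contains subjects_per_batch subjects with T samples each.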
if varying_T:
n_batches = (P + subjects_per_batch - 1)//subjects_per_batch
dataloader = HensmanDataLoader(dataset, batch_sampler=VaryingLengthBatchSampler(VaryingLengthSubjectSampler(dataset, id_covariate), subjects_per_batch), num_workers=4)
else:
batch_size = subjects_per_batch*T
n_batches = (P*T + batch_size - 1)//(batch_size)
dataloader = HensmanDataLoader(dataset, batch_sampler=BatchSampler(SubjectSampler(dataset, P, T), batch_size, drop_last=False), num_workers=4)
net_train_loss_arr = np.empty((0, 1))
recon_loss_arr = np.empty((0, 1))
nll_loss_arr = np.empty((0, 1))
kld_loss_arr = np.empty((0, 1))
penalty_term_arr = np.empty((0, 1))
best_val_pred_mse = np.Inf
best_epoch = 0
for epoch in range(1, epochs + 1):
recon_loss_sum = 0
nll_loss_sum = 0
kld_loss_sum = 0
net_loss_sum = 0
iid_kld_sum = 0
for batch_idx, sample_batched in enumerate(dataloader):
optimiser.zero_grad()
nnet_model.train()
covar_module0.train()
covar_module1.train()
indices = sample_batched['idx']
data = sample_batched['digit'].double().to(device)
train_x = sample_batched['label'].double().to(device)
mask = sample_batched['mask'].double().to(device)
N_batch = data.shape[0]
covariates = torch.cat((train_x[:, :id_covariate], train_x[:, id_covariate+1:]), dim=1)
recon_batch, mu, log_var = nnet_model(data)
[recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)
recon_loss = torch.sum(recon_loss)
nll_loss = torch.sum(nll)
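            # With natural gradients H already stores the PSD covariance S; otherwise H is a
            # square-root factor and S = H H^T.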
PSD_H = H if natural_gradient else torch.matmul(H, H.transpose(-1, -2))
if varying_T:
P_in_current_batch = torch.unique(train_x[:, id_covariate]).shape[0]
kld_loss, grad_m, grad_H = minibatch_KLD_upper_bound_iter(covar_module0, covar_module1, likelihoods, latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P, P_in_current_batch, N, natural_gradient, id_covariate, eps)
else:
P_in_current_batch = N_batch // T
kld_loss, grad_m, grad_H = minibatch_KLD_upper_bound(covar_module0, covar_module1, likelihoods, latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P, P_in_current_batch, T, natural_gradient, eps)
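            # Rescale the minibatch terms so they estimate the objective over all P subjects.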
recon_loss = recon_loss * P/P_in_current_batch
nll_loss = nll_loss * P/P_in_current_batch
if loss_function == 'nll':
net_loss = nll_loss + kld_loss
elif loss_function == 'mse':
kld_loss = kld_loss / latent_dim
net_loss = recon_loss + weight * kld_loss
net_loss.backward()
optimiser.step()
if natural_gradient:
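                # Natural-gradient step: invert H via its Cholesky factor, update the precision
                # (and the corresponding mean parameter), then map back to (m, H).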
LH = torch.cholesky(H)
iH = torch.cholesky_solve(torch.eye(H.shape[-1], dtype=torch.double).to(device), LH)
iH_new = iH + natural_gradient_lr*(grad_H + grad_H.transpose(-1,-2))
LiH_new = torch.cholesky(iH_new)
H = torch.cholesky_solve(torch.eye(H.shape[-1], dtype=torch.double).to(device), LiH_new).detach()
m = torch.matmul(H, torch.matmul(iH, m) - natural_gradient_lr*(grad_m - 2*torch.matmul(grad_H, m))).detach()
net_loss_sum += net_loss.item() / n_batches
recon_loss_sum += recon_loss.item() / n_batches
nll_loss_sum += nll_loss.item() / n_batches
kld_loss_sum += kld_loss.item() / n_batches
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, net_loss_sum, kld_loss_sum, nll_loss_sum, recon_loss_sum), flush=True)
penalty_term_arr = np.append(penalty_term_arr, 0.0)
net_train_loss_arr = np.append(net_train_loss_arr, net_loss_sum)
recon_loss_arr = np.append(recon_loss_arr, recon_loss_sum)
nll_loss_arr = np.append(nll_loss_arr, nll_loss_sum)
kld_loss_arr = np.append(kld_loss_arr, kld_loss_sum)
if (not epoch % 25) and epoch != epochs:
with torch.no_grad():
nnet_model.eval()
covar_module0.eval()
covar_module1.eval()
if validation_dataset is not None:
full_mu = torch.zeros(len(dataset), latent_dim, dtype=torch.double).to(device)
prediction_x = torch.zeros(len(dataset), Q, dtype=torch.double).to(device)
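                    # Encode the full training set to collect the latent means used by validate().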
for batch_idx, sample_batched in enumerate(dataloader):
label_id = sample_batched['idx']
prediction_x[label_id] = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
covariates = torch.cat((prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate+1:]), dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[label_id] = mu
val_pred_mse = validate(nnet_model, type_nnet, validation_dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T, weight, full_mu, prediction_x, id_covariate, loss_function, eps=1e-6)
if val_pred_mse < best_val_pred_mse:
best_val_pred_mse = val_pred_mse
best_epoch = epoch
prediction_dataloader = DataLoader(prediction_dataset, batch_sampler=VaryingLengthBatchSampler(
VaryingLengthSubjectSampler(prediction_dataset, id_covariate), subjects_per_batch),
num_workers=4)
full_mu = torch.zeros(len(prediction_dataset), latent_dim, dtype=torch.double).to(device)
prediction_x = torch.zeros(len(prediction_dataset), Q, dtype=torch.double).to(device)
with torch.no_grad():
for batch_idx, sample_batched in enumerate(prediction_dataloader):
label_id = sample_batched['idx']
prediction_x[label_id] = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
covariates = torch.cat(
(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate + 1:]),
dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[label_id] = mu
covar_module0.eval()
covar_module1.eval()
if type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
MSE_test_GPapprox(csv_file_test_data, csv_file_test_label, test_mask_file,
data_source_path, type_nnet,
nnet_model, covar_module0, covar_module1, likelihoods, results_path,
latent_dim, prediction_x,
full_mu, zt_list, P, T, id_covariate, varying_T,
save_file='result_error_best.csv')
print('Saving better model')
try:
torch.save(nnet_model.state_dict(), os.path.join(results_path, 'nnet_model_best.pth'))
torch.save(gp_model.state_dict(), os.path.join(results_path, 'gp_model_best.pth'))
torch.save(zt_list, os.path.join(results_path, 'zt_list_best.pth'))
torch.save(m, os.path.join(results_path, 'm_best.pth'))
torch.save(H, os.path.join(results_path, 'H_best.pth'))
if results_path and generation_dataset:
prediction_dataloader = DataLoader(prediction_dataset,
batch_sampler=VaryingLengthBatchSampler(
VaryingLengthSubjectSampler(prediction_dataset,
id_covariate),
subjects_per_batch), num_workers=4)
full_mu = torch.zeros(len(prediction_dataset), latent_dim, dtype=torch.double).to(
device)
prediction_x = torch.zeros(len(prediction_dataset), Q, dtype=torch.double).to(device)
for batch_idx, sample_batched in enumerate(prediction_dataloader):
label_id = sample_batched['idx']
prediction_x[label_id] = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
covariates = torch.cat((prediction_x[label_id, :id_covariate],
prediction_x[label_id, id_covariate + 1:]), dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[label_id] = mu
recon_complete_gen(generation_dataset, nnet_model, type_nnet,
results_path, covar_module0,
covar_module1, likelihoods, latent_dim,
'./data', prediction_x, full_mu, epoch,
zt_list, P, T, id_covariate, varying_T)
                        except Exception as e:
                            print(e)
                            print('Saving intermediate model failed!')
if torch.cuda.is_available():
torch.cuda.empty_cache()
return penalty_term_arr, net_train_loss_arr, nll_loss_arr, recon_loss_arr, kld_loss_arr, m, H, best_epoch
def minibatch_training(nnet_model, type_nnet, epochs, dataset, optimiser, type_KL, num_samples, latent_dim,
covar_module0, covar_module1, likelihoods, zt_list, P, T, Q, weight, id_covariate,
loss_function, memory_dbg=False, eps=1e-6, results_path=None, validation_dataset=None,
generation_dataset=None, prediction_dataset=None):
"""
    Perform training with minibatching (pseudo-minibatching) similar to GPPVAE [Casale et al., 2018].
    See the L-VAE supplementary materials.
:param nnet_model: encoder/decoder neural network model
:param type_nnet: type of encoder/decoder
    :param epochs: number of epochs
:param dataset: dataset to use in training
:param optimiser: optimiser to be used
    :param type_KL: type of KL divergence computation to use
:param num_samples: number of samples to use
:param latent_dim: number of latent dimensions
:param covar_module0: additive kernel (sum of cross-covariances) without id covariate
:param covar_module1: additive kernel (sum of cross-covariances) with id covariate
:param likelihoods: GPyTorch likelihood model
:param zt_list: list of inducing points
:param P: number of unique instances
:param T: number of longitudinal samples per individual
:param Q: number of covariates
:param weight: value for the weight
:param id_covariate: covariate number of the id
:param loss_function: selected loss function
:param memory_dbg: enable debugging
:param eps: jitter
:param results_path: path to results
    :param validation_dataset: dataset for the validation set
:param generation_dataset: dataset to help with sample image generation
    :param prediction_dataset: dataset with subjects for prediction
    :return: trained models and resulting losses
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = T
assert (type_KL == 'GPapprox_closed' or type_KL == 'GPapprox')
# set up Data Loader for training
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4)
net_train_loss_arr = np.empty((0, 1))
recon_loss_arr = np.empty((0, 1))
nll_loss_arr = np.empty((0, 1))
gp_loss_arr = np.empty((0, 1))
penalty_term_arr = np.empty((0, 1))
for epoch in range(1, epochs + 1):
optimiser.zero_grad()
full_mu = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)
full_log_var = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)
train_x = torch.zeros(len(dataset), Q, dtype=torch.double, requires_grad=False).to(device)
#Step 1: Encode the sample data to obtain \bar{\mu} and diag(W)
with torch.no_grad():
for batch_idx, sample_batched in enumerate(dataloader):
indices = sample_batched['idx']
data = sample_batched['digit'].double().to(device)
train_x[indices] = sample_batched['label'].double().to(device)
covariates = torch.cat((train_x[indices, :id_covariate], train_x[indices, id_covariate+1:]), dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[indices] = mu
full_log_var[indices] = log_var
mu_grads = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)
log_var_grads = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)
gp_losses = 0
gp_loss_sum = 0
param_list = []
#Steps 2 & 3: compute d and E, compute gradients of KLD w.r.t S and theta
if type_KL == 'GPapprox':
for sample in range(0, num_samples):
Z = nnet_model.sample_latent(full_mu, full_log_var)
for i in range(0, latent_dim):
Z_dim = Z[:, i]
gp_loss = -elbo(covar_module0[i], covar_module1[i], likelihoods[i], train_x, Z_dim,
zt_list[i].to(device), P, T, eps)
gp_loss_sum = gp_loss.item() + gp_loss_sum
gp_losses = gp_losses + gp_loss
gp_losses = gp_losses / num_samples
gp_loss_sum /= num_samples
elif type_KL == 'GPapprox_closed':
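            # Deviance upper bound: uses the variational means/variances directly, no sampling.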
for i in range(0, latent_dim):
mu_sliced = full_mu[:, i]
log_var_sliced = full_log_var[:, i]
gp_loss = deviance_upper_bound(covar_module0[i], covar_module1[i],
likelihoods[i], train_x,
mu_sliced, log_var_sliced,
zt_list[i].to(device), P,
T, eps)
gp_loss_sum = gp_loss.item() + gp_loss_sum
gp_losses = gp_losses + gp_loss
for i in range(0, latent_dim):
param_list += list(covar_module0[i].parameters())
param_list += list(covar_module1[i].parameters())
# param_list.append(zt_list[i])
if loss_function == 'mse':
gp_losses = weight*gp_losses/latent_dim
gp_loss_sum /= latent_dim
mu_grads = torch.autograd.grad(gp_losses, full_mu, retain_graph=True)[0]
log_var_grads = torch.autograd.grad(gp_losses, full_log_var, retain_graph=True)[0]
grads = torch.autograd.grad(gp_losses, param_list)
for ind, p in enumerate(param_list):
p.grad = grads[ind]
recon_loss_sum = 0
nll_loss_sum = 0
#Step 4: compute reconstruction losses w.r.t phi and psi, add dKLD/dphi to the gradients
for batch_idx, sample_batched in enumerate(dataloader):
data = sample_batched['digit'].double().to(device)
mask = sample_batched['mask'].double().to(device)
indices = sample_batched['idx']
label = sample_batched['label'].double().to(device)
covariates = torch.cat((label[:, :id_covariate], label[:, id_covariate+1:]), dim=1)
recon_batch, mu, log_var = nnet_model(data)
[recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)
recon_loss = torch.sum(recon_loss)
nll = torch.sum(nll)
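            # Inject the KLD gradients (computed above on the full set of latent means)
            # into this batch's encoder outputs.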
mu.backward(mu_grads[indices], retain_graph = True)
log_var.backward(log_var_grads[indices], retain_graph = True)
if loss_function == 'mse':
recon_loss.backward()
elif loss_function == 'nll':
nll.backward()
recon_loss_sum = recon_loss_sum + recon_loss.item()
nll_loss_sum = nll_loss_sum + nll.item()
#Do logging
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, recon_loss_sum + weight*gp_loss_sum, gp_loss_sum, nll_loss_sum, recon_loss_sum))
penalty_term_arr = np.append(penalty_term_arr, 0.0)
net_train_loss_arr = np.append(net_train_loss_arr, recon_loss_sum + weight*gp_loss_sum)
nll_loss_arr = np.append(nll_loss_arr, nll_loss_sum)
recon_loss_arr = np.append(recon_loss_arr, recon_loss_sum)
gp_loss_arr = np.append(gp_loss_arr, gp_loss_sum)
#Step 5: apply gradients using an Adam optimiser
optimiser.step()
if (not epoch % 100) and epoch != epochs:
if validation_dataset is not None:
validate(nnet_model, type_nnet, validation_dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T, weight, full_mu, train_x, id_covariate, loss_function, eps=1e-6)
if torch.cuda.is_available():
torch.cuda.empty_cache()
if results_path and generation_dataset:
prediction_dataloader = DataLoader(prediction_dataset, batch_size=1000, shuffle=False, num_workers=4)
full_mu = torch.zeros(len(prediction_dataset), latent_dim, dtype=torch.double).to(device)
prediction_x = torch.zeros(len(prediction_dataset), Q, dtype=torch.double).to(device)
with torch.no_grad():
for batch_idx, sample_batched in enumerate(prediction_dataloader):
# no mini-batching. Instead get a batch of dataset size
label_id = sample_batched['idx']
prediction_x[label_id] = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
covariates = torch.cat((prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate+1:]), dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[label_id] = mu
recon_complete_gen(generation_dataset, nnet_model, type_nnet,
results_path, covar_module0,
covar_module1, likelihoods, latent_dim,
'./data', prediction_x, full_mu, epoch,
zt_list, P, T, id_covariate)
return penalty_term_arr, net_train_loss_arr, nll_loss_arr, recon_loss_arr, gp_loss_arr
def standard_training(nnet_model, type_nnet, epochs, dataset, optimiser, type_KL, num_samples,
latent_dim, covar_modules, likelihoods, zt_list, id_covariate, P, T, Q, weight, constrain_scales,
loss_function, memory_dbg=False, eps=1e-6, validation_dataset=None, generation_dataset=None, prediction_dataset=None):
"""
Perform training without minibatching.
:param nnet_model: encoder/decoder neural network model
:param type_nnet: type of encoder/decoder
    :param epochs: number of epochs
:param dataset: dataset to use in training
:param optimiser: optimiser to be used
    :param type_KL: type of KL divergence computation to use
:param num_samples: number of samples to use
:param latent_dim: number of latent dimensions
:param covar_modules: additive kernel (sum of cross-covariances)
:param likelihoods: GPyTorch likelihood model
:param zt_list: list of inducing points
:param id_covariate: covariate number of the id
:param P: number of unique instances
:param T: number of longitudinal samples per individual
:param Q: number of covariates
:param weight: value for the weight
:param constrain_scales: boolean to constrain scales to 1
:param loss_function: selected loss function
:param memory_dbg: enable debugging
:param eps: jitter
    :param validation_dataset: dataset for the validation set
:param generation_dataset: dataset to help with sample image generation
    :param prediction_dataset: dataset with subjects for prediction
    :return: trained models and resulting losses
"""
if type_KL == 'closed':
covar_module = covar_modules[0]
elif type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
covar_module0 = covar_modules[0]
covar_module1 = covar_modules[1]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# set up Data Loader for training
dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=False, num_workers=4)
net_train_loss_arr = np.empty((0, 1))
recon_loss_arr = np.empty((0, 1))
nll_loss_arr = np.empty((0, 1))
gp_loss_arr = np.empty((0, 1))
penalty_term_arr = np.empty((0, 1))
for epoch in range(1, epochs + 1):
for batch_idx, sample_batched in enumerate(dataloader):
# no mini-batching. Instead get a batch of dataset size.
optimiser.zero_grad() # clear gradients
label_id = sample_batched['idx']
label = sample_batched['label']
data = sample_batched['digit']
data = data.double().to(device)
mask = sample_batched['mask']
mask = mask.to(device)
train_x = label.double().to(device)
covariates = torch.cat((train_x[:, :id_covariate], train_x[:, id_covariate+1:]), dim=1)
# encode data
recon_batch, mu, log_var = nnet_model(data)
[recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)
recon_loss = torch.sum(recon_loss)
nll_loss = torch.sum(nll)
gp_loss_avg = torch.tensor([0.0]).to(device)
net_loss = torch.tensor([0.0]).to(device)
penalty_term = torch.tensor([0.0]).to(device)
for sample_iter in range(0, num_samples):
# Iterate over specified number of samples. Default: num_samples = 1.
Z = nnet_model.sample_latent(mu, log_var)
gp_loss = torch.tensor([0.0]).to(device)
for i in range(0, latent_dim):
Z_dim = Z[:, i].view(-1).type(torch.DoubleTensor).to(device)
if type_KL == 'closed':
# Closed-form KL divergence formula
kld1 = KL_closed(covar_module[i], train_x, likelihoods[i], data, mu[:, i], log_var[:, i])
gp_loss = gp_loss + kld1
elif type_KL == 'conj_gradient':
# GPyTorch default: use modified batch conjugate gradients
# See: https://arxiv.org/abs/1809.11165
gp_models[i].set_train_data(train_x.to(device), Z_dim.to(device))
gp_loss = gp_loss - mlls[i](gp_models[i](train_x.to(device)), Z_dim)
elif type_KL == 'GPapprox':
# Our proposed efficient approximate GP inference scheme
# See: http://arxiv.org/abs/2006.09763
loss = -elbo(covar_module0[i], covar_module1[i], likelihoods[i], train_x, Z_dim,
zt_list[i].to(device), P, T, eps)
gp_loss = gp_loss + loss
elif type_KL == 'GPapprox_closed':
# A variant of our proposed efficient approximate GP inference scheme.
# The key difference with GPapprox is the direct use of the variational mean and variance,
# instead of a sample from Z. We can call this a deviance upper bound.
# See the L-VAE supplement for more details: http://arxiv.org/abs/2006.09763
loss = deviance_upper_bound(covar_module0[i], covar_module1[i], likelihoods[i], train_x,
mu[:, i].view(-1), log_var[:, i].view(-1), zt_list[i].to(device), P,
T, eps)
gp_loss = gp_loss + loss
if type_KL == 'closed' or type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
if loss_function == 'mse':
gp_loss_avg = gp_loss_avg + (gp_loss / latent_dim)
elif loss_function == 'nll':
gp_loss_avg = gp_loss_avg + gp_loss
elif type_KL == 'conj_gradient':
if loss_function == 'mse':
gp_loss = gp_loss * data.shape[0] / latent_dim
elif loss_function == 'nll':
gp_loss = gp_loss * data.shape[0]
gp_loss_avg = gp_loss_avg + gp_loss
if type_KL == 'closed' or type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
gp_loss_avg = gp_loss_avg / num_samples
if loss_function == 'mse':
net_loss = recon_loss + weight * gp_loss_avg
elif loss_function == 'nll':
net_loss = nll_loss + gp_loss_avg
elif type_KL == 'conj_gradient':
gp_loss_avg = gp_loss_avg / num_samples
penalty_term = -0.5 * log_var.sum() / latent_dim
if loss_function == 'mse':
net_loss = recon_loss + weight * (gp_loss_avg + penalty_term)
elif loss_function == 'nll':
net_loss = nll_loss + gp_loss_avg + penalty_term
net_loss.backward()
if type_KL == 'closed' or type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, net_loss.item(), gp_loss_avg.item(), nll_loss.item(), recon_loss.item()))
elif type_KL == 'conj_gradient':
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - Penalty: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, net_loss.item(), gp_loss_avg.item(), penalty_term.item(), nll_loss.item(), recon_loss.item()))
penalty_term_arr = np.append(penalty_term_arr, penalty_term.cpu().item())
net_train_loss_arr = np.append(net_train_loss_arr, net_loss.cpu().item())
recon_loss_arr = np.append(recon_loss_arr, recon_loss.cpu().item())
nll_loss_arr = np.append(nll_loss_arr, nll_loss.cpu().item())
gp_loss_arr = np.append(gp_loss_arr, gp_loss_avg.cpu().item())
optimiser.step()
if constrain_scales:
for i in range(0, latent_dim):
likelihoods[i].noise = torch.tensor([1], dtype=torch.float).to(device)
if (not epoch % 100) and epoch != epochs:
if validation_dataset is not None:
                validate(nnet_model, type_nnet, validation_dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T, weight, mu, train_x, id_covariate, loss_function, eps=1e-6)
if torch.cuda.is_available():
torch.cuda.empty_cache()
return penalty_term_arr, net_train_loss_arr, nll_loss_arr, recon_loss_arr, gp_loss_arr
def variational_inference_optimization(nnet_model, type_nnet, epochs, dataset, prediction_dataset, optimiser,
latent_dim, covar_module0, covar_module1, likelihoods, zt_list, P, T, Q, weight, constrain_scales,
id_covariate, loss_function, memory_dbg=False, eps=1e-6, results_path=None, save_path=None, gp_model_folder=None,
generation_dataset=None):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# set up Data Loader for training
dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=False, num_workers=4)
net_train_loss_arr = np.empty((0, 1))
recon_loss_arr = np.empty((0, 1))
nll_loss_arr = np.empty((0, 1))
gp_loss_arr = np.empty((0, 1))
penalty_term_arr = np.empty((0, 1))
for batch_idx, sample_batched in enumerate(dataloader):
label_id = sample_batched['idx']
label = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
mask = sample_batched['mask'].double().to(device)
covariates = torch.cat((label[:, :id_covariate], label[:, id_covariate+1:]), dim=1)
# encode data
mu, log_var = nnet_model.encode(data)
mu = torch.nn.Parameter(mu.clone().detach(), requires_grad=True)
log_var = torch.nn.Parameter(log_var.clone().detach(), requires_grad=True)
try:
mu = torch.load(os.path.join(gp_model_folder, 'mu.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)
        log_var = torch.load(os.path.join(gp_model_folder, 'log_var.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)
except:
pass
optimiser.add_param_group({'params': mu})
optimiser.add_param_group({'params': log_var})
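    # The variational parameters mu and log_var are now optimised directly, alongside the
    # parameter groups the optimiser already tracks.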
for epoch in range(1, epochs + 1):
optimiser.zero_grad()
Z = nnet_model.sample_latent(mu, log_var)
recon_batch = nnet_model.decode(Z)
[recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)
recon_loss = torch.sum(recon_loss)
nll_loss = torch.sum(nll)
gp_loss_avg = torch.tensor([0.0]).to(device)
net_loss = torch.tensor([0.0]).to(device)
penalty_term = torch.tensor([0.0]).to(device)
for i in range(0, latent_dim):
loss = deviance_upper_bound(covar_module0[i], covar_module1[i], likelihoods[i], label,
mu[:, i].view(-1), log_var[:, i].view(-1), zt_list[i].to(device), P,
T, eps)
gp_loss_avg = gp_loss_avg + loss / latent_dim
if loss_function == 'mse':
net_loss = recon_loss + weight * gp_loss_avg
elif loss_function == 'nll':
net_loss = nll_loss + gp_loss_avg
net_loss.backward()
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, net_loss.item(), gp_loss_avg.item(), nll_loss.item(), recon_loss.item()),
flush=True)
penalty_term_arr = np.append(penalty_term_arr, penalty_term.cpu().item())
net_train_loss_arr = np.append(net_train_loss_arr, net_loss.cpu().item())
recon_loss_arr = np.append(recon_loss_arr, recon_loss.cpu().item())
nll_loss_arr = np.append(nll_loss_arr, nll_loss.cpu().item())
gp_loss_arr = np.append(gp_loss_arr, gp_loss_avg.cpu().item())
optimiser.step()
if not epoch % 100:
sv_pth = os.path.join(save_path, 'recon_' + str(epoch) + '.pdf')
gen_rotated_mnist_plot(data[1920:2080].cpu().detach(), recon_batch[1920:2080].cpu().detach(), label[1920:2080].cpu().detach(), seq_length=20, num_sets=8, save_file=sv_pth)
torch.save(nnet_model.state_dict(), os.path.join(save_path, 'final-vae_model.pth'))
torch.save(mu, os.path.join(save_path, 'mu.pth'))
torch.save(log_var, os.path.join(save_path, 'log_var.pth'))
for i in range(0, latent_dim):
torch.save(covar_module0[i].state_dict(), os.path.join(save_path, 'cov_module0_' + str(i) + '.pth'))
torch.save(covar_module1[i].state_dict(), os.path.join(save_path, 'cov_module1_' + str(i) + '.pth'))
prediction_dataloader = DataLoader(prediction_dataset, batch_size=len(prediction_dataset), shuffle=False, num_workers=1)
for batch_idx, sample_batched in enumerate(prediction_dataloader):
label_pred = sample_batched['label'].double().to(device)
data_pred = sample_batched['digit'].double().to(device)
mask_pred = sample_batched['mask'].double().to(device)
covariates = torch.cat((label_pred[:, :id_covariate], label_pred[:, id_covariate+1:]), dim=1)
# encode data
mu_pred, log_var_pred = nnet_model.encode(data_pred)
break
try:
mu_pred = torch.load(os.path.join(gp_model_folder, 'mu_pred.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)
log_var_pred = torch.load(os.path.join(gp_model_folder, 'log_var_pred.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)
except:
pass
mu_pred = torch.nn.Parameter(mu_pred.clone().detach(), requires_grad=True)
log_var_pred = torch.nn.Parameter(log_var_pred.clone().detach(), requires_grad=True)
adam_param_list = []
adam_param_list.append({'params': mu_pred})
adam_param_list.append({'params': log_var_pred})
optimiser_pred = torch.optim.Adam(adam_param_list, lr=1e-3)
for epoch in range(1, 1001):
optimiser_pred.zero_grad()
Z = nnet_model.sample_latent(mu_pred, log_var_pred)
recon_batch = nnet_model.decode(Z)
[recon_loss, nll] = nnet_model.loss_function(recon_batch,
data_pred,
mask_pred)
recon_loss = torch.sum(recon_loss)
nll_loss = torch.sum(nll)
gp_loss_avg = torch.tensor([0.0]).to(device)
prediction_mu = torch.cat((mu_pred, mu), dim=0)
prediction_log_var = torch.cat((log_var_pred, log_var), dim=0)
prediction_x = torch.cat((label_pred, label), dim=0)
for i in range(0, latent_dim):
loss = deviance_upper_bound(covar_module0[i], covar_module1[i], likelihoods[i], prediction_x,
prediction_mu[:, i].view(-1), prediction_log_var[:, i].view(-1),
zt_list[i].to(device), P+8, T, eps)
gp_loss_avg = gp_loss_avg + loss / latent_dim
if loss_function == 'mse':
net_loss = recon_loss + weight * gp_loss_avg
elif loss_function == 'nll':
net_loss = nll_loss + gp_loss_avg
net_loss.backward()
print('Iter %d/1000 - Total Loss: %.3f - GP Loss: %.3f - Recon Loss: %.3f' % (
epoch, net_loss.item(), gp_loss_avg.item(), recon_loss.item()),
flush=True)
optimiser_pred.step()
torch.save(mu_pred, os.path.join(save_path, 'mu_pred.pth'))
torch.save(log_var_pred, os.path.join(save_path, 'log_var_pred.pth'))
l = [i*20 + k for i in range(0,8) for k in range(0,5)]
prediction_x = torch.cat((label_pred[l],
label))
prediction_mu = torch.cat((mu_pred[l],
mu))
if generation_dataset:
variational_complete_gen(generation_dataset, nnet_model, type_nnet,
results_path, covar_module0,
covar_module1, likelihoods, latent_dim,
'./data', prediction_x, prediction_mu, 'final',
zt_list, P, T, id_covariate)
exit(0)
|
[
"elbo_functions.minibatch_KLD_upper_bound_iter",
"torch.eye",
"torch.autograd.grad",
"numpy.empty",
"torch.cat",
"model_test.MSE_test_GPapprox",
"predict_HealthMNIST.variational_complete_gen",
"torch.device",
"torch.no_grad",
"os.path.join",
"predict_HealthMNIST.recon_complete_gen",
"torch.utils.data.DataLoader",
"numpy.append",
"torch.matmul",
"elbo_functions.minibatch_KLD_upper_bound",
"torch.unique",
"elbo_functions.KL_closed",
"torch.optim.Adam",
"torch.cuda.is_available",
"torch.sum",
"utils.SubjectSampler",
"validation.validate",
"utils.VaryingLengthSubjectSampler",
"torch.cholesky",
"torch.cuda.empty_cache",
"torch.tensor"
] |
[((3853, 3869), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (3861, 3869), True, 'import numpy as np\n'), ((3891, 3907), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (3899, 3907), True, 'import numpy as np\n'), ((3927, 3943), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (3935, 3943), True, 'import numpy as np\n'), ((3963, 3979), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (3971, 3979), True, 'import numpy as np\n'), ((4003, 4019), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (4011, 4019), True, 'import numpy as np\n'), ((15921, 15993), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(dataset, batch_size=batch_size, shuffle=False, num_workers=4)\n', (15931, 15993), False, 'from torch.utils.data import DataLoader\n'), ((16024, 16040), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16032, 16040), True, 'import numpy as np\n'), ((16062, 16078), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16070, 16078), True, 'import numpy as np\n'), ((16098, 16114), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16106, 16114), True, 'import numpy as np\n'), ((16133, 16149), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16141, 16149), True, 'import numpy as np\n'), ((16173, 16189), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16181, 16189), True, 'import numpy as np\n'), ((25318, 25334), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25326, 25334), True, 'import numpy as np\n'), ((25356, 25372), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25364, 25372), True, 'import numpy as np\n'), ((25392, 25408), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25400, 25408), True, 'import numpy as np\n'), ((25427, 25443), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25435, 25443), True, 'import numpy as np\n'), ((25467, 25483), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25475, 25483), True, 'import numpy as np\n'), ((32624, 32640), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32632, 32640), True, 'import numpy as np\n'), ((32662, 32678), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32670, 32678), True, 'import numpy as np\n'), ((32698, 32714), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32706, 32714), True, 'import numpy as np\n'), ((32733, 32749), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32741, 32749), True, 'import numpy as np\n'), ((32773, 32789), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32781, 32789), True, 'import numpy as np\n'), ((37476, 37519), 'torch.optim.Adam', 'torch.optim.Adam', (['adam_param_list'], {'lr': '(0.001)'}), '(adam_param_list, lr=0.001)\n', (37492, 37519), False, 'import torch\n'), ((39240, 39273), 'torch.cat', 'torch.cat', (['(label_pred[l], label)'], {}), '((label_pred[l], label))\n', (39249, 39273), False, 'import torch\n'), ((39325, 39352), 'torch.cat', 'torch.cat', (['(mu_pred[l], mu)'], {}), '((mu_pred[l], mu))\n', (39334, 39352), False, 'import torch\n'), ((7154, 7186), 'numpy.append', 'np.append', (['penalty_term_arr', '(0.0)'], {}), '(penalty_term_arr, 0.0)\n', (7163, 7186), True, 'import numpy as np\n'), ((7216, 7259), 'numpy.append', 'np.append', (['net_train_loss_arr', 'net_loss_sum'], {}), '(net_train_loss_arr, net_loss_sum)\n', (7225, 7259), True, 'import numpy as np\n'), ((7286, 7327), 'numpy.append', 
'np.append', (['recon_loss_arr', 'recon_loss_sum'], {}), '(recon_loss_arr, recon_loss_sum)\n', (7295, 7327), True, 'import numpy as np\n'), ((7351, 7388), 'numpy.append', 'np.append', (['nll_loss_arr', 'nll_loss_sum'], {}), '(nll_loss_arr, nll_loss_sum)\n', (7360, 7388), True, 'import numpy as np\n'), ((7412, 7449), 'numpy.append', 'np.append', (['kld_loss_arr', 'kld_loss_sum'], {}), '(kld_loss_arr, kld_loss_sum)\n', (7421, 7449), True, 'import numpy as np\n'), ((19373, 19415), 'torch.autograd.grad', 'torch.autograd.grad', (['gp_losses', 'param_list'], {}), '(gp_losses, param_list)\n', (19392, 19415), False, 'import torch\n'), ((20941, 20973), 'numpy.append', 'np.append', (['penalty_term_arr', '(0.0)'], {}), '(penalty_term_arr, 0.0)\n', (20950, 20973), True, 'import numpy as np\n'), ((21003, 21071), 'numpy.append', 'np.append', (['net_train_loss_arr', '(recon_loss_sum + weight * gp_loss_sum)'], {}), '(net_train_loss_arr, recon_loss_sum + weight * gp_loss_sum)\n', (21012, 21071), True, 'import numpy as np\n'), ((21094, 21131), 'numpy.append', 'np.append', (['nll_loss_arr', 'nll_loss_sum'], {}), '(nll_loss_arr, nll_loss_sum)\n', (21103, 21131), True, 'import numpy as np\n'), ((21157, 21198), 'numpy.append', 'np.append', (['recon_loss_arr', 'recon_loss_sum'], {}), '(recon_loss_arr, recon_loss_sum)\n', (21166, 21198), True, 'import numpy as np\n'), ((21221, 21256), 'numpy.append', 'np.append', (['gp_loss_arr', 'gp_loss_sum'], {}), '(gp_loss_arr, gp_loss_sum)\n', (21230, 21256), True, 'import numpy as np\n'), ((33091, 33163), 'torch.cat', 'torch.cat', (['(label[:, :id_covariate], label[:, id_covariate + 1:])'], {'dim': '(1)'}), '((label[:, :id_covariate], label[:, id_covariate + 1:]), dim=1)\n', (33100, 33163), False, 'import torch\n'), ((34066, 34087), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (34075, 34087), False, 'import torch\n'), ((34107, 34121), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (34116, 34121), False, 'import torch\n'), ((35806, 35852), 'os.path.join', 'os.path.join', (['save_path', '"""final-vae_model.pth"""'], {}), "(save_path, 'final-vae_model.pth')\n", (35818, 35852), False, 'import os\n'), ((35873, 35906), 'os.path.join', 'os.path.join', (['save_path', '"""mu.pth"""'], {}), "(save_path, 'mu.pth')\n", (35885, 35906), False, 'import os\n'), ((35932, 35970), 'os.path.join', 'os.path.join', (['save_path', '"""log_var.pth"""'], {}), "(save_path, 'log_var.pth')\n", (35944, 35970), False, 'import os\n'), ((36635, 36721), 'torch.cat', 'torch.cat', (['(label_pred[:, :id_covariate], label_pred[:, id_covariate + 1:])'], {'dim': '(1)'}), '((label_pred[:, :id_covariate], label_pred[:, id_covariate + 1:]),\n dim=1)\n', (36644, 36721), False, 'import torch\n'), ((37908, 37929), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (37917, 37929), False, 'import torch\n'), ((37949, 37963), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (37958, 37963), False, 'import torch\n'), ((38043, 38074), 'torch.cat', 'torch.cat', (['(mu_pred, mu)'], {'dim': '(0)'}), '((mu_pred, mu), dim=0)\n', (38052, 38074), False, 'import torch\n'), ((38104, 38145), 'torch.cat', 'torch.cat', (['(log_var_pred, log_var)'], {'dim': '(0)'}), '((log_var_pred, log_var), dim=0)\n', (38113, 38145), False, 'import torch\n'), ((38169, 38206), 'torch.cat', 'torch.cat', (['(label_pred, label)'], {'dim': '(0)'}), '((label_pred, label), dim=0)\n', (38178, 38206), False, 'import torch\n'), ((39047, 39085), 'os.path.join', 'os.path.join', (['save_path', '"""mu_pred.pth"""'], {}), 
"(save_path, 'mu_pred.pth')\n", (39059, 39085), False, 'import os\n'), ((39116, 39159), 'os.path.join', 'os.path.join', (['save_path', '"""log_var_pred.pth"""'], {}), "(save_path, 'log_var_pred.pth')\n", (39128, 39159), False, 'import os\n'), ((39420, 39646), 'predict_HealthMNIST.variational_complete_gen', 'variational_complete_gen', (['generation_dataset', 'nnet_model', 'type_nnet', 'results_path', 'covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', '"""./data"""', 'prediction_x', 'prediction_mu', '"""final"""', 'zt_list', 'P', 'T', 'id_covariate'], {}), "(generation_dataset, nnet_model, type_nnet,\n results_path, covar_module0, covar_module1, likelihoods, latent_dim,\n './data', prediction_x, prediction_mu, 'final', zt_list, P, T, id_covariate\n )\n", (39444, 39646), False, 'from predict_HealthMNIST import recon_complete_gen, gen_rotated_mnist_plot, variational_complete_gen\n'), ((3200, 3225), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3223, 3225), False, 'import torch\n'), ((4729, 4805), 'torch.cat', 'torch.cat', (['(train_x[:, :id_covariate], train_x[:, id_covariate + 1:])'], {'dim': '(1)'}), '((train_x[:, :id_covariate], train_x[:, id_covariate + 1:]), dim=1)\n', (4738, 4805), False, 'import torch\n'), ((4968, 4989), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (4977, 4989), False, 'import torch\n'), ((5013, 5027), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (5022, 5027), False, 'import torch\n'), ((15740, 15765), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15763, 15765), False, 'import torch\n'), ((16666, 16681), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16679, 16681), False, 'import torch\n'), ((19204, 19262), 'torch.autograd.grad', 'torch.autograd.grad', (['gp_losses', 'full_mu'], {'retain_graph': '(True)'}), '(gp_losses, full_mu, retain_graph=True)\n', (19223, 19262), False, 'import torch\n'), ((19290, 19353), 'torch.autograd.grad', 'torch.autograd.grad', (['gp_losses', 'full_log_var'], {'retain_graph': '(True)'}), '(gp_losses, full_log_var, retain_graph=True)\n', (19309, 19353), False, 'import torch\n'), ((19967, 20039), 'torch.cat', 'torch.cat', (['(label[:, :id_covariate], label[:, id_covariate + 1:])'], {'dim': '(1)'}), '((label[:, :id_covariate], label[:, id_covariate + 1:]), dim=1)\n', (19976, 20039), False, 'import torch\n'), ((20214, 20235), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (20223, 20235), False, 'import torch\n'), ((20254, 20268), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (20263, 20268), False, 'import torch\n'), ((25123, 25148), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (25146, 25148), False, 'import torch\n'), ((26075, 26151), 'torch.cat', 'torch.cat', (['(train_x[:, :id_covariate], train_x[:, id_covariate + 1:])'], {'dim': '(1)'}), '((train_x[:, :id_covariate], train_x[:, id_covariate + 1:]), dim=1)\n', (26084, 26151), False, 'import torch\n'), ((26341, 26362), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (26350, 26362), False, 'import torch\n'), ((26386, 26400), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (26395, 26400), False, 'import torch\n'), ((32429, 32454), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (32452, 32454), False, 'import torch\n'), ((5268, 5465), 'elbo_functions.minibatch_KLD_upper_bound_iter', 'minibatch_KLD_upper_bound_iter', (['covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', 'm', 'PSD_H', 'train_x', 
'mu', 'log_var', 'zt_list', 'P', 'P_in_current_batch', 'N', 'natural_gradient', 'id_covariate', 'eps'], {}), '(covar_module0, covar_module1, likelihoods,\n latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P,\n P_in_current_batch, N, natural_gradient, id_covariate, eps)\n', (5298, 5465), False, 'from elbo_functions import deviance_upper_bound, elbo, KL_closed, minibatch_KLD_upper_bound, minibatch_KLD_upper_bound_iter\n'), ((5569, 5747), 'elbo_functions.minibatch_KLD_upper_bound', 'minibatch_KLD_upper_bound', (['covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', 'm', 'PSD_H', 'train_x', 'mu', 'log_var', 'zt_list', 'P', 'P_in_current_batch', 'T', 'natural_gradient', 'eps'], {}), '(covar_module0, covar_module1, likelihoods,\n latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P,\n P_in_current_batch, T, natural_gradient, eps)\n', (5594, 5747), False, 'from elbo_functions import deviance_upper_bound, elbo, KL_closed, minibatch_KLD_upper_bound, minibatch_KLD_upper_bound_iter\n'), ((6207, 6224), 'torch.cholesky', 'torch.cholesky', (['H'], {}), '(H)\n', (6221, 6224), False, 'import torch\n'), ((6437, 6459), 'torch.cholesky', 'torch.cholesky', (['iH_new'], {}), '(iH_new)\n', (6451, 6459), False, 'import torch\n'), ((7517, 7532), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7530, 7532), False, 'import torch\n'), ((16975, 17067), 'torch.cat', 'torch.cat', (['(train_x[indices, :id_covariate], train_x[indices, id_covariate + 1:])'], {'dim': '(1)'}), '((train_x[indices, :id_covariate], train_x[indices, id_covariate +\n 1:]), dim=1)\n', (16984, 17067), False, 'import torch\n'), ((21454, 21668), 'validation.validate', 'validate', (['nnet_model', 'type_nnet', 'validation_dataset', 'type_KL', 'num_samples', 'latent_dim', 'covar_module0', 'covar_module1', 'likelihoods', 'zt_list', 'T', 'weight', 'full_mu', 'train_x', 'id_covariate', 'loss_function'], {'eps': '(1e-06)'}), '(nnet_model, type_nnet, validation_dataset, type_KL, num_samples,\n latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T,\n weight, full_mu, train_x, id_covariate, loss_function, eps=1e-06)\n', (21462, 21668), False, 'from validation import validate\n'), ((21679, 21704), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21702, 21704), False, 'import torch\n'), ((21844, 21921), 'torch.utils.data.DataLoader', 'DataLoader', (['prediction_dataset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': '(4)'}), '(prediction_dataset, batch_size=1000, shuffle=False, num_workers=4)\n', (21854, 21921), False, 'from torch.utils.data import DataLoader\n'), ((34145, 34164), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (34157, 34164), False, 'import torch\n'), ((34195, 34214), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (34207, 34214), False, 'import torch\n'), ((34249, 34268), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (34261, 34268), False, 'import torch\n'), ((37987, 38006), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (37999, 38006), False, 'import torch\n'), ((3475, 3525), 'utils.VaryingLengthSubjectSampler', 'VaryingLengthSubjectSampler', (['dataset', 'id_covariate'], {}), '(dataset, id_covariate)\n', (3502, 3525), False, 'from utils import SubjectSampler, VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader\n'), ((3751, 3780), 'utils.SubjectSampler', 'SubjectSampler', (['dataset', 'P', 'T'], {}), '(dataset, P, T)\n', (3765, 3780), False, 'from utils import SubjectSampler, 
VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader\n'), ((8469, 8688), 'validation.validate', 'validate', (['nnet_model', 'type_nnet', 'validation_dataset', 'type_KL', 'num_samples', 'latent_dim', 'covar_module0', 'covar_module1', 'likelihoods', 'zt_list', 'T', 'weight', 'full_mu', 'prediction_x', 'id_covariate', 'loss_function'], {'eps': '(1e-06)'}), '(nnet_model, type_nnet, validation_dataset, type_KL, num_samples,\n latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T,\n weight, full_mu, prediction_x, id_covariate, loss_function, eps=1e-06)\n', (8477, 8688), False, 'from validation import validate\n'), ((13657, 13682), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13680, 13682), False, 'import torch\n'), ((21726, 21750), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (21748, 21750), False, 'import torch\n'), ((22151, 22166), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22164, 22166), False, 'import torch\n'), ((22828, 23035), 'predict_HealthMNIST.recon_complete_gen', 'recon_complete_gen', (['generation_dataset', 'nnet_model', 'type_nnet', 'results_path', 'covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', '"""./data"""', 'prediction_x', 'full_mu', 'epoch', 'zt_list', 'P', 'T', 'id_covariate'], {}), "(generation_dataset, nnet_model, type_nnet, results_path,\n covar_module0, covar_module1, likelihoods, latent_dim, './data',\n prediction_x, full_mu, epoch, zt_list, P, T, id_covariate)\n", (22846, 23035), False, 'from predict_HealthMNIST import recon_complete_gen, gen_rotated_mnist_plot, variational_complete_gen\n'), ((26428, 26447), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (26440, 26447), False, 'import torch\n'), ((26482, 26501), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (26494, 26501), False, 'import torch\n'), ((26540, 26559), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (26552, 26559), False, 'import torch\n'), ((31860, 31885), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (31883, 31885), False, 'import torch\n'), ((5177, 5215), 'torch.unique', 'torch.unique', (['train_x[:, id_covariate]'], {}), '(train_x[:, id_covariate])\n', (5189, 5215), False, 'import torch\n'), ((8225, 8330), 'torch.cat', 'torch.cat', (['(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate +\n 1:])'], {'dim': '(1)'}), '((prediction_x[label_id, :id_covariate], prediction_x[label_id, \n id_covariate + 1:]), dim=1)\n', (8234, 8330), False, 'import torch\n'), ((13708, 13732), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13730, 13732), False, 'import torch\n'), ((22597, 22702), 'torch.cat', 'torch.cat', (['(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate +\n 1:])'], {'dim': '(1)'}), '((prediction_x[label_id, :id_covariate], prediction_x[label_id, \n id_covariate + 1:]), dim=1)\n', (22606, 22702), False, 'import torch\n'), ((26797, 26816), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (26809, 26816), False, 'import torch\n'), ((27094, 27181), 'elbo_functions.KL_closed', 'KL_closed', (['covar_module[i]', 'train_x', 'likelihoods[i]', 'data', 'mu[:, i]', 'log_var[:, i]'], {}), '(covar_module[i], train_x, likelihoods[i], data, mu[:, i], log_var\n [:, i])\n', (27103, 27181), False, 'from elbo_functions import deviance_upper_bound, elbo, KL_closed, minibatch_KLD_upper_bound, minibatch_KLD_upper_bound_iter\n'), ((31911, 31935), 
'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (31933, 31935), False, 'import torch\n'), ((6267, 6309), 'torch.eye', 'torch.eye', (['H.shape[-1]'], {'dtype': 'torch.double'}), '(H.shape[-1], dtype=torch.double)\n', (6276, 6309), False, 'import torch\n'), ((9398, 9413), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9411, 9413), False, 'import torch\n'), ((31453, 31489), 'torch.tensor', 'torch.tensor', (['[1]'], {'dtype': 'torch.float'}), '([1], dtype=torch.float)\n', (31465, 31489), False, 'import torch\n'), ((6610, 6629), 'torch.matmul', 'torch.matmul', (['iH', 'm'], {}), '(iH, m)\n', (6622, 6629), False, 'import torch\n'), ((9804, 9909), 'torch.cat', 'torch.cat', (['(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate +\n 1:])'], {'dim': '(1)'}), '((prediction_x[label_id, :id_covariate], prediction_x[label_id, \n id_covariate + 1:]), dim=1)\n', (9813, 9909), False, 'import torch\n'), ((10320, 10614), 'model_test.MSE_test_GPapprox', 'MSE_test_GPapprox', (['csv_file_test_data', 'csv_file_test_label', 'test_mask_file', 'data_source_path', 'type_nnet', 'nnet_model', 'covar_module0', 'covar_module1', 'likelihoods', 'results_path', 'latent_dim', 'prediction_x', 'full_mu', 'zt_list', 'P', 'T', 'id_covariate', 'varying_T'], {'save_file': '"""result_error_best.csv"""'}), "(csv_file_test_data, csv_file_test_label, test_mask_file,\n data_source_path, type_nnet, nnet_model, covar_module0, covar_module1,\n likelihoods, results_path, latent_dim, prediction_x, full_mu, zt_list,\n P, T, id_covariate, varying_T, save_file='result_error_best.csv')\n", (10337, 10614), False, 'from model_test import MSE_test_GPapprox, MSE_test\n'), ((11000, 11049), 'os.path.join', 'os.path.join', (['results_path', '"""nnet_model_best.pth"""'], {}), "(results_path, 'nnet_model_best.pth')\n", (11012, 11049), False, 'import os\n'), ((11113, 11160), 'os.path.join', 'os.path.join', (['results_path', '"""gp_model_best.pth"""'], {}), "(results_path, 'gp_model_best.pth')\n", (11125, 11160), False, 'import os\n'), ((11210, 11256), 'os.path.join', 'os.path.join', (['results_path', '"""zt_list_best.pth"""'], {}), "(results_path, 'zt_list_best.pth')\n", (11222, 11256), False, 'import os\n'), ((11300, 11340), 'os.path.join', 'os.path.join', (['results_path', '"""m_best.pth"""'], {}), "(results_path, 'm_best.pth')\n", (11312, 11340), False, 'import os\n'), ((11384, 11424), 'os.path.join', 'os.path.join', (['results_path', '"""H_best.pth"""'], {}), "(results_path, 'H_best.pth')\n", (11396, 11424), False, 'import os\n'), ((13044, 13262), 'predict_HealthMNIST.recon_complete_gen', 'recon_complete_gen', (['generation_dataset', 'nnet_model', 'type_nnet', 'results_path', 'covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', '"""./data"""', 'prediction_x', 'full_mu', 'epoch', 'zt_list', 'P', 'T', 'id_covariate', 'varying_T'], {}), "(generation_dataset, nnet_model, type_nnet, results_path,\n covar_module0, covar_module1, likelihoods, latent_dim, './data',\n prediction_x, full_mu, epoch, zt_list, P, T, id_covariate, varying_T)\n", (13062, 13262), False, 'from predict_HealthMNIST import recon_complete_gen, gen_rotated_mnist_plot, variational_complete_gen\n'), ((6501, 6543), 'torch.eye', 'torch.eye', (['H.shape[-1]'], {'dtype': 'torch.double'}), '(H.shape[-1], dtype=torch.double)\n', (6510, 6543), False, 'import torch\n'), ((8986, 9047), 'utils.VaryingLengthSubjectSampler', 'VaryingLengthSubjectSampler', (['prediction_dataset', 'id_covariate'], {}), '(prediction_dataset, id_covariate)\n', 
(9013, 9047), False, 'from utils import SubjectSampler, VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader\n'), ((12716, 12821), 'torch.cat', 'torch.cat', (['(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate +\n 1:])'], {'dim': '(1)'}), '((prediction_x[label_id, :id_covariate], prediction_x[label_id, \n id_covariate + 1:]), dim=1)\n', (12725, 12821), False, 'import torch\n'), ((33414, 33453), 'os.path.join', 'os.path.join', (['gp_model_folder', '"""mu.pth"""'], {}), "(gp_model_folder, 'mu.pth')\n", (33426, 33453), False, 'import os\n'), ((33560, 33603), 'os.path.join', 'os.path.join', (['gp_model_foder', '"""log_var.pth"""'], {}), "(gp_model_foder, 'log_var.pth')\n", (33572, 33603), False, 'import os\n'), ((36852, 36896), 'os.path.join', 'os.path.join', (['gp_model_folder', '"""mu_pred.pth"""'], {}), "(gp_model_folder, 'mu_pred.pth')\n", (36864, 36896), False, 'import os\n'), ((37008, 37057), 'os.path.join', 'os.path.join', (['gp_model_folder', '"""log_var_pred.pth"""'], {}), "(gp_model_folder, 'log_var_pred.pth')\n", (37020, 37057), False, 'import os\n'), ((33468, 33488), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (33480, 33488), False, 'import torch\n'), ((33618, 33638), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (33630, 33638), False, 'import torch\n'), ((36911, 36931), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (36923, 36931), False, 'import torch\n'), ((37072, 37092), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (37084, 37092), False, 'import torch\n'), ((6664, 6687), 'torch.matmul', 'torch.matmul', (['grad_H', 'm'], {}), '(grad_H, m)\n', (6676, 6687), False, 'import torch\n'), ((11761, 11822), 'utils.VaryingLengthSubjectSampler', 'VaryingLengthSubjectSampler', (['prediction_dataset', 'id_covariate'], {}), '(prediction_dataset, id_covariate)\n', (11788, 11822), False, 'from utils import SubjectSampler, VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader\n')]
|
"""
WebObs class
"""
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord
import numpy as np
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import os
import io
import wget
import requests
class WebObs(object):
"""
Class for AAVSO web observation.
fileoutput = aavso.html
    filtername = Vis|B|V|R|I (CCD bands are fetched with obs_types=ccd, Vis with obs_types=vis)
"""
def __init__(self, nameID, filtername='Vis', fileoutput='aavso.html'):
self.nameID = nameID
self.filter = filtername
self.fileoutput = fileoutput
self.titlename = ''
self.comment = ''
self.observation = Table()
self.html = BeautifulSoup()
self.available = False
self._period = 0
self.filter = self.isfilter(filtername)
self.read
@property
def read(self):
"""
Return html of observation
Ex: wget --no-check-certificate 'https://app.aavso.org/webobs/results/?star=' -O aavso.html
"""
        if os.path.exists(self.fileoutput):
os.remove(self.fileoutput)
if ' ' in self.nameID:
nameID = self.nameID.replace(' ','%20')
else:
nameID = self.nameID
if self.isccd:
filtername = 'ccd'
else:
filtername = 'vis'
url = 'https://app.aavso.org/webobs/results/?star=' + nameID + '&num_results=200' + '&obs_types=' + filtername
filedownload = wget.download(url,out=self.fileoutput,bar=None)
with open(filedownload) as fp:
self.html = BeautifulSoup(fp, 'html.parser')
        if self.noerror == 0:
self.available = True
self.title
self.comments
self.table
else:
self.available = False
@property
def title(self):
self.titlename = self.html.title.contents[0] + ' -- ' + self.nameID
return self.titlename
@property
def comments(self):
if self.available:
comment = self.html.find(id='obsinfo').contents[0].string
comment = comment + self.html.find(id='obsinfo').contents[1].string
comment = comment + self.html.find(id='obsinfo').contents[2].string.replace('\n \n','').replace('\n','').replace(' ',' ')
comment = comment + self.html.find(id='obsinfo').contents[3].string
comment = comment + self.html.find(id='obsinfo').contents[4].string.replace('\n \n \n \n ','')
comment = comment + self.html.find(id='obsinfo').contents[5].string
comment = comment + self.html.find(id='obsinfo').contents[6].string.replace('\n \n \n \n \n ','')
self.comment = comment
return self.comment
def isfilter(self,filtername='vis'):
"""
Return filter
"""
if filtername in ['Vis','I','R','B','V']:
f = filtername
else:
f = 'Vis'
return f
@property
def isccd(self):
"""
Return true if in ccd filter
"""
if self.filter in ['I','R','B','V']:
return True
else:
return False
@property
def data(self):
"""
Return data of html file observations
"""
data = []
if self.available:
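            # Flatten the HTML results table into '<>'-separated records of '|'-separated fields.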
data = self.html.table.contents[3].get_text().replace('\n','|').replace('Details...|||||||||Comp Star|Check Star|Transformed|Chart|Comment Codes|Notes|||||','').replace('|||||||||','<>').replace('|||||||','').replace('|||','').replace('| (','(').replace('| ','').split('<>')
return data
@property
def table(self):
"""
Return Table of html file observations
"""
Star = []
JD = []
Calendar_Date = []
Magnitude = []
Error = []
Filter = []
Observer = []
Comp_Star = []
Check_Star = []
Transformed = []
Chart = []
Comment_Codes = []
Notes = []
if self.available:
for ligne in self.data:
data = ligne.split('|')
if self.filter in data[5]:
Star.append(data[0])
JD.append(float(data[1]))
Calendar_Date.append(data[2])
if isinstance(data[3], int) or isinstance(data[3], float):
Magnitude.append(float(data[3]))
else:
Magnitude.append(float(data[3].replace('<','')))
Error.append(data[4])
Filter.append(data[5])
Observer.append(data[6])
Comp_Star.append(data[7])
Check_Star.append(data[8])
Transformed.append(data[9])
Chart.append(data[10])
Comment_Codes.append(data[11])
Notes.append(data[12])
if len(Star) > 0:
self.observation = Table([Star,JD,Calendar_Date,Magnitude,Error,Filter,Observer,Comp_Star,Check_Star,Transformed,Chart,Comment_Codes,Notes],
names=['Star', 'JD', 'Calendar Date', 'Magnitude', 'Error', 'Filter', 'Observer', 'Comp Star', 'Check Star', 'Transformed', 'Chart', 'Comment Codes', 'Notes'])
self._period = self.observation['JD'][0] - self.observation['JD'][len(self.observation)-1]
return self.observation
@property
def period(self):
"""
Return period JD
"""
if self.observation:
return self._period
@property
def observations(self):
"""
Return observations table
"""
if self.observation:
return self.observation
@property
def JDMinMax(self):
"""
Return min and max JD in observations table
"""
if self.observation:
return self.observation['JD'][len(self.observation)-1],self.observation['JD'][0]
@property
def magnitudeMinMax(self):
"""
Return min and max of magnitude in observations table
"""
if self.observation:
return min(self.observation['Magnitude']),max(self.observation['Magnitude'])
def plot(self):
"""
Plot observations table
"""
if self.available:
jd_min,jd_max = self.JDMinMax
mv_min,mv_max = self.magnitudeMinMax
x = []
for value in self.observations:
x.append(value['JD']-jd_min)
y = self.observations['Magnitude']
mymodel = np.poly1d(np.polyfit(x, y, 5))
myline = np.linspace(0, jd_max-jd_min, 2000)
plt.xlim(-.5,round(jd_max-jd_min)+.5)
plt.ylim(round(mv_min)-0.5,round(mv_max)+0.5)
plt.gca().invert_yaxis()
plt.scatter(x, y, c = 'black', s = 2, alpha = 0.5)
plt.plot(myline, mymodel(myline))
plt.title(self.title, loc='center')
plt.xlabel(str(int(jd_min))+'\nJD', fontsize = 12)
if self.filter == 'Vis':
plt.ylabel(r'$m_v$', fontsize = 12)
else:
plt.ylabel(self.filter, fontsize = 12)
plt.show()
else:
print(self.comment)
@property
def noerror(self):
"""
Error handling
"""
error_code = 0
if 'errors' in self.html.p.get_text():
error_code = 404
self.comment = 'The star ' + self.nameID + ' cannot be found in our database.'
else:
if 'no results' in self.html.p.get_text():
error_code = 404
self.comment = 'The star ' + self.nameID + ' cannot be found in our database.'
return error_code
class datadownload(object):
"""
    Class for AAVSO data download (https://www.aavso.org/data-download).
    fileinput = aavsodata.csv
filtername = Vis|B|V|R|I|TG|CV
"""
def __init__(self, filtername='Vis.', fileinput='aavsodata.csv'):
self.nameID = ''
self.filter = filtername
self.fileinput = fileinput
self.titlename = ''
self.comment = ''
self.observation = Table()
self.JDline = _JDline()
self.available = False
self._period = 0
self.filter = self.isfilter(filtername)
self.read
def isfilter(self,filtername='Vis.'):
"""
Return filter
"""
if filtername in ['Vis.','I','R','B','V','CV','TG']:
f = filtername
else:
f = 'Vis.'
return f
@property
def read(self):
"""
Return table of observation
"""
self.observation = Table.read(self.fileinput, format='ascii.csv')
if len(self.observation) > 0:
self.available = True
self.title
self.period
self.comments
else:
self.available = False
def filtername(self, filtername='Vis.'):
"""
Update filter
"""
if self.available:
self.filter = self.isfilter(filtername)
@property
def Vis(self):
if self.available:
self.filter = 'Vis.'
@property
def I(self):
if self.available:
self.filter = 'I'
@property
def R(self):
if self.available:
self.filter = 'R'
@property
def V(self):
if self.available:
self.filter = 'V'
@property
def B(self):
if self.available:
self.filter = 'B'
@property
def CV(self):
if self.available:
self.filter = 'CV'
@property
def TG(self):
if self.available:
self.filter = 'TG'
@property
def period(self):
"""
Return period JD
"""
if self.available:
self._period = self.observation['JD'][len(self.observation)-1] - self.observation['JD'][0]
return self._period
@property
def title(self):
if self.available:
self.titlename = 'AAVSO -- data-download -- ' + self.observation['Star Name'][0]
return self.titlename
@property
def comments(self):
if self.available:
observers = []
for i in self.observation['Observer Code'] :
if i not in observers:
observers.append(i)
comment = 'Showing ' + str(len(self.observation)) + ' observations for ' + self.observation['Star Name'][0] + ' from ' + str(len(observers)) + ' observers'
self.comment = comment
return self.comment
@property
def observations(self):
"""
Return observations table
"""
if self.observation:
return self.observation
@property
def JDMinMax(self):
"""
Return min and max JD in observations table
"""
if self.observation:
return self.observation['JD'][0],self.observation['JD'][len(self.observation)-1]
@property
def magnitudeMinMax(self):
"""
Return min and max of magnitude in observations table
"""
if self.observation:
mv = []
for value in self.observations:
if self.filter == value['Band']:
if '<' not in value['Magnitude']:
mv.append(float(value['Magnitude']))
return min(mv),max(mv)
@property
def JulianDay(self):
"""
Return JD table
"""
return self.JDline.JulianDay
@JulianDay.setter
def JulianDay(self,JDtable):
"""
Create JD table
"""
self.JDline.JulianDay = JDtable
def plot(self):
"""
Plot observations table
"""
if self.available:
jd_min,jd_max = self.JDMinMax
mv_min,mv_max = self.magnitudeMinMax
x = []
y = []
for value in self.observations:
if self.filter == value['Band']:
if '<' not in value['Magnitude']:
x.append(value['JD'])
y.append(float(value['Magnitude']))
plt.xlim(round(jd_min)-5,round(jd_max)+5)
plt.ylim(round(mv_min)-1,round(mv_max)+1)
plt.gca().invert_yaxis()
plt.scatter(x, y, c = 'black', s = 2, alpha = 0.2)
self.JDline.plot()
plt.title(self.title, loc='center')
plt.xlabel('JD', fontsize = 12)
            if self.filter == 'Vis.':  # visual band is stored as 'Vis.' in this class
plt.ylabel(r'$m_v$', fontsize = 12)
else:
plt.ylabel(self.filter, fontsize = 12)
plt.show()
class vsx(object):
"""
Class AAVSO VSX, return TABLE
"""
def __init__(self, nameID):
self.nameID = nameID
self.vsx_table = Table()
self.available = False
self.read
@property
def read(self):
"""
Return TABLE of Variable
"""
self.table
@property
def data(self):
"""
Return JSON data
Source : https://www.aavso.org/direct-web-query-vsxvsp
"""
if ' ' in self.nameID:
nameID = self.nameID.replace(' ','%20')
else:
nameID = self.nameID
url = "http://www.aavso.org/vsx/index.php"
params = {}
params['view']='api.object'
params['format']='json'
params['ident']=self.nameID
response = requests.get(url,params=params)
        if response.status_code >= 400:
self.available = False
else:
self.available = True
return response.json()
@property
def table(self):
"""
Return data table
"""
result = self.data['VSXObject']
header = []
value = []
types = []
for item in result:
value.append(result[item])
header.append(item)
types.append('str')
self.vsx_table = Table(names = header, dtype = types)
self.vsx_table.add_row(value)
@property
def observations(self):
"""
Return vsx table
"""
if self.available:
return self.vsx_table
@property
def name(self):
"""
Return vsx name
"""
if self.available:
return self.vsx_table['Name'][0]
@property
def coordinates(self):
"""
Return vsx RA,DEC (degree,degree)
"""
if self.available:
return float(self.vsx_table['RA2000']), float(self.vsx_table['Declination2000'])
@property
def hourdegree(self):
"""
Return vsx RA,DEC (Hour,Degree)
"""
if self.available:
c = SkyCoord(ra=float(self.vsx_table['RA2000'])*u.degree, dec=float(self.vsx_table['Declination2000'])*u.degree)
return c.ra.hour, c.dec.degree
class _JDline(object):
"""
Class line Julian Day
"""
def __init__(self):
self.JDtable = []
@property
def JulianDay(self):
"""
Return JD table
"""
return self.JDtable
@JulianDay.setter
def JulianDay(self,JDtable):
"""
Add JD's
"""
if len(JDtable) > 0:
for number in JDtable:
self.JDtable.append(number)
else:
self.JDtable.clear()
def plot(self):
"""
Plot line of JD's
"""
plt.vlines(self.JDtable, -30,30 , linestyles = 'solid', colors = 'grey', alpha = 0.3)
|
[
"matplotlib.pyplot.title",
"os.remove",
"astropy.table.Table",
"matplotlib.pyplot.show",
"numpy.polyfit",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.scatter",
"os.path.exists",
"matplotlib.pyplot.vlines",
"wget.download",
"requests.get",
"numpy.linspace",
"bs4.BeautifulSoup",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"astropy.table.Table.read"
] |
[((647, 654), 'astropy.table.Table', 'Table', ([], {}), '()\n', (652, 654), False, 'from astropy.table import Table\n'), ((675, 690), 'bs4.BeautifulSoup', 'BeautifulSoup', ([], {}), '()\n', (688, 690), False, 'from bs4 import BeautifulSoup\n'), ((1024, 1055), 'os.path.exists', 'os.path.exists', (['self.fileoutput'], {}), '(self.fileoutput)\n', (1038, 1055), False, 'import os\n'), ((1486, 1535), 'wget.download', 'wget.download', (['url'], {'out': 'self.fileoutput', 'bar': 'None'}), '(url, out=self.fileoutput, bar=None)\n', (1499, 1535), False, 'import wget\n'), ((8417, 8424), 'astropy.table.Table', 'Table', ([], {}), '()\n', (8422, 8424), False, 'from astropy.table import Table\n'), ((8953, 8999), 'astropy.table.Table.read', 'Table.read', (['self.fileinput'], {'format': '"""ascii.csv"""'}), "(self.fileinput, format='ascii.csv')\n", (8963, 8999), False, 'from astropy.table import Table\n'), ((13314, 13321), 'astropy.table.Table', 'Table', ([], {}), '()\n', (13319, 13321), False, 'from astropy.table import Table\n'), ((13954, 13986), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (13966, 13986), False, 'import requests\n'), ((14494, 14526), 'astropy.table.Table', 'Table', ([], {'names': 'header', 'dtype': 'types'}), '(names=header, dtype=types)\n', (14499, 14526), False, 'from astropy.table import Table\n'), ((16029, 16108), 'matplotlib.pyplot.vlines', 'plt.vlines', (['self.JDtable', '(-30)', '(30)'], {'linestyles': '"""solid"""', 'colors': '"""grey"""', 'alpha': '(0.3)'}), "(self.JDtable, -30, 30, linestyles='solid', colors='grey', alpha=0.3)\n", (16039, 16108), True, 'import matplotlib.pyplot as plt\n'), ((1070, 1096), 'os.remove', 'os.remove', (['self.fileoutput'], {}), '(self.fileoutput)\n', (1079, 1096), False, 'import os\n'), ((1599, 1631), 'bs4.BeautifulSoup', 'BeautifulSoup', (['fp', '"""html.parser"""'], {}), "(fp, 'html.parser')\n", (1612, 1631), False, 'from bs4 import BeautifulSoup\n'), ((5111, 5421), 'astropy.table.Table', 'Table', (['[Star, JD, Calendar_Date, Magnitude, Error, Filter, Observer, Comp_Star,\n Check_Star, Transformed, Chart, Comment_Codes, Notes]'], {'names': "['Star', 'JD', 'Calendar Date', 'Magnitude', 'Error', 'Filter', 'Observer',\n 'Comp Star', 'Check Star', 'Transformed', 'Chart', 'Comment Codes', 'Notes'\n ]"}), "([Star, JD, Calendar_Date, Magnitude, Error, Filter, Observer,\n Comp_Star, Check_Star, Transformed, Chart, Comment_Codes, Notes], names\n =['Star', 'JD', 'Calendar Date', 'Magnitude', 'Error', 'Filter',\n 'Observer', 'Comp Star', 'Check Star', 'Transformed', 'Chart',\n 'Comment Codes', 'Notes'])\n", (5116, 5421), False, 'from astropy.table import Table\n'), ((6800, 6837), 'numpy.linspace', 'np.linspace', (['(0)', '(jd_max - jd_min)', '(2000)'], {}), '(0, jd_max - jd_min, 2000)\n', (6811, 6837), True, 'import numpy as np\n'), ((7006, 7050), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""black"""', 's': '(2)', 'alpha': '(0.5)'}), "(x, y, c='black', s=2, alpha=0.5)\n", (7017, 7050), True, 'import matplotlib.pyplot as plt\n'), ((7115, 7150), 'matplotlib.pyplot.title', 'plt.title', (['self.title'], {'loc': '"""center"""'}), "(self.title, loc='center')\n", (7124, 7150), True, 'import matplotlib.pyplot as plt\n'), ((7388, 7398), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7396, 7398), True, 'import matplotlib.pyplot as plt\n'), ((12796, 12840), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""black"""', 's': '(2)', 'alpha': '(0.2)'}), "(x, y, c='black', s=2, 
alpha=0.2)\n", (12807, 12840), True, 'import matplotlib.pyplot as plt\n'), ((12890, 12925), 'matplotlib.pyplot.title', 'plt.title', (['self.title'], {'loc': '"""center"""'}), "(self.title, loc='center')\n", (12899, 12925), True, 'import matplotlib.pyplot as plt\n'), ((12938, 12967), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""JD"""'], {'fontsize': '(12)'}), "('JD', fontsize=12)\n", (12948, 12967), True, 'import matplotlib.pyplot as plt\n'), ((13144, 13154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13152, 13154), True, 'import matplotlib.pyplot as plt\n'), ((6758, 6777), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(5)'], {}), '(x, y, 5)\n', (6768, 6777), True, 'import numpy as np\n'), ((7267, 7299), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$m_v$"""'], {'fontsize': '(12)'}), "('$m_v$', fontsize=12)\n", (7277, 7299), True, 'import matplotlib.pyplot as plt\n'), ((7337, 7373), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.filter'], {'fontsize': '(12)'}), '(self.filter, fontsize=12)\n', (7347, 7373), True, 'import matplotlib.pyplot as plt\n'), ((13023, 13055), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$m_v$"""'], {'fontsize': '(12)'}), "('$m_v$', fontsize=12)\n", (13033, 13055), True, 'import matplotlib.pyplot as plt\n'), ((13093, 13129), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.filter'], {'fontsize': '(12)'}), '(self.filter, fontsize=12)\n', (13103, 13129), True, 'import matplotlib.pyplot as plt\n'), ((6969, 6978), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6976, 6978), True, 'import matplotlib.pyplot as plt\n'), ((12759, 12768), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12766, 12768), True, 'import matplotlib.pyplot as plt\n')]
|
import sys
sys.path.append('../core')
import argparse
import torch
import cv2
import numpy as np
from viz import sim3_visualization
from lietorch import SO3, SE3, Sim3
from networks.sim3_net import Sim3Net
def normalize_images(images):
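    # assumes BGR input (cv2.imread); reorder to RGB, scale to [0, 1] and apply ImageNet mean/std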
images = images[:, :, [2,1,0]]
mean = torch.as_tensor([0.485, 0.456, 0.406], device=images.device)
std = torch.as_tensor([0.229, 0.224, 0.225], device=images.device)
return (images/255.0).sub_(mean[:, None, None]).div_(std[:, None, None])
def load_example(i=0):
""" get demo example """
DEPTH_SCALE = 5.0
if i==0:
image1 = cv2.imread('assets/image1.png')
image2 = cv2.imread('assets/image2.png')
depth1 = np.load('assets/depth1.npy') / DEPTH_SCALE
depth2 = np.load('assets/depth2.npy') / DEPTH_SCALE
elif i==1:
image1 = cv2.imread('assets/image3.png')
image2 = cv2.imread('assets/image4.png')
depth1 = np.load('assets/depth3.npy') / DEPTH_SCALE
depth2 = np.load('assets/depth4.npy') / DEPTH_SCALE
images = np.stack([image1, image2], 0)
images = torch.from_numpy(images).permute(0,3,1,2)
depths = np.stack([depth1, depth2], 0)
depths = torch.from_numpy(depths).float()
intrinsics = np.array([320.0, 320.0, 320.0, 240.0])
intrinsics = np.tile(intrinsics[None], (2,1))
intrinsics = torch.from_numpy(intrinsics).float()
return images[None].cuda(), depths[None].cuda(), intrinsics[None].cuda()
@torch.no_grad()
def demo(model, index=0):
images, depths, intrinsics = load_example(index)
# initial transformation estimate
if args.transformation == 'SE3':
Gs = SE3.Identity(1, 2, device='cuda')
elif args.transformation == 'Sim3':
Gs = Sim3.Identity(1, 2, device='cuda')
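        # randomly rescale one depth map by a factor in [0.5, 2]; the Sim3 fit has to recover this scale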
depths[:,0] *= 2**(2*torch.rand(1) - 1.0).cuda()
images1 = normalize_images(images)
ests, _ = model(Gs, images1, depths, intrinsics, num_steps=12)
# only care about last transformation
Gs = ests[-1]
T = Gs[:,0] * Gs[:,1].inv()
T = T[0].matrix().double().cpu().numpy()
sim3_visualization(T, images, depths, intrinsics)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--transformation', default='SE3', help='checkpoint to restore')
parser.add_argument('--ckpt', help='checkpoint to restore')
args = parser.parse_args()
model = Sim3Net(args)
model.load_state_dict(torch.load(args.ckpt))
model.cuda()
model.eval()
# run two demos
demo(model, 0)
demo(model, 1)
|
[
"sys.path.append",
"numpy.stack",
"numpy.load",
"argparse.ArgumentParser",
"lietorch.SE3.Identity",
"lietorch.Sim3.Identity",
"torch.load",
"viz.sim3_visualization",
"cv2.imread",
"networks.sim3_net.Sim3Net",
"numpy.array",
"numpy.tile",
"torch.rand",
"torch.as_tensor",
"torch.no_grad",
"torch.from_numpy"
] |
[((11, 37), 'sys.path.append', 'sys.path.append', (['"""../core"""'], {}), "('../core')\n", (26, 37), False, 'import sys\n'), ((1469, 1484), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1482, 1484), False, 'import torch\n'), ((285, 345), 'torch.as_tensor', 'torch.as_tensor', (['[0.485, 0.456, 0.406]'], {'device': 'images.device'}), '([0.485, 0.456, 0.406], device=images.device)\n', (300, 345), False, 'import torch\n'), ((356, 416), 'torch.as_tensor', 'torch.as_tensor', (['[0.229, 0.224, 0.225]'], {'device': 'images.device'}), '([0.229, 0.224, 0.225], device=images.device)\n', (371, 416), False, 'import torch\n'), ((1052, 1081), 'numpy.stack', 'np.stack', (['[image1, image2]', '(0)'], {}), '([image1, image2], 0)\n', (1060, 1081), True, 'import numpy as np\n'), ((1151, 1180), 'numpy.stack', 'np.stack', (['[depth1, depth2]', '(0)'], {}), '([depth1, depth2], 0)\n', (1159, 1180), True, 'import numpy as np\n'), ((1245, 1283), 'numpy.array', 'np.array', (['[320.0, 320.0, 320.0, 240.0]'], {}), '([320.0, 320.0, 320.0, 240.0])\n', (1253, 1283), True, 'import numpy as np\n'), ((1301, 1334), 'numpy.tile', 'np.tile', (['intrinsics[None]', '(2, 1)'], {}), '(intrinsics[None], (2, 1))\n', (1308, 1334), True, 'import numpy as np\n'), ((2089, 2138), 'viz.sim3_visualization', 'sim3_visualization', (['T', 'images', 'depths', 'intrinsics'], {}), '(T, images, depths, intrinsics)\n', (2107, 2138), False, 'from viz import sim3_visualization\n'), ((2181, 2206), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2204, 2206), False, 'import argparse\n'), ((2404, 2417), 'networks.sim3_net.Sim3Net', 'Sim3Net', (['args'], {}), '(args)\n', (2411, 2417), False, 'from networks.sim3_net import Sim3Net\n'), ((599, 630), 'cv2.imread', 'cv2.imread', (['"""assets/image1.png"""'], {}), "('assets/image1.png')\n", (609, 630), False, 'import cv2\n'), ((648, 679), 'cv2.imread', 'cv2.imread', (['"""assets/image2.png"""'], {}), "('assets/image2.png')\n", (658, 679), False, 'import cv2\n'), ((1654, 1687), 'lietorch.SE3.Identity', 'SE3.Identity', (['(1)', '(2)'], {'device': '"""cuda"""'}), "(1, 2, device='cuda')\n", (1666, 1687), False, 'from lietorch import SO3, SE3, Sim3\n'), ((2444, 2465), 'torch.load', 'torch.load', (['args.ckpt'], {}), '(args.ckpt)\n', (2454, 2465), False, 'import torch\n'), ((697, 725), 'numpy.load', 'np.load', (['"""assets/depth1.npy"""'], {}), "('assets/depth1.npy')\n", (704, 725), True, 'import numpy as np\n'), ((757, 785), 'numpy.load', 'np.load', (['"""assets/depth2.npy"""'], {}), "('assets/depth2.npy')\n", (764, 785), True, 'import numpy as np\n'), ((837, 868), 'cv2.imread', 'cv2.imread', (['"""assets/image3.png"""'], {}), "('assets/image3.png')\n", (847, 868), False, 'import cv2\n'), ((886, 917), 'cv2.imread', 'cv2.imread', (['"""assets/image4.png"""'], {}), "('assets/image4.png')\n", (896, 917), False, 'import cv2\n'), ((1095, 1119), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (1111, 1119), False, 'import torch\n'), ((1194, 1218), 'torch.from_numpy', 'torch.from_numpy', (['depths'], {}), '(depths)\n', (1210, 1218), False, 'import torch\n'), ((1351, 1379), 'torch.from_numpy', 'torch.from_numpy', (['intrinsics'], {}), '(intrinsics)\n', (1367, 1379), False, 'import torch\n'), ((1742, 1776), 'lietorch.Sim3.Identity', 'Sim3.Identity', (['(1)', '(2)'], {'device': '"""cuda"""'}), "(1, 2, device='cuda')\n", (1755, 1776), False, 'from lietorch import SO3, SE3, Sim3\n'), ((935, 963), 'numpy.load', 'np.load', (['"""assets/depth3.npy"""'], {}), 
"('assets/depth3.npy')\n", (942, 963), True, 'import numpy as np\n'), ((995, 1023), 'numpy.load', 'np.load', (['"""assets/depth4.npy"""'], {}), "('assets/depth4.npy')\n", (1002, 1023), True, 'import numpy as np\n'), ((1806, 1819), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1816, 1819), False, 'import torch\n')]
|
import numpy as np
import matplotlib.cm as cm
import matplotlib.pylab as plt
def generate_colors(count):
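    # sample `count` evenly spaced RGBA colors from the 'gist_rainbow' colormap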
cm = plt.get_cmap('gist_rainbow')
return np.array([cm(1. * i / count) for i in range(count)]).astype(float)
def get_q_color(id, ant_num):
from PyQt4 import QtGui
r, g, b = get_color(id, ant_num)
return QtGui.QColor(r, g, b)
def get_color(id, ant_num):
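    # spread `ant_num` hues over the rainbow colormap and return the id-th one as 8-bit RGB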
colors = cm.rainbow(np.linspace(0, 1, ant_num))
return int(colors[id][0] * 255), int(colors[id][1] * 255), int(colors[id][2] * 255)
def get_opacity(current_depth, max_depth):
return float((max_depth - current_depth) + float(current_depth/max_depth))/max_depth/2
def get_contrast_color(r, g, b):
if (r+g+b)/3 < 128:
return 250, 250, 255
return 5, 0, 5
|
[
"PyQt4.QtGui.QColor",
"matplotlib.cm",
"numpy.linspace",
"matplotlib.pylab.get_cmap"
] |
[((116, 144), 'matplotlib.pylab.get_cmap', 'plt.get_cmap', (['"""gist_rainbow"""'], {}), "('gist_rainbow')\n", (128, 144), True, 'import matplotlib.pylab as plt\n'), ((331, 352), 'PyQt4.QtGui.QColor', 'QtGui.QColor', (['r', 'g', 'b'], {}), '(r, g, b)\n', (343, 352), False, 'from PyQt4 import QtGui\n'), ((407, 433), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ant_num'], {}), '(0, 1, ant_num)\n', (418, 433), True, 'import numpy as np\n'), ((166, 185), 'matplotlib.cm', 'cm', (['(1.0 * i / count)'], {}), '(1.0 * i / count)\n', (168, 185), True, 'import matplotlib.cm as cm\n')]
|
import numpy as np
from SOM import SOM
data = np.loadtxt('Data/output.txt', delimiter=';', usecols=range(40))
###SOM
som = SOM(10, 10) # initialize the SOM
som.fit(data, 10000, decay='hill')
# som = SOM(10, 10) # initialize the SOM
# som.load('Data/SOM')
targets = np.loadtxt('Data/target.txt', dtype='int')
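# labels in target.txt appear to be 1-based; shift them to 0-based indices for plotting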
targets = targets - 1
names = ['Автомобиль',
'Грузовик 2',
'Грузовик 3',
'Грузовик 4+',
'Автобус 2',
'Автобус 3',
'<NAME>'
]
# now visualize the learned representation with the class labels
som.plot_point_map(data, targets, names, filename='images/SOM/som.png')
# for name in names:
# som.plot_class_density(data, targets, t=names.index(name), name=name, filename='images/SOM/density ' + name + '.png')
# som.save('SOM')
|
[
"SOM.SOM",
"numpy.loadtxt"
] |
[((126, 137), 'SOM.SOM', 'SOM', (['(10)', '(10)'], {}), '(10, 10)\n', (129, 137), False, 'from SOM import SOM\n'), ((272, 314), 'numpy.loadtxt', 'np.loadtxt', (['"""Data/target.txt"""'], {'dtype': '"""int"""'}), "('Data/target.txt', dtype='int')\n", (282, 314), True, 'import numpy as np\n')]
|
from .tractography import Tractography
from .trackvis import tractography_from_trackvis_file, tractography_to_trackvis_file
from warnings import warn
import numpy
__all__ = [
'Tractography',
'tractography_from_trackvis_file', 'tractography_to_trackvis_file',
'tractography_from_files',
'tractography_from_file', 'tractography_to_file',
]
try:
__all__ += [
'tractography_from_vtk_files', 'tractography_to_vtk_file',
'vtkPolyData_to_tracts', 'tracts_to_vtkPolyData'
]
from .vtkInterface import (
tractography_from_vtk_files, tractography_to_vtk_file,
vtkPolyData_to_tracts, tracts_to_vtkPolyData
)
except ImportError:
warn(
'VTK support not installed in this python distribution, '
'VTK files will not be read or written'
)
def tractography_from_files(filenames):
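    """Load one or more tractography files and merge them into a single Tractography object."""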
if isinstance(filenames, str):
filenames = [filenames]
tracts = tractography_from_file(filenames[0])
for filename in filenames[1:]:
tracts_ = tractography_from_file(filename)
tracts.append(tracts_.tracts(), tracts_.tracts_data())
return tracts
def tractography_from_file(filename):
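    """Read a single tractography file, dispatching on the extension (.trk, .vtk, .vtp)."""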
if filename.endswith('trk'):
return tractography_from_trackvis_file(filename)
elif filename.endswith('vtk') or filename.endswith('vtp'):
if 'tractography_from_vtk_files' in __all__:
return tractography_from_vtk_files(filename)
else:
raise IOError("No VTK support installed, VTK files could not be read")
else:
raise IOError("File format not supported")
def tractography_to_file(filename, tractography, **kwargs):
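    """Write a tractography to .trk or .vtk/.vtp; for .trk, a missing affine/image_dimensions falls back to identity/ones."""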
if filename.endswith('trk'):
if 'affine' not in kwargs or kwargs['affine'] is None:
if (
hasattr(tractography, 'affine') and
tractography.affine is not None
):
kwargs['affine'] = tractography.affine
else:
warn('Setting affine of trk file to the identity')
kwargs['affine'] = numpy.eye(4)
if (
'image_dimensions' not in kwargs or
kwargs['image_dimensions'] is None
):
if (
hasattr(tractography, 'image_dims') and
tractography.image_dims is not None
):
kwargs['image_dimensions'] = tractography.image_dims
else:
warn('Setting image_dimensions of trk file to: 1 1 1')
kwargs['image_dimensions'] = numpy.ones(3)
return tractography_to_trackvis_file(filename, tractography, **kwargs)
elif filename.endswith('vtk') or filename.endswith('vtp'):
if 'tractography_from_vtk_files' in __all__:
return tractography_to_vtk_file(filename, tractography, **kwargs)
else:
raise IOError("No VTK support installed, VTK files could not be read")
else:
raise IOError("File format not supported")
|
[
"warnings.warn",
"numpy.ones",
"numpy.eye"
] |
[((688, 798), 'warnings.warn', 'warn', (['"""VTK support not installed in this python distribution, VTK files will not be read or written"""'], {}), "(\n 'VTK support not installed in this python distribution, VTK files will not be read or written'\n )\n", (692, 798), False, 'from warnings import warn\n'), ((1992, 2042), 'warnings.warn', 'warn', (['"""Setting affine of trk file to the identity"""'], {}), "('Setting affine of trk file to the identity')\n", (1996, 2042), False, 'from warnings import warn\n'), ((2078, 2090), 'numpy.eye', 'numpy.eye', (['(4)'], {}), '(4)\n', (2087, 2090), False, 'import numpy\n'), ((2462, 2516), 'warnings.warn', 'warn', (['"""Setting image_dimensions of trk file to: 1 1 1"""'], {}), "('Setting image_dimensions of trk file to: 1 1 1')\n", (2466, 2516), False, 'from warnings import warn\n'), ((2562, 2575), 'numpy.ones', 'numpy.ones', (['(3)'], {}), '(3)\n', (2572, 2575), False, 'import numpy\n')]
|
# encoding: utf-8
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/wdika/NeMo/blob/main/tests/core/test_optimizers_schedulers.py
import math
import os
import random
import shutil
from abc import ABC
import numpy as np
import omegaconf
import pytest
import pytorch_lightning as pl
import torch
import torch.optim
from mridc.core import optim
from mridc.core.conf import optimizers
from mridc.core.conf.optimizers import NovogradParams, SGDParams
from mridc.core.conf.schedulers import CosineAnnealingParams
from mridc.core.optim.lr_scheduler import AVAILABLE_SCHEDULERS, SquareRootAnnealing
from mridc.core.optim.novograd import Novograd
from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer
from mridc.utils import logging
class TempModel(torch.nn.Module):
"""Create a dummy model for testing."""
def __init__(self):
super(TempModel, self).__init__()
self.layer = torch.nn.Linear(5, 1)
def forward(self, x):
"""Forward pass."""
x = self.layer(x)
return x
class OptCounter(torch.optim.SGD):
"""A simple optimizer that counts the number of calls to step()."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for group in self.param_groups:
group.setdefault("count", 0)
def step(self, closure=None):
"""Performs a single optimization step."""
for group in self.param_groups:
group["count"] += 1
super().step(closure)
class RandomDataset(torch.utils.data.Dataset):
"""A dataset that returns random tensors."""
def __init__(self, dataset_len):
super().__init__()
self.__dataset_len = dataset_len
def __getitem__(self, *args):
return torch.randn(2)
def __len__(self):
return self.__dataset_len
class ExampleModel(pl.LightningModule, ABC):
"""A dummy model for testing."""
def __init__(self, batch_size, dataset_len, drop_last, max_steps):
super().__init__()
self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1)
self.batch_size = batch_size
self.dataset_len = dataset_len
self.drop_last = drop_last
self.max_steps = max_steps
self.my_opt = None
def train_dataloader(self):
"""Return a training data loader."""
dataset = RandomDataset(self.dataset_len)
return torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, drop_last=self.drop_last)
def training_step(self, batch, batch_idx):
"""Set training step."""
output = self.l1(batch)
output = torch.nn.functional.l1_loss(output, torch.ones(output.size()).to(output.device))
return {"loss": output}
def configure_optimizers(self):
"""Configure optimizers for the model."""
self.my_opt = OptCounter(self.parameters(), lr=0.02)
return self.my_opt
class Callback(pl.callbacks.Callback):
"""A dummy callback for testing."""
@pl.utilities.distributed.rank_zero_only
def on_train_end(self, trainer, module):
"""On train end, check that the number of steps is correct"""
count = module.my_opt.param_groups[0]["count"]
if trainer.global_step != count or trainer.global_step != module.max_steps:
logging.debug(f"max_epochs: {trainer.max_epochs}")
logging.debug(f"accumulate_grad_batches: {trainer.accumulate_grad_batches}")
logging.debug(f"limit_train_batches: {trainer.limit_train_batches}")
logging.debug(f"num_processes: {trainer.num_processes}")
logging.debug(f"batch_size: {module.batch_size}")
logging.debug(f"dataset_len: {module.dataset_len}")
logging.debug(f"drop_last: {module.drop_last}")
logging.debug(f"{len(trainer.train_dataloader)}")
logging.debug(f"{trainer.num_training_batches}")
self.assert_counts(trainer, module, count)
@staticmethod
def assert_counts(trainer, module, count):
"""Assert that the number of steps is correct"""
if trainer.global_step != count:
raise AssertionError(f"{trainer.global_step} != {count} != {module.max_steps}")
if trainer.global_step != module.max_steps:
raise AssertionError(f"{trainer.global_step} != {count} != {module.max_steps}")
class SchedulerNoOpCallback(Callback):
"""A dummy callback for testing."""
@staticmethod
def on_train_batch_end(trainer: pl.Trainer, pl_module, outputs, batch, batch_idx):
"""On each training batch end"""
# pl_module.max_steps is "original" max steps without trainer extra steps.
if (trainer.global_step + 1) % 3 == 0 and (trainer.global_step + 1) < pl_module.max_steps:
schedulers = trainer.lr_schedulers
for scheduler in schedulers:
                # Decrement the counter by 2, then perform a scheduler.step() to perform a no-op
# as well as update the optimizer lr in all param groups
scheduler["scheduler"].last_epoch -= 2
scheduler["scheduler"].step()
# Increase the max step count by 1
trainer.fit_loop.max_steps = trainer.fit_loop.max_steps + 1
def assert_counts(self, trainer, module, count):
"""This is a no-op callback, so the counts should not change"""
num_skips = torch.div(module.max_steps, 3, rounding_mode="trunc")
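        # the callback rewinds the scheduler every third step and extends max_steps by one each time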
extra_steps = module.max_steps + num_skips
if trainer.global_step != count:
raise AssertionError(f"{trainer.global_step} != {count} != {extra_steps}")
if trainer.global_step != extra_steps:
raise AssertionError(f"{trainer.global_step} != {count} != {extra_steps}")
class TestOptimizersSchedulers:
"""Test the optimizers and schedulers."""
INITIAL_LR = 0.1
MIN_LR = 1e-3
MAX_STEPS = 10
# fused_adam is looking for CUDA and this test is being run on CPU only tests
@pytest.mark.unit
def test_get_optimizer(self):
"""Test that the optimizer is correctly created"""
model = TempModel()
for opt_name in AVAILABLE_OPTIMIZERS:
if opt_name == "fused_adam" and not torch.cuda.is_available():
continue
opt_cls = get_optimizer(opt_name)
if opt_name == "adafactor":
# Adafactor's default mode uses relative_step without any lr.
opt = opt_cls(model.parameters())
else:
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
if not isinstance(opt, AVAILABLE_OPTIMIZERS[opt_name]):
raise AssertionError
@pytest.mark.unit
def test_register_optimizer(self):
"""Test that we can register a new optimizer"""
class TempOpt(torch.optim.SGD):
"""A dummy optimizer"""
class TempOptParams(optimizers.SGDParams):
"""A dummy optimizer params"""
register_optimizer("TempOpt", TempOpt, TempOptParams)
model = TempModel()
opt_cls = get_optimizer("TempOpt")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
if not isinstance(opt, TempOpt):
raise AssertionError
@pytest.mark.unit
def test_optim_config_parse_bypass(self):
"""Test that the optimizer config is parsed correctly when the optimizer is not registered."""
basic_optim_config = {"weight_decay": 0.001, "betas": [0.8, 0.5]}
parsed_params = parse_optimizer_args("novograd", basic_optim_config)
if parsed_params["weight_decay"] != basic_optim_config["weight_decay"]:
raise AssertionError
if parsed_params["betas"][0] != basic_optim_config["betas"][0]:
raise AssertionError
if parsed_params["betas"][1] != basic_optim_config["betas"][1]:
raise AssertionError
dict_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = parse_optimizer_args("novograd", dict_config)
if parsed_params["weight_decay"] != dict_config["weight_decay"]:
raise AssertionError
if parsed_params["betas"][0] != dict_config["betas"][0]:
raise AssertionError
if parsed_params["betas"][1] != dict_config["betas"][1]:
raise AssertionError
@pytest.mark.unit
def test_optim_config_parse_arg_by_target(self):
"""Test that the optimizer config is parsed correctly by target."""
basic_optim_config = {
"_target_": "mridc.core.conf.optimizers.NovogradParams",
"params": {"weight_decay": 0.001, "betas": [0.8, 0.5]},
}
basic_optim_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = parse_optimizer_args("novograd", basic_optim_config)
if parsed_params["weight_decay"] != basic_optim_config["params"]["weight_decay"]:
raise AssertionError
if parsed_params["betas"][0] != basic_optim_config["params"]["betas"][0]:
raise AssertionError
if parsed_params["betas"][1] != basic_optim_config["params"]["betas"][1]:
raise AssertionError
dict_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = parse_optimizer_args("novograd", dict_config)
if parsed_params["weight_decay"] != dict_config["params"]["weight_decay"]:
raise AssertionError
if parsed_params["betas"][0] != dict_config["params"]["betas"][0]:
raise AssertionError
if parsed_params["betas"][1] != dict_config["params"]["betas"][1]:
raise AssertionError
# Names are ignored when passing class path
# This will be captured during optimizer instantiation
output_config = parse_optimizer_args("sgd", dict_config)
sgd_config = vars(SGDParams())
novograd_config = vars(NovogradParams())
if set(output_config.keys()) == set(sgd_config.keys()):
raise AssertionError
if set(output_config.keys()) != set(novograd_config):
raise AssertionError
@pytest.mark.unit
def test_get_scheduler(self):
"""Test that get_scheduler returns the correct scheduler class."""
model = TempModel()
optimizer = Novograd(model.parameters(), lr=self.INITIAL_LR)
for sched_name in AVAILABLE_SCHEDULERS:
sched_cls = optim.lr_scheduler.get_scheduler(sched_name)
try:
sched = sched_cls(optimizer)
if not isinstance(sched, AVAILABLE_SCHEDULERS[sched_name]):
raise AssertionError
continue
except Exception:
pass
try:
sched = sched_cls(optimizer, max_steps=self.MAX_STEPS)
if not isinstance(sched, AVAILABLE_SCHEDULERS[sched_name]):
raise AssertionError
continue
except Exception:
pass
@pytest.mark.unit
def test_register_scheduler(self):
"""Test registering a new scheduler"""
class TempSched(optim.lr_scheduler.CosineAnnealing):
"""Temporary scheduler class."""
class TempSchedParams(CosineAnnealingParams):
"""Temporary scheduler class."""
optim.lr_scheduler.register_scheduler("TempSched", TempSched, TempSchedParams)
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
sched_cls = optim.lr_scheduler.get_scheduler("TempSched")
sched = sched_cls(opt, max_steps=self.MAX_STEPS)
if not isinstance(sched, TempSched):
raise AssertionError
@pytest.mark.unit
def test_sched_config_parse_simple(self):
"""Test that scheduler config is parsed correctly"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
basic_sched_config = {"name": "CosineAnnealing", "max_steps": 10}
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
if not isinstance(scheduler_setup["scheduler"], optim.lr_scheduler.CosineAnnealing):
raise AssertionError
dict_config = omegaconf.OmegaConf.create(basic_sched_config)
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
if not isinstance(scheduler_setup["scheduler"], optim.lr_scheduler.CosineAnnealing):
raise AssertionError
@pytest.mark.unit
def test_sched_config_parse_from_cls(self):
"""Test that we can parse a scheduler from a class"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
basic_sched_config = {
"_target_": "mridc.core.conf.schedulers.CosineAnnealingParams",
"params": {"min_lr": 0.1},
"max_steps": self.MAX_STEPS,
}
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
if not isinstance(scheduler_setup["scheduler"], optim.lr_scheduler.CosineAnnealing):
raise AssertionError
dict_config = omegaconf.OmegaConf.create(basic_sched_config)
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
if not isinstance(scheduler_setup["scheduler"], optim.lr_scheduler.CosineAnnealing):
raise AssertionError
@pytest.mark.unit
def test_WarmupPolicy(self):
"""Test WarmupPolicy"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.WarmupPolicy(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.WarmupPolicy(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_WarmupHoldPolicy(self):
"""Test WarmupHoldPolicy"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.WarmupHoldPolicy(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr <= self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.WarmupHoldPolicy(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr <= self.MIN_LR:
raise AssertionError
# Warmup + Hold steps available
policy = optim.lr_scheduler.WarmupHoldPolicy(
opt, warmup_steps=5, hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_WarmupAnnealing(self):
"""Test that the warmup annealing policy works as expected."""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.WarmupAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.WarmupAnnealing(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup + Hold steps available
policy = optim.lr_scheduler.WarmupHoldPolicy(
opt, warmup_steps=5, hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_SquareAnnealing(self):
"""Test SquareAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.SquareAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.SquareAnnealing(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_SquareRootAnnealing(self):
"""Test SquareRootAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = SquareRootAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.SquareRootAnnealing(
opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_CosineAnnealing(self):
"""Test CosineAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.CosineAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.CosineAnnealing(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup + Constant steps available
policy = optim.lr_scheduler.CosineAnnealing(
opt, warmup_steps=3, constant_steps=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 3:
if policy.get_last_lr()[0] > self.INITIAL_LR + 1e-5:
raise AssertionError
elif 3 < i <= 8:
if policy.get_last_lr()[0] != policy._get_lr(i)[0]:
raise AssertionError
elif policy.get_last_lr()[0] != self.MIN_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_PolynomialDecayAnnealing(self):
"""Test PolynomialDecayAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.PolynomialDecayAnnealing(
opt, power=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.PolynomialDecayAnnealing(
opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_PolynomialHoldDecayAnnealing(self):
"""Test PolynomialHoldDecayAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
opt, power=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr <= self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
opt, power=2, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
# Warmup + Hold steps available
policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
opt, warmup_steps=5, hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR, power=2
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif i <= 8:
if policy.get_last_lr()[0] < self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_InverseSquareRootAnnealing(self):
"""Test InverseSquareRootAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.InverseSquareRootAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.InverseSquareRootAnnealing(
opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_CosineAnnealing_with_noop_steps(self):
"""Test CosineAnnealing with noop steps."""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.CosineAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
update_steps = 0
for i in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
# Perform a No-Op for scheduler every 2 steps
if i % 2 == 0:
policy.last_epoch -= 1
else:
update_steps += 1
policy.step()
update_steps += 1
if update_steps >= self.MAX_STEPS:
raise AssertionError
final_lr = policy.get_last_lr()[0]
if final_lr <= self.MIN_LR:
raise AssertionError
# update step = true number of updates performed after some number of skipped steps
true_end_lr = policy._get_lr(step=update_steps)[0]
if final_lr != true_end_lr:
raise AssertionError
@pytest.mark.unit
@pytest.mark.run_only_on("CPU")
def test_max_step_computation(self):
"""Test that the max_step computation is correct."""
def train(
max_epochs, accumulate_grad_batches, limit_train_batches, num_processes, batch_size, dataset_len, drop_last
):
"""Set up the training environment"""
trainer = pl.Trainer(
max_epochs=max_epochs,
strategy="ddp_spawn",
accelerator="cpu",
num_processes=num_processes,
accumulate_grad_batches=accumulate_grad_batches,
limit_train_batches=limit_train_batches,
enable_checkpointing=False,
progress_bar_refresh_rate=0,
weights_summary=None,
)
max_steps = optim.lr_scheduler.compute_max_steps(
max_epochs,
accumulate_grad_batches,
limit_train_batches,
num_processes,
dataset_len,
batch_size,
drop_last,
)
model = ExampleModel(batch_size, dataset_len, drop_last, max_steps)
trainer.callbacks.append(Callback())
trainer.fit(model)
# This test will break once we and lightning upgrade to pytorch 1.7.0 due to a bug fix in pytorch 1.7.0
train(
31,
accumulate_grad_batches=1,
limit_train_batches=1.0,
num_processes=9,
batch_size=60,
dataset_len=1613,
drop_last=True,
)
train(
5,
accumulate_grad_batches=1,
limit_train_batches=0.17382691901706027,
num_processes=4,
batch_size=97,
dataset_len=498,
drop_last=False,
)
train(
5,
accumulate_grad_batches=8,
limit_train_batches=0.1663306588594945,
num_processes=4,
batch_size=54,
dataset_len=629,
drop_last=True,
)
train(
5,
accumulate_grad_batches=1,
limit_train_batches=0.2121376533631948,
num_processes=1,
batch_size=68,
dataset_len=488,
drop_last=False,
)
for _ in range(5):
drop_last = bool(random.randint(0, 1))
accumulate_grad_batches = random.randint(1, 10)
limit_train_batches_int = random.randint(1, 10)
limit_train_batches_float = random.uniform(0, 1)
limit_train_batches = random.choice([limit_train_batches_int, limit_train_batches_float])
max_epochs = random.randint(4, 20)
num_processes = random.randint(1, 5)
dataset_len = random.randint(20, num_processes * 500)
batch_size = random.randint(
math.ceil(5.0 / num_processes), min(np.floor_divide(dataset_len, num_processes), 128)
)
train(
max_epochs,
accumulate_grad_batches,
limit_train_batches,
num_processes,
batch_size,
dataset_len,
drop_last,
)
@pytest.mark.unit
@pytest.mark.run_only_on("CPU")
def test_max_step_computation_with_sched_no_ops(self):
"""Test that max_step is computed correctly when scheduler has no_ops"""
def train(
max_steps, accumulate_grad_batches, limit_train_batches, num_processes, batch_size, dataset_len, drop_last
):
"""Set up trainer and model"""
trainer = pl.Trainer(
max_steps=max_steps,
strategy="ddp_spawn",
accelerator="cpu",
num_processes=num_processes,
accumulate_grad_batches=accumulate_grad_batches,
limit_train_batches=limit_train_batches,
enable_checkpointing=False,
progress_bar_refresh_rate=0,
weights_summary=None,
)
model = ExampleModel(batch_size, dataset_len, drop_last, max_steps)
trainer.callbacks.append(SchedulerNoOpCallback())
trainer.fit(model)
# This test will break once we and lightning upgrade to pytorch 1.7.0 due to a bug fix in pytorch 1.7.0
train(
max_steps=20,
accumulate_grad_batches=1,
limit_train_batches=1.0,
num_processes=4,
batch_size=60,
dataset_len=2000,
drop_last=True,
)
@staticmethod
def test_remove_logs_left():
"""Remove logs left by the trainer."""
if os.path.exists(os.path.join(os.getcwd(), "lightning_logs")):
shutil.rmtree(os.path.join(os.getcwd(), "lightning_logs"))
|
[
"pytorch_lightning.Trainer",
"mridc.core.optim.lr_scheduler.register_scheduler",
"torch.randn",
"mridc.core.optim.optimizers.register_optimizer",
"mridc.core.optim.lr_scheduler.WarmupHoldPolicy",
"pytest.mark.run_only_on",
"mridc.core.optim.lr_scheduler.WarmupPolicy",
"mridc.core.optim.lr_scheduler.SquareAnnealing",
"mridc.core.optim.lr_scheduler.get_scheduler",
"mridc.core.optim.optimizers.get_optimizer",
"random.randint",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"mridc.core.optim.lr_scheduler.PolynomialDecayAnnealing",
"mridc.core.optim.lr_scheduler.compute_max_steps",
"torch.nn.modules.Linear",
"math.ceil",
"numpy.floor_divide",
"mridc.core.optim.lr_scheduler.SquareRootAnnealing",
"mridc.core.optim.lr_scheduler.CosineAnnealing",
"mridc.core.conf.optimizers.SGDParams",
"mridc.core.conf.optimizers.NovogradParams",
"torch.cuda.is_available",
"mridc.utils.logging.debug",
"mridc.core.optim.lr_scheduler.InverseSquareRootAnnealing",
"random.uniform",
"os.getcwd",
"mridc.core.optim.lr_scheduler.PolynomialHoldDecayAnnealing",
"random.choice",
"mridc.core.optim.optimizers.parse_optimizer_args",
"omegaconf.OmegaConf.create",
"mridc.core.optim.lr_scheduler.WarmupAnnealing",
"mridc.core.optim.lr_scheduler.prepare_lr_scheduler",
"torch.div"
] |
[((32233, 32263), 'pytest.mark.run_only_on', 'pytest.mark.run_only_on', (['"""CPU"""'], {}), "('CPU')\n", (32256, 32263), False, 'import pytest\n'), ((35520, 35550), 'pytest.mark.run_only_on', 'pytest.mark.run_only_on', (['"""CPU"""'], {}), "('CPU')\n", (35543, 35550), False, 'import pytest\n'), ((977, 998), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(1)'], {}), '(5, 1)\n', (992, 998), False, 'import torch\n'), ((1813, 1827), 'torch.randn', 'torch.randn', (['(2)'], {}), '(2)\n', (1824, 1827), False, 'import torch\n'), ((2087, 2141), 'torch.nn.modules.Linear', 'torch.nn.modules.Linear', ([], {'in_features': '(2)', 'out_features': '(1)'}), '(in_features=2, out_features=1)\n', (2110, 2141), False, 'import torch\n'), ((2459, 2554), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'drop_last': 'self.drop_last'}), '(dataset, batch_size=self.batch_size, drop_last=\n self.drop_last)\n', (2486, 2554), False, 'import torch\n'), ((5448, 5501), 'torch.div', 'torch.div', (['module.max_steps', '(3)'], {'rounding_mode': '"""trunc"""'}), "(module.max_steps, 3, rounding_mode='trunc')\n", (5457, 5501), False, 'import torch\n'), ((7034, 7087), 'mridc.core.optim.optimizers.register_optimizer', 'register_optimizer', (['"""TempOpt"""', 'TempOpt', 'TempOptParams'], {}), "('TempOpt', TempOpt, TempOptParams)\n", (7052, 7087), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((7135, 7159), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""TempOpt"""'], {}), "('TempOpt')\n", (7148, 7159), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((7567, 7619), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""novograd"""', 'basic_optim_config'], {}), "('novograd', basic_optim_config)\n", (7587, 7619), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((7966, 8012), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_optim_config'], {}), '(basic_optim_config)\n', (7992, 8012), False, 'import omegaconf\n'), ((8037, 8082), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""novograd"""', 'dict_config'], {}), "('novograd', dict_config)\n", (8057, 8082), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((8744, 8790), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_optim_config'], {}), '(basic_optim_config)\n', (8770, 8790), False, 'import omegaconf\n'), ((8815, 8867), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""novograd"""', 'basic_optim_config'], {}), "('novograd', basic_optim_config)\n", (8835, 8867), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((9244, 9290), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_optim_config'], {}), '(basic_optim_config)\n', (9270, 9290), False, 'import omegaconf\n'), ((9315, 9360), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""novograd"""', 'dict_config'], {}), "('novograd', dict_config)\n", (9335, 9360), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, 
register_optimizer\n'), ((9833, 9873), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""sgd"""', 'dict_config'], {}), "('sgd', dict_config)\n", (9853, 9873), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((11365, 11443), 'mridc.core.optim.lr_scheduler.register_scheduler', 'optim.lr_scheduler.register_scheduler', (['"""TempSched"""', 'TempSched', 'TempSchedParams'], {}), "('TempSched', TempSched, TempSchedParams)\n", (11402, 11443), False, 'from mridc.core import optim\n'), ((11491, 11516), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (11504, 11516), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((11599, 11644), 'mridc.core.optim.lr_scheduler.get_scheduler', 'optim.lr_scheduler.get_scheduler', (['"""TempSched"""'], {}), "('TempSched')\n", (11631, 11644), False, 'from mridc.core import optim\n'), ((11957, 11982), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (11970, 11982), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((12146, 12210), 'mridc.core.optim.lr_scheduler.prepare_lr_scheduler', 'optim.lr_scheduler.prepare_lr_scheduler', (['opt', 'basic_sched_config'], {}), '(opt, basic_sched_config)\n', (12185, 12210), False, 'from mridc.core import optim\n'), ((12360, 12406), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_sched_config'], {}), '(basic_sched_config)\n', (12386, 12406), False, 'import omegaconf\n'), ((12433, 12490), 'mridc.core.optim.lr_scheduler.prepare_lr_scheduler', 'optim.lr_scheduler.prepare_lr_scheduler', (['opt', 'dict_config'], {}), '(opt, dict_config)\n', (12472, 12490), False, 'from mridc.core import optim\n'), ((12796, 12821), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (12809, 12821), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((13108, 13172), 'mridc.core.optim.lr_scheduler.prepare_lr_scheduler', 'optim.lr_scheduler.prepare_lr_scheduler', (['opt', 'basic_sched_config'], {}), '(opt, basic_sched_config)\n', (13147, 13172), False, 'from mridc.core import optim\n'), ((13322, 13368), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_sched_config'], {}), '(basic_sched_config)\n', (13348, 13368), False, 'import omegaconf\n'), ((13395, 13452), 'mridc.core.optim.lr_scheduler.prepare_lr_scheduler', 'optim.lr_scheduler.prepare_lr_scheduler', (['opt', 'dict_config'], {}), '(opt, dict_config)\n', (13434, 13452), False, 'from mridc.core import optim\n'), ((13713, 13738), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (13726, 13738), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((13844, 13931), 'mridc.core.optim.lr_scheduler.WarmupPolicy', 'optim.lr_scheduler.WarmupPolicy', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=self.\n MIN_LR)\n', (13875, 13931), False, 'from mridc.core import optim\n'), ((14421, 14524), 'mridc.core.optim.lr_scheduler.WarmupPolicy', 'optim.lr_scheduler.WarmupPolicy', 
(['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (14452, 14524), False, 'from mridc.core import optim\n'), ((15233, 15258), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (15246, 15258), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((15364, 15455), 'mridc.core.optim.lr_scheduler.WarmupHoldPolicy', 'optim.lr_scheduler.WarmupHoldPolicy', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (15399, 15455), False, 'from mridc.core import optim\n'), ((15945, 16052), 'mridc.core.optim.lr_scheduler.WarmupHoldPolicy', 'optim.lr_scheduler.WarmupHoldPolicy', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (15980, 16052), False, 'from mridc.core import optim\n'), ((16678, 16798), 'mridc.core.optim.lr_scheduler.WarmupHoldPolicy', 'optim.lr_scheduler.WarmupHoldPolicy', (['opt'], {'warmup_steps': '(5)', 'hold_steps': '(3)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, hold_steps=3,\n max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (16713, 16798), False, 'from mridc.core import optim\n'), ((17563, 17588), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (17576, 17588), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((17694, 17784), 'mridc.core.optim.lr_scheduler.WarmupAnnealing', 'optim.lr_scheduler.WarmupAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (17728, 17784), False, 'from mridc.core import optim\n'), ((18272, 18378), 'mridc.core.optim.lr_scheduler.WarmupAnnealing', 'optim.lr_scheduler.WarmupAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (18306, 18378), False, 'from mridc.core import optim\n'), ((19004, 19124), 'mridc.core.optim.lr_scheduler.WarmupHoldPolicy', 'optim.lr_scheduler.WarmupHoldPolicy', (['opt'], {'warmup_steps': '(5)', 'hold_steps': '(3)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, hold_steps=3,\n max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (19039, 19124), False, 'from mridc.core import optim\n'), ((19853, 19878), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (19866, 19878), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((19984, 20074), 'mridc.core.optim.lr_scheduler.SquareAnnealing', 'optim.lr_scheduler.SquareAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (20018, 20074), False, 'from mridc.core import optim\n'), ((20563, 20669), 'mridc.core.optim.lr_scheduler.SquareAnnealing', 'optim.lr_scheduler.SquareAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, 
min_lr=self.MIN_LR)\n', (20597, 20669), False, 'from mridc.core import optim\n'), ((21385, 21410), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (21398, 21410), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((21516, 21586), 'mridc.core.optim.lr_scheduler.SquareRootAnnealing', 'SquareRootAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (21535, 21586), False, 'from mridc.core.optim.lr_scheduler import AVAILABLE_SCHEDULERS, SquareRootAnnealing\n'), ((22080, 22190), 'mridc.core.optim.lr_scheduler.SquareRootAnnealing', 'optim.lr_scheduler.SquareRootAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (22118, 22190), False, 'from mridc.core import optim\n'), ((22920, 22945), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (22933, 22945), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((23051, 23141), 'mridc.core.optim.lr_scheduler.CosineAnnealing', 'optim.lr_scheduler.CosineAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (23085, 23141), False, 'from mridc.core import optim\n'), ((23630, 23736), 'mridc.core.optim.lr_scheduler.CosineAnnealing', 'optim.lr_scheduler.CosineAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (23664, 23736), False, 'from mridc.core import optim\n'), ((24366, 24489), 'mridc.core.optim.lr_scheduler.CosineAnnealing', 'optim.lr_scheduler.CosineAnnealing', (['opt'], {'warmup_steps': '(3)', 'constant_steps': '(2)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=3, constant_steps=2,\n max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (24400, 24489), False, 'from mridc.core import optim\n'), ((25379, 25404), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (25392, 25404), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((25510, 25618), 'mridc.core.optim.lr_scheduler.PolynomialDecayAnnealing', 'optim.lr_scheduler.PolynomialDecayAnnealing', (['opt'], {'power': '(2)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, power=2, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (25553, 25618), False, 'from mridc.core import optim\n'), ((26129, 26244), 'mridc.core.optim.lr_scheduler.PolynomialDecayAnnealing', 'optim.lr_scheduler.PolynomialDecayAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=\n self.MAX_STEPS, min_lr=self.MIN_LR)\n', (26172, 26244), False, 'from mridc.core import optim\n'), ((27000, 27025), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (27013, 27025), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((27131, 27243), 
'mridc.core.optim.lr_scheduler.PolynomialHoldDecayAnnealing', 'optim.lr_scheduler.PolynomialHoldDecayAnnealing', (['opt'], {'power': '(2)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, power=2, max_steps=\n self.MAX_STEPS, min_lr=self.MIN_LR)\n', (27178, 27243), False, 'from mridc.core import optim\n'), ((27754, 27882), 'mridc.core.optim.lr_scheduler.PolynomialHoldDecayAnnealing', 'optim.lr_scheduler.PolynomialHoldDecayAnnealing', (['opt'], {'power': '(2)', 'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, power=2, warmup_steps=\n 5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (27801, 27882), False, 'from mridc.core import optim\n'), ((28400, 28541), 'mridc.core.optim.lr_scheduler.PolynomialHoldDecayAnnealing', 'optim.lr_scheduler.PolynomialHoldDecayAnnealing', (['opt'], {'warmup_steps': '(5)', 'hold_steps': '(3)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR', 'power': '(2)'}), '(opt, warmup_steps=5,\n hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR, power=2)\n', (28447, 28541), False, 'from mridc.core import optim\n'), ((29419, 29444), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (29432, 29444), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((29550, 29650), 'mridc.core.optim.lr_scheduler.InverseSquareRootAnnealing', 'optim.lr_scheduler.InverseSquareRootAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS,\n min_lr=self.MIN_LR)\n', (29595, 29650), False, 'from mridc.core import optim\n'), ((30140, 30256), 'mridc.core.optim.lr_scheduler.InverseSquareRootAnnealing', 'optim.lr_scheduler.InverseSquareRootAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5,\n max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (30185, 30256), False, 'from mridc.core import optim\n'), ((31020, 31045), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (31033, 31045), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((31151, 31241), 'mridc.core.optim.lr_scheduler.CosineAnnealing', 'optim.lr_scheduler.CosineAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (31185, 31241), False, 'from mridc.core import optim\n'), ((3361, 3411), 'mridc.utils.logging.debug', 'logging.debug', (['f"""max_epochs: {trainer.max_epochs}"""'], {}), "(f'max_epochs: {trainer.max_epochs}')\n", (3374, 3411), False, 'from mridc.utils import logging\n'), ((3424, 3500), 'mridc.utils.logging.debug', 'logging.debug', (['f"""accumulate_grad_batches: {trainer.accumulate_grad_batches}"""'], {}), "(f'accumulate_grad_batches: {trainer.accumulate_grad_batches}')\n", (3437, 3500), False, 'from mridc.utils import logging\n'), ((3513, 3581), 'mridc.utils.logging.debug', 'logging.debug', (['f"""limit_train_batches: {trainer.limit_train_batches}"""'], {}), "(f'limit_train_batches: {trainer.limit_train_batches}')\n", (3526, 3581), False, 'from mridc.utils import logging\n'), ((3594, 3650), 'mridc.utils.logging.debug', 'logging.debug', (['f"""num_processes: {trainer.num_processes}"""'], {}), "(f'num_processes: {trainer.num_processes}')\n", (3607, 3650), False, 'from mridc.utils 
import logging\n'), ((3663, 3712), 'mridc.utils.logging.debug', 'logging.debug', (['f"""batch_size: {module.batch_size}"""'], {}), "(f'batch_size: {module.batch_size}')\n", (3676, 3712), False, 'from mridc.utils import logging\n'), ((3725, 3776), 'mridc.utils.logging.debug', 'logging.debug', (['f"""dataset_len: {module.dataset_len}"""'], {}), "(f'dataset_len: {module.dataset_len}')\n", (3738, 3776), False, 'from mridc.utils import logging\n'), ((3789, 3836), 'mridc.utils.logging.debug', 'logging.debug', (['f"""drop_last: {module.drop_last}"""'], {}), "(f'drop_last: {module.drop_last}')\n", (3802, 3836), False, 'from mridc.utils import logging\n'), ((3911, 3959), 'mridc.utils.logging.debug', 'logging.debug', (['f"""{trainer.num_training_batches}"""'], {}), "(f'{trainer.num_training_batches}')\n", (3924, 3959), False, 'from mridc.utils import logging\n'), ((6349, 6372), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['opt_name'], {}), '(opt_name)\n', (6362, 6372), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((9900, 9911), 'mridc.core.conf.optimizers.SGDParams', 'SGDParams', ([], {}), '()\n', (9909, 9911), False, 'from mridc.core.conf.optimizers import NovogradParams, SGDParams\n'), ((9944, 9960), 'mridc.core.conf.optimizers.NovogradParams', 'NovogradParams', ([], {}), '()\n', (9958, 9960), False, 'from mridc.core.conf.optimizers import NovogradParams, SGDParams\n'), ((10457, 10501), 'mridc.core.optim.lr_scheduler.get_scheduler', 'optim.lr_scheduler.get_scheduler', (['sched_name'], {}), '(sched_name)\n', (10489, 10501), False, 'from mridc.core import optim\n'), ((32589, 32878), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'max_epochs': 'max_epochs', 'strategy': '"""ddp_spawn"""', 'accelerator': '"""cpu"""', 'num_processes': 'num_processes', 'accumulate_grad_batches': 'accumulate_grad_batches', 'limit_train_batches': 'limit_train_batches', 'enable_checkpointing': '(False)', 'progress_bar_refresh_rate': '(0)', 'weights_summary': 'None'}), "(max_epochs=max_epochs, strategy='ddp_spawn', accelerator='cpu',\n num_processes=num_processes, accumulate_grad_batches=\n accumulate_grad_batches, limit_train_batches=limit_train_batches,\n enable_checkpointing=False, progress_bar_refresh_rate=0,\n weights_summary=None)\n", (32599, 32878), True, 'import pytorch_lightning as pl\n'), ((33045, 33194), 'mridc.core.optim.lr_scheduler.compute_max_steps', 'optim.lr_scheduler.compute_max_steps', (['max_epochs', 'accumulate_grad_batches', 'limit_train_batches', 'num_processes', 'dataset_len', 'batch_size', 'drop_last'], {}), '(max_epochs, accumulate_grad_batches,\n limit_train_batches, num_processes, dataset_len, batch_size, drop_last)\n', (33081, 33194), False, 'from mridc.core import optim\n'), ((34673, 34694), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (34687, 34694), False, 'import random\n'), ((34734, 34755), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (34748, 34755), False, 'import random\n'), ((34796, 34816), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (34810, 34816), False, 'import random\n'), ((34851, 34918), 'random.choice', 'random.choice', (['[limit_train_batches_int, limit_train_batches_float]'], {}), '([limit_train_batches_int, limit_train_batches_float])\n', (34864, 34918), False, 'import random\n'), ((34944, 34965), 'random.randint', 'random.randint', (['(4)', '(20)'], {}), '(4, 20)\n', (34958, 34965), False, 
'import random\n'), ((34994, 35014), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (35008, 35014), False, 'import random\n'), ((35041, 35080), 'random.randint', 'random.randint', (['(20)', '(num_processes * 500)'], {}), '(20, num_processes * 500)\n', (35055, 35080), False, 'import random\n'), ((35906, 36193), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'max_steps': 'max_steps', 'strategy': '"""ddp_spawn"""', 'accelerator': '"""cpu"""', 'num_processes': 'num_processes', 'accumulate_grad_batches': 'accumulate_grad_batches', 'limit_train_batches': 'limit_train_batches', 'enable_checkpointing': '(False)', 'progress_bar_refresh_rate': '(0)', 'weights_summary': 'None'}), "(max_steps=max_steps, strategy='ddp_spawn', accelerator='cpu',\n num_processes=num_processes, accumulate_grad_batches=\n accumulate_grad_batches, limit_train_batches=limit_train_batches,\n enable_checkpointing=False, progress_bar_refresh_rate=0,\n weights_summary=None)\n", (35916, 36193), True, 'import pytorch_lightning as pl\n'), ((34613, 34633), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (34627, 34633), False, 'import random\n'), ((35138, 35168), 'math.ceil', 'math.ceil', (['(5.0 / num_processes)'], {}), '(5.0 / num_processes)\n', (35147, 35168), False, 'import math\n'), ((37001, 37012), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (37010, 37012), False, 'import os\n'), ((6275, 6300), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6298, 6300), False, 'import torch\n'), ((35174, 35217), 'numpy.floor_divide', 'np.floor_divide', (['dataset_len', 'num_processes'], {}), '(dataset_len, num_processes)\n', (35189, 35217), True, 'import numpy as np\n'), ((37073, 37084), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (37082, 37084), False, 'import os\n')]
|
#!/usr/bin/env python3
# Author: <NAME>
# INFO521 Homework 3 Problem 6
import numpy as np
import matplotlib.pyplot as plt
# --------------------------------------------------
def true_function(x):
"""$t = 5x+x^2-0.5x^3$"""
return (5 * x) + x**2 - (0.5 * x**3)
# --------------------------------------------------
def sample_from_function(N=100, noise_var=1000, xmin=-5., xmax=5.):
""" Sample data from the true function.
N: Number of samples
Returns a noisy sample t_sample from the function
and the true function t. """
x = np.random.uniform(xmin, xmax, N)
t = true_function(x)
# add standard normal noise using np.random.randn
# (standard normal is a Gaussian N(0, 1.0) (i.e., mean 0, variance 1),
    # so multiplying by np.sqrt(noise_var) makes it N(0, noise_var), i.e. standard deviation sqrt(noise_var))
t = t + np.random.randn(x.shape[0]) * np.sqrt(noise_var)
return x, t
# --------------------------------------------------
def main():
xmin = -4.
xmax = 5.
noise_var = 6
orders = [1, 3, 5, 9]
N = 25
num_samples = 20
# Make a set of N evenly-spaced x values between xmin and xmax
test_x = np.linspace(xmin, xmax, N)
true_y = true_function(test_x)
for i in orders:
plt.figure(0)
for _ in range(0, num_samples):
x, t = sample_from_function(
N=25, xmin=xmin, xmax=xmax, noise_var=noise_var)
X = np.zeros(shape=(x.shape[0], i + 1))
testX = np.zeros(shape=(test_x.shape[0], i + 1))
for k in range(i + 1):
X[:, k] = np.power(x, k)
testX[:, k] = np.power(test_x, k)
# fit model parameters
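            # Closed-form ordinary least squares via the normal equations:
            # w = (X^T X)^{-1} X^T t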
w = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, t))
# calculate predictions
prediction_t = np.dot(testX, w)
plt.plot(test_x, prediction_t, color='blue')
# Plot the true function in red so it will be visible
plt.plot(test_x, true_y, color='red', linewidth=3)
plt.xlabel('x')
plt.ylabel('t')
plt.title('Model order {} prediction of {}, $x \in [{},{}]$'.format(
i, true_function.__doc__, xmin, xmax))
plt.pause(.1) # required on some systems so that rendering can happen
outfile = 'model_bias-{}.png'.format(i)
plt.savefig(outfile, format='png')
plt.show()
# --------------------------------------------------
if __name__ == '__main__':
main()
|
[
"numpy.random.uniform",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.power",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((571, 603), 'numpy.random.uniform', 'np.random.uniform', (['xmin', 'xmax', 'N'], {}), '(xmin, xmax, N)\n', (588, 603), True, 'import numpy as np\n'), ((1166, 1192), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'N'], {}), '(xmin, xmax, N)\n', (1177, 1192), True, 'import numpy as np\n'), ((1258, 1271), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (1268, 1271), True, 'import matplotlib.pyplot as plt\n'), ((1973, 2023), 'matplotlib.pyplot.plot', 'plt.plot', (['test_x', 'true_y'], {'color': '"""red"""', 'linewidth': '(3)'}), "(test_x, true_y, color='red', linewidth=3)\n", (1981, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2032, 2047), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2042, 2047), True, 'import matplotlib.pyplot as plt\n'), ((2056, 2071), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {}), "('t')\n", (2066, 2071), True, 'import matplotlib.pyplot as plt\n'), ((2208, 2222), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (2217, 2222), True, 'import matplotlib.pyplot as plt\n'), ((2335, 2369), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {'format': '"""png"""'}), "(outfile, format='png')\n", (2346, 2369), True, 'import matplotlib.pyplot as plt\n'), ((2378, 2388), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2386, 2388), True, 'import matplotlib.pyplot as plt\n'), ((848, 875), 'numpy.random.randn', 'np.random.randn', (['x.shape[0]'], {}), '(x.shape[0])\n', (863, 875), True, 'import numpy as np\n'), ((878, 896), 'numpy.sqrt', 'np.sqrt', (['noise_var'], {}), '(noise_var)\n', (885, 896), True, 'import numpy as np\n'), ((1435, 1470), 'numpy.zeros', 'np.zeros', ([], {'shape': '(x.shape[0], i + 1)'}), '(shape=(x.shape[0], i + 1))\n', (1443, 1470), True, 'import numpy as np\n'), ((1491, 1531), 'numpy.zeros', 'np.zeros', ([], {'shape': '(test_x.shape[0], i + 1)'}), '(shape=(test_x.shape[0], i + 1))\n', (1499, 1531), True, 'import numpy as np\n'), ((1828, 1844), 'numpy.dot', 'np.dot', (['testX', 'w'], {}), '(testX, w)\n', (1834, 1844), True, 'import numpy as np\n'), ((1857, 1901), 'matplotlib.pyplot.plot', 'plt.plot', (['test_x', 'prediction_t'], {'color': '"""blue"""'}), "(test_x, prediction_t, color='blue')\n", (1865, 1901), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1607), 'numpy.power', 'np.power', (['x', 'k'], {}), '(x, k)\n', (1601, 1607), True, 'import numpy as np\n'), ((1638, 1657), 'numpy.power', 'np.power', (['test_x', 'k'], {}), '(test_x, k)\n', (1646, 1657), True, 'import numpy as np\n'), ((1748, 1762), 'numpy.dot', 'np.dot', (['X.T', 't'], {}), '(X.T, t)\n', (1754, 1762), True, 'import numpy as np\n'), ((1731, 1745), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (1737, 1745), True, 'import numpy as np\n')]
|
import os
import glob
import numpy as np
from datetime import datetime
from scipy.io import loadmat
from PIL import Image
np.random.seed(42)
def calc_age(taken, dob):
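    # dob is a Matlab serial date number (days counted from year 0); Python
    # ordinals count from year 1, so the 366-day offset converts between them.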
birth = datetime.fromordinal(max(int(dob) - 366, 1))
# assume the photo was taken in the middle of the year
if birth.month < 7:
return taken - birth.year
else:
return taken - birth.year - 1
def get_meta(mat_path, db):
meta = loadmat(mat_path)
full_path = meta[db][0, 0]["full_path"][0]
dob = meta[db][0, 0]["dob"][0] # Matlab serial date number
gender = meta[db][0, 0]["gender"][0]
photo_taken = meta[db][0, 0]["photo_taken"][0] # year
face_score = meta[db][0, 0]["face_score"][0]
second_face_score = meta[db][0, 0]["second_face_score"][0]
age = [calc_age(photo_taken[i], dob[i]) for i in range(len(dob))]
return full_path, dob, gender, photo_taken, face_score, second_face_score, age
def load_data(data_dir, db='imdb', split=0.1):
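    """Load IMDB/WIKI .mat metadata (and optionally UTKFace), filter out
    low-quality or implausible samples, and return shuffled (train, val)
    path/age/gender lists."""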
out_paths = []
out_ages = []
out_genders = []
db_names = db.split(',')
# Load utkface if need.
if 'utk' in db_names:
utk_dir = os.path.join(data_dir, 'utkface-new')
utk_paths, utk_ages, utk_genders = load_utk(utk_dir)
out_paths += utk_paths
out_ages += utk_ages
out_genders += utk_genders
for d in db_names:
image_dir = os.path.join(data_dir, '{}_crop'.format(d))
mat_path = os.path.join(image_dir, '{}.mat'.format(d))
full_path, dob, gender, photo_taken, face_score, second_face_score, age = get_meta(mat_path, d)
sample_num = len(face_score)
min_score = 1.
for i in range(sample_num):
if face_score[i] < min_score:
continue
if (~np.isnan(second_face_score[i])) and second_face_score[i] > 0.0:
continue
if ~(0 <= age[i] <= 100):
continue
if np.isnan(gender[i]):
continue
out_genders.append(int(gender[i]))
out_ages.append(age[i])
out_paths.append(os.path.join(image_dir, str(full_path[i][0])))
indices = np.arange(len(out_paths))
np.random.shuffle(indices)
out_paths = list(np.asarray(out_paths)[indices])
out_ages = list(np.asarray(out_ages)[indices])
out_genders = list(np.asarray(out_genders)[indices])
num_train = int(len(out_paths) * (1 - split))
train_paths, train_ages, train_genders = out_paths[:num_train], out_ages[:num_train], out_genders[:num_train]
val_paths, val_ages, val_genders = out_paths[num_train:], out_ages[num_train:], out_genders[num_train:]
return (train_paths, train_ages, train_genders), (val_paths, val_ages, val_genders)
def load_utk(data_dir):
"""Load UTKFace dataset."""
out_paths = []
out_ages = []
out_genders = []
paths = glob.glob(os.path.join(data_dir, 'crop_part1', '*'))
for path in paths:
filename = os.path.basename(path)
out_paths.append(path)
age, gender = filename.split('_')[:2]
age = int(age)
gender = 1 if int(gender) == 0 else 0
out_ages.append(age)
out_genders.append(gender)
return out_paths, out_ages, out_genders
def load_appa(data_dir, ignore_list_filename=None):
"""Load APPA-real dataset."""
out_paths = []
out_ages = []
ignore_filenames = set()
if ignore_list_filename is not None:
ignore_list_path = os.path.join(data_dir, ignore_list_filename)
ignore_filenames = set(x.strip() for x in open(ignore_list_path))
data_file = os.path.join(data_dir, 'gt_avg_train.csv')
image_dir = os.path.join(data_dir, 'train')
with open(data_file) as f:
lines = [x.strip() for x in f]
for line in lines[1:]:
filename, _, _, _, age = line.strip().split(',')
if filename in ignore_filenames:
continue
image_path = os.path.join(image_dir, filename + '_face.jpg')
age = int(age)
out_paths.append(image_path)
out_ages.append(age)
return out_paths, out_ages
def load_aligned_data(data_dir, split=0.1):
out_paths = []
out_ages = []
out_genders = []
paths = glob.glob(os.path.join(data_dir, '*'))
for path in paths:
filename = os.path.basename(path)
age, gender = filename.split('_')[-2:]
gender = gender.split('.')[0]
age = int(age)
gender = int(gender)
out_paths.append(path)
out_ages.append(age)
out_genders.append(gender)
indices = np.arange(len(out_paths))
np.random.shuffle(indices)
out_paths = np.asarray(out_paths)[indices]
out_ages = np.asarray(out_ages)[indices]
out_genders = np.asarray(out_genders)[indices]
num_train = int(len(out_paths) * (1 - split))
train_paths, train_ages, train_genders = out_paths[:num_train], out_ages[:num_train], out_genders[:num_train]
val_paths, val_ages, val_genders = out_paths[num_train:], out_ages[num_train:], out_genders[num_train:]
return (train_paths, train_ages, train_genders), (val_paths, val_ages, val_genders)
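# Commented usage sketch (assumes e.g. ./data/imdb_crop/imdb.mat is present):
# (train_paths, train_ages, train_genders), (val_paths, val_ages, val_genders) = load_data('./data', db='imdb', split=0.1)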
|
[
"numpy.random.seed",
"os.path.basename",
"scipy.io.loadmat",
"numpy.asarray",
"numpy.isnan",
"os.path.join",
"numpy.random.shuffle"
] |
[((124, 142), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (138, 142), True, 'import numpy as np\n'), ((435, 452), 'scipy.io.loadmat', 'loadmat', (['mat_path'], {}), '(mat_path)\n', (442, 452), False, 'from scipy.io import loadmat\n'), ((2190, 2216), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2207, 2216), True, 'import numpy as np\n'), ((3601, 3643), 'os.path.join', 'os.path.join', (['data_dir', '"""gt_avg_train.csv"""'], {}), "(data_dir, 'gt_avg_train.csv')\n", (3613, 3643), False, 'import os\n'), ((3660, 3691), 'os.path.join', 'os.path.join', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (3672, 3691), False, 'import os\n'), ((4629, 4655), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (4646, 4655), True, 'import numpy as np\n'), ((1139, 1176), 'os.path.join', 'os.path.join', (['data_dir', '"""utkface-new"""'], {}), "(data_dir, 'utkface-new')\n", (1151, 1176), False, 'import os\n'), ((2879, 2920), 'os.path.join', 'os.path.join', (['data_dir', '"""crop_part1"""', '"""*"""'], {}), "(data_dir, 'crop_part1', '*')\n", (2891, 2920), False, 'import os\n'), ((2964, 2986), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2980, 2986), False, 'import os\n'), ((3465, 3509), 'os.path.join', 'os.path.join', (['data_dir', 'ignore_list_filename'], {}), '(data_dir, ignore_list_filename)\n', (3477, 3509), False, 'import os\n'), ((4257, 4284), 'os.path.join', 'os.path.join', (['data_dir', '"""*"""'], {}), "(data_dir, '*')\n", (4269, 4284), False, 'import os\n'), ((4328, 4350), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (4344, 4350), False, 'import os\n'), ((4672, 4693), 'numpy.asarray', 'np.asarray', (['out_paths'], {}), '(out_paths)\n', (4682, 4693), True, 'import numpy as np\n'), ((4718, 4738), 'numpy.asarray', 'np.asarray', (['out_ages'], {}), '(out_ages)\n', (4728, 4738), True, 'import numpy as np\n'), ((4766, 4789), 'numpy.asarray', 'np.asarray', (['out_genders'], {}), '(out_genders)\n', (4776, 4789), True, 'import numpy as np\n'), ((1939, 1958), 'numpy.isnan', 'np.isnan', (['gender[i]'], {}), '(gender[i])\n', (1947, 1958), True, 'import numpy as np\n'), ((2238, 2259), 'numpy.asarray', 'np.asarray', (['out_paths'], {}), '(out_paths)\n', (2248, 2259), True, 'import numpy as np\n'), ((2290, 2310), 'numpy.asarray', 'np.asarray', (['out_ages'], {}), '(out_ages)\n', (2300, 2310), True, 'import numpy as np\n'), ((2344, 2367), 'numpy.asarray', 'np.asarray', (['out_genders'], {}), '(out_genders)\n', (2354, 2367), True, 'import numpy as np\n'), ((3949, 3996), 'os.path.join', 'os.path.join', (['image_dir', "(filename + '_face.jpg')"], {}), "(image_dir, filename + '_face.jpg')\n", (3961, 3996), False, 'import os\n'), ((1770, 1800), 'numpy.isnan', 'np.isnan', (['second_face_score[i]'], {}), '(second_face_score[i])\n', (1778, 1800), True, 'import numpy as np\n')]
|
import numpy as np
from fym.core import BaseEnv, BaseSystem
from fym.utils import rot
def hat(v):
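    """Skew-symmetric (hat) map: hat(v) @ w equals the cross product v x w."""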
v1, v2, v3 = v.squeeze()
return np.array([
[0, -v3, v2],
[v3, 0, -v1],
[-v2, v1, 0]
])
class Quadrotor(BaseEnv):
"""
Prof. <NAME>'s model for quadrotor UAV is used.
- https://www.math.ucsd.edu/~mleok/pdf/LeLeMc2010_quadrotor.pdf
Description:
- an NED frame is used for the inertia and body fixed frame.
Hence, `+z` direction is downward.
- ``pos`` and ``vel`` are resolved in the inertial frame,
whereas ``R`` and ``omega`` are resolved in the body frame
- ``fis`` is a vector of thrusts generated by the rotors.
Variables:
R: SO(3)
The rotation matrix from the body-fixed frame to the inertial frame
R = C_{i/b} = C_{b/i}^T
"""
g = 9.81 # m/s^2
e3 = np.vstack((0, 0, 1))
J = np.diag([0.0820, 0.0845, 0.1377])
m = 4.34 # Mass
d = 0.315 # The distance from the center of mass to the center of each rotor
ctf = 8.004e-4 # The torque coefficient. ``torque_i = (-1)^i ctf f_i``
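    # B maps the four rotor thrusts to (total thrust, roll, pitch, yaw moments);
    # its pseudo-inverse Binv recovers the rotor thrusts from a desired f and M.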
B = np.array(
[[1, 1, 1, 1],
[0, -d, 0, d],
[d, 0, -d, 0],
[-ctf, ctf, -ctf, ctf]]
)
Binv = np.linalg.pinv(B)
name = "quadrotor"
def __init__(self,
pos=np.zeros((3, 1)),
vel=np.zeros((3, 1)),
R=np.eye(3),
omega=np.zeros((3, 1)),
config="Quadrotor"):
super().__init__()
self.pos = BaseSystem(pos)
self.vel = BaseSystem(vel)
self.R = BaseSystem(R)
self.omega = BaseSystem(omega)
def deriv(self, pos, vel, R, omega, fis):
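        # Newton-Euler dynamics: translational acceleration is gravity along e3
        # minus the total thrust f directed along the body z-axis (rotated by R);
        # attitude kinematics dR = R hat(omega); Euler's equation gives
        # J domega = M - omega x (J omega).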
m, g, J, e3 = self.m, self.g, self.J, self.e3
f, *M = self.fis2fM(fis)
M = np.vstack(M)
dpos = vel
dvel = g * e3 - f * R @ e3 / m
dR = R @ hat(omega)
domega = np.linalg.inv(J).dot(M - np.cross(omega, J.dot(omega), axis=0))
return dpos, dvel, dR, domega
def set_dot(self, t, fis):
pos, vel, R, omega = self.observe_list()
dots = self.deriv(pos, vel, R, omega, fis)
self.pos.dot, self.vel.dot, self.R.dot, self.omega.dot = dots
def fis2fM(self, fis):
"""Convert f_i's to force and moments
Parameters:
fis: (4, 1) array
Return:
f, M1, M2, M3: (4,) array of force and moments
"""
return (self.B @ fis).ravel()
def fM2fis(self, f, M1, M2, M3):
"""Convert force and moments to f_i's
Parameters:
f: scalar, the total thrust
M1, M2, M3: scalars, the moments
Return:
fis: (4, 1) array of f_i's
"""
return self.Binv @ np.vstack((f, M1, M2, M3))
def angle2R(self, angle):
"""angle: phi, theta, psi in radian"""
return rot.angle2dcm(*np.ravel(angle)[::-1]).T
def R2angle(self, R):
"""angle: phi, theta, psi in radian"""
return rot.dcm2angle(R.T)[::-1]
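# Commented usage sketch (hover: rotor thrusts chosen so B @ fis = [m*g, 0, 0, 0]):
# quad = Quadrotor()
# fis = quad.fM2fis(quad.m * quad.g, 0, 0, 0)
# quad.set_dot(t=0.0, fis=fis)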
|
[
"numpy.eye",
"fym.core.BaseSystem",
"numpy.ravel",
"numpy.zeros",
"fym.utils.rot.dcm2angle",
"numpy.array",
"numpy.linalg.inv",
"numpy.diag",
"numpy.linalg.pinv",
"numpy.vstack"
] |
[((141, 193), 'numpy.array', 'np.array', (['[[0, -v3, v2], [v3, 0, -v1], [-v2, v1, 0]]'], {}), '([[0, -v3, v2], [v3, 0, -v1], [-v2, v1, 0]])\n', (149, 193), True, 'import numpy as np\n'), ((897, 917), 'numpy.vstack', 'np.vstack', (['(0, 0, 1)'], {}), '((0, 0, 1))\n', (906, 917), True, 'import numpy as np\n'), ((926, 958), 'numpy.diag', 'np.diag', (['[0.082, 0.0845, 0.1377]'], {}), '([0.082, 0.0845, 0.1377])\n', (933, 958), True, 'import numpy as np\n'), ((1147, 1225), 'numpy.array', 'np.array', (['[[1, 1, 1, 1], [0, -d, 0, d], [d, 0, -d, 0], [-ctf, ctf, -ctf, ctf]]'], {}), '([[1, 1, 1, 1], [0, -d, 0, d], [d, 0, -d, 0], [-ctf, ctf, -ctf, ctf]])\n', (1155, 1225), True, 'import numpy as np\n'), ((1278, 1295), 'numpy.linalg.pinv', 'np.linalg.pinv', (['B'], {}), '(B)\n', (1292, 1295), True, 'import numpy as np\n'), ((1365, 1381), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1373, 1381), True, 'import numpy as np\n'), ((1404, 1420), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1412, 1420), True, 'import numpy as np\n'), ((1441, 1450), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1447, 1450), True, 'import numpy as np\n'), ((1475, 1491), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1483, 1491), True, 'import numpy as np\n'), ((1577, 1592), 'fym.core.BaseSystem', 'BaseSystem', (['pos'], {}), '(pos)\n', (1587, 1592), False, 'from fym.core import BaseEnv, BaseSystem\n'), ((1612, 1627), 'fym.core.BaseSystem', 'BaseSystem', (['vel'], {}), '(vel)\n', (1622, 1627), False, 'from fym.core import BaseEnv, BaseSystem\n'), ((1645, 1658), 'fym.core.BaseSystem', 'BaseSystem', (['R'], {}), '(R)\n', (1655, 1658), False, 'from fym.core import BaseEnv, BaseSystem\n'), ((1680, 1697), 'fym.core.BaseSystem', 'BaseSystem', (['omega'], {}), '(omega)\n', (1690, 1697), False, 'from fym.core import BaseEnv, BaseSystem\n'), ((1845, 1857), 'numpy.vstack', 'np.vstack', (['M'], {}), '(M)\n', (1854, 1857), True, 'import numpy as np\n'), ((2798, 2824), 'numpy.vstack', 'np.vstack', (['(f, M1, M2, M3)'], {}), '((f, M1, M2, M3))\n', (2807, 2824), True, 'import numpy as np\n'), ((3047, 3065), 'fym.utils.rot.dcm2angle', 'rot.dcm2angle', (['R.T'], {}), '(R.T)\n', (3060, 3065), False, 'from fym.utils import rot\n'), ((1962, 1978), 'numpy.linalg.inv', 'np.linalg.inv', (['J'], {}), '(J)\n', (1975, 1978), True, 'import numpy as np\n'), ((2933, 2948), 'numpy.ravel', 'np.ravel', (['angle'], {}), '(angle)\n', (2941, 2948), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from skimage.measure import compare_psnr as ski_psnr
from skimage.measure import compare_ssim as ski_ssim
import os
import csv
import logging
from model import Network
import torch.nn.functional as F
from data_load_own import get_training_set, get_test_set
from data_load_mix import get_dataset_deform
import utils
class CNN_train():
def __init__(self, dataset_name, imgSize=63, batchsize=32):
self.imgSize = imgSize
self.batchsize = batchsize
self.dataset_name = dataset_name
# load dataset
if dataset_name == 'mix' or dataset_name == 'yourdata':
if dataset_name == 'mix':
self.num_work = 8
train_dir = '/dataset/train/'
val_dir = '/dataset/val/'
test_dir = '/dataset/test/'
train_set = get_dataset_deform(train_dir, val_dir, test_dir, 0)
val_set = get_dataset_deform(train_dir, val_dir, test_dir, 1)
# test_set = get_dataset_deform(train_dir, val_dir, test_dir, 2)
self.dataloader = DataLoader(dataset=train_set, num_workers=self.num_work, batch_size=self.batchsize, shuffle=True, pin_memory=True)
self.val_loader = DataLoader(dataset=val_set, num_workers=self.num_work, batch_size=1, shuffle=False, pin_memory=False)
# self.test_dataloader = DataLoader(dataset=test_set, num_workers=self.num_work, batch_size=1, shuffle=False, pin_memory=False)
elif dataset_name == 'yourdata':
self.num_work = 8
# Specify the path of your data
train_input_dir = '/dataset/yourdata_train/input/'
train_target_dir = '/dataset/yourdata_train/target/'
test_input_dir = '/dataset/yourdata_test/input/'
test_target_dir = '/dataset/yourdata_test/target/'
train_set = get_training_set(train_input_dir, train_target_dir, True)
test_set = get_training_set(test_input_dir, test_target_dir, False)
self.dataloader = DataLoader(dataset=train_set, num_workers=self.num_work, batch_size=self.batchsize, shuffle=True, drop_last=True)
self.test_dataloader = DataLoader(dataset=test_set, num_workers=self.num_work, batch_size=1, shuffle=False)
else:
print('\tInvalid input dataset name at CNN_train()')
exit(1)
def __call__(self, cgp, gpuID, epoch_num=150, gpu_num=1):
print('GPUID :', gpuID)
print('epoch_num:', epoch_num)
# define model
torch.manual_seed(2018)
torch.cuda.manual_seed(2018)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
L1_loss = nn.L1Loss()
L1_loss = L1_loss.cuda(gpuID)
model = Network(16, 10, L1_loss, gpuID=gpuID)
if gpu_num > 1:
device_ids = [i for i in range(gpu_num)]
model = torch.nn.DataParallel(model, device_ids=device_ids)
model = model.cuda(gpuID)
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
print('Param:', utils.count_parameters_in_MB(model))
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epoch_num)
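        # Cosine annealing decays the learning rate from the initial 1e-3 toward
        # zero (default eta_min) over epoch_num epochs; scheduler.step() is called
        # once per epoch in the training loop below.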
test_interval = 5
# for output images
if not os.path.exists('./results'):
os.makedirs('./results/Inputs')
os.makedirs('./results/Outputs')
os.makedirs('./results/Targets')
# Train loop
for epoch in range(1, epoch_num+1):
scheduler.step()
start_time = time.time()
print('epoch', epoch)
train_loss = 0
for module in model.children():
module.train(True)
for ite, (input, target) in enumerate(self.dataloader):
lr_patch = Variable(input, requires_grad=False).cuda(gpuID)
hr_patch = Variable(target, requires_grad=False).cuda(gpuID)
optimizer.zero_grad()
output = model(lr_patch)
l1_loss = L1_loss(output, hr_patch)
l1_loss.backward()
optimizer.step()
train_loss += l1_loss.item()
if ite % 500 == 0:
vutils.save_image(lr_patch.data, './input_sample%d.png' % gpuID, normalize=False)
vutils.save_image(hr_patch.data, './target_sample%d.png' % gpuID, normalize=False)
vutils.save_image(output.data, './output_sample%d.png' % gpuID, normalize=False)
print('Train set : Average loss: {:.4f}'.format(train_loss))
print('time ', time.time()-start_time)
# check val/test performance
if epoch % test_interval == 0:
with torch.no_grad():
print('------------------------')
for module in model.children():
module.train(False)
test_psnr = 0
test_ssim = 0
eps = 1e-10
test_ite = 0
                    for i, (input, target) in enumerate(self.val_loader):  # i names the saved result images below
lr_patch = Variable(input, requires_grad=False).cuda(gpuID)
hr_patch = Variable(target, requires_grad=False).cuda(gpuID)
output = model(lr_patch)
# save images
vutils.save_image(output.data, './results/Outputs/%05d.png' % (int(i)), padding=0, normalize=False)
vutils.save_image(lr_patch.data, './results/Inputs/%05d.png' % (int(i)), padding=0, normalize=False)
vutils.save_image(hr_patch.data, './results/Targets/%05d.png' % (int(i)), padding=0, normalize=False)
# Calculation of SSIM and PSNR values
output = output.data.cpu().numpy()[0]
output[output>1] = 1
output[output<0] = 0
output = output.transpose((1,2,0))
hr_patch = hr_patch.data.cpu().numpy()[0]
hr_patch[hr_patch>1] = 1
hr_patch[hr_patch<0] = 0
hr_patch = hr_patch.transpose((1,2,0))
# SSIM
test_ssim+= ski_ssim(output, hr_patch, data_range=1, multichannel=True)
# PSNR
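                        # PSNR = 10 * log10(MAX^2 / MSE); images are scaled to [0, 1], so MAX = 1.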
imdf = (output - hr_patch) ** 2
mse = np.mean(imdf) + eps
test_psnr+= 10 * math.log10(1.0/mse)
test_ite += 1
test_psnr /= (test_ite)
test_ssim /= (test_ite)
print('Valid PSNR: {:.4f}'.format(test_psnr))
print('Valid SSIM: {:.4f}'.format(test_ssim))
f = open('PSNR.txt', 'a')
writer = csv.writer(f, lineterminator='\n')
writer.writerow([epoch, test_psnr, test_ssim])
f.close()
print('------------------------')
torch.save(model.state_dict(), './model_%d.pth' % int(epoch))
return train_loss
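# Commented usage sketch (assumptions: the cgp argument is unused in this snippet
# and the /dataset/ paths exist):
# trainer = CNN_train('mix', imgSize=63, batchsize=32)
# final_loss = trainer(None, gpuID=0, epoch_num=150, gpu_num=1)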
|
[
"numpy.mean",
"torch.no_grad",
"data_load_mix.get_dataset_deform",
"torch.utils.data.DataLoader",
"os.path.exists",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"math.log10",
"skimage.measure.compare_ssim",
"utils.count_parameters_in_MB",
"csv.writer",
"torch.manual_seed",
"torch.autograd.Variable",
"torch.cuda.manual_seed",
"torchvision.utils.save_image",
"data_load_own.get_training_set",
"os.makedirs",
"torch.nn.L1Loss",
"time.time",
"model.Network",
"torch.nn.DataParallel"
] |
[((2975, 2998), 'torch.manual_seed', 'torch.manual_seed', (['(2018)'], {}), '(2018)\n', (2992, 2998), False, 'import torch\n'), ((3007, 3035), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(2018)'], {}), '(2018)\n', (3029, 3035), False, 'import torch\n'), ((3144, 3155), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (3153, 3155), True, 'import torch.nn as nn\n'), ((3210, 3247), 'model.Network', 'Network', (['(16)', '(10)', 'L1_loss'], {'gpuID': 'gpuID'}), '(16, 10, L1_loss, gpuID=gpuID)\n', (3217, 3247), False, 'from model import Network\n'), ((3672, 3736), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['optimizer'], {'T_max': 'epoch_num'}), '(optimizer, T_max=epoch_num)\n', (3708, 3736), True, 'import torch.optim as optim\n'), ((3345, 3396), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'device_ids'}), '(model, device_ids=device_ids)\n', (3366, 3396), False, 'import torch\n'), ((3473, 3508), 'utils.count_parameters_in_MB', 'utils.count_parameters_in_MB', (['model'], {}), '(model)\n', (3501, 3508), False, 'import utils\n'), ((3534, 3569), 'utils.count_parameters_in_MB', 'utils.count_parameters_in_MB', (['model'], {}), '(model)\n', (3562, 3569), False, 'import utils\n'), ((3806, 3833), 'os.path.exists', 'os.path.exists', (['"""./results"""'], {}), "('./results')\n", (3820, 3833), False, 'import os\n'), ((3847, 3878), 'os.makedirs', 'os.makedirs', (['"""./results/Inputs"""'], {}), "('./results/Inputs')\n", (3858, 3878), False, 'import os\n'), ((3891, 3923), 'os.makedirs', 'os.makedirs', (['"""./results/Outputs"""'], {}), "('./results/Outputs')\n", (3902, 3923), False, 'import os\n'), ((3936, 3968), 'os.makedirs', 'os.makedirs', (['"""./results/Targets"""'], {}), "('./results/Targets')\n", (3947, 3968), False, 'import os\n'), ((4089, 4100), 'time.time', 'time.time', ([], {}), '()\n', (4098, 4100), False, 'import time\n'), ((1222, 1273), 'data_load_mix.get_dataset_deform', 'get_dataset_deform', (['train_dir', 'val_dir', 'test_dir', '(0)'], {}), '(train_dir, val_dir, test_dir, 0)\n', (1240, 1273), False, 'from data_load_mix import get_dataset_deform\n'), ((1300, 1351), 'data_load_mix.get_dataset_deform', 'get_dataset_deform', (['train_dir', 'val_dir', 'test_dir', '(1)'], {}), '(train_dir, val_dir, test_dir, 1)\n', (1318, 1351), False, 'from data_load_mix import get_dataset_deform\n'), ((1467, 1586), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'num_workers': 'self.num_work', 'batch_size': 'self.batchsize', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(dataset=train_set, num_workers=self.num_work, batch_size=self.\n batchsize, shuffle=True, pin_memory=True)\n', (1477, 1586), False, 'from torch.utils.data import DataLoader\n'), ((1616, 1721), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'val_set', 'num_workers': 'self.num_work', 'batch_size': '(1)', 'shuffle': '(False)', 'pin_memory': '(False)'}), '(dataset=val_set, num_workers=self.num_work, batch_size=1,\n shuffle=False, pin_memory=False)\n', (1626, 1721), False, 'from torch.utils.data import DataLoader\n'), ((2285, 2342), 'data_load_own.get_training_set', 'get_training_set', (['train_input_dir', 'train_target_dir', '(True)'], {}), '(train_input_dir, train_target_dir, True)\n', (2301, 2342), False, 'from data_load_own import get_training_set, get_test_set\n'), ((2370, 2426), 'data_load_own.get_training_set', 'get_training_set', (['test_input_dir', 'test_target_dir', '(False)'], {}), '(test_input_dir, 
test_target_dir, False)\n', (2386, 2426), False, 'from data_load_own import get_training_set, get_test_set\n'), ((2461, 2579), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'num_workers': 'self.num_work', 'batch_size': 'self.batchsize', 'shuffle': '(True)', 'drop_last': '(True)'}), '(dataset=train_set, num_workers=self.num_work, batch_size=self.\n batchsize, shuffle=True, drop_last=True)\n', (2471, 2579), False, 'from torch.utils.data import DataLoader\n'), ((2614, 2702), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'num_workers': 'self.num_work', 'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset=test_set, num_workers=self.num_work, batch_size=1,\n shuffle=False)\n', (2624, 2702), False, 'from torch.utils.data import DataLoader\n'), ((4761, 4847), 'torchvision.utils.save_image', 'vutils.save_image', (['lr_patch.data', "('./input_sample%d.png' % gpuID)"], {'normalize': '(False)'}), "(lr_patch.data, './input_sample%d.png' % gpuID, normalize=\n False)\n", (4778, 4847), True, 'import torchvision.utils as vutils\n'), ((4863, 4950), 'torchvision.utils.save_image', 'vutils.save_image', (['hr_patch.data', "('./target_sample%d.png' % gpuID)"], {'normalize': '(False)'}), "(hr_patch.data, './target_sample%d.png' % gpuID, normalize\n =False)\n", (4880, 4950), True, 'import torchvision.utils as vutils\n'), ((4966, 5051), 'torchvision.utils.save_image', 'vutils.save_image', (['output.data', "('./output_sample%d.png' % gpuID)"], {'normalize': '(False)'}), "(output.data, './output_sample%d.png' % gpuID, normalize=False\n )\n", (4983, 5051), True, 'import torchvision.utils as vutils\n'), ((5147, 5158), 'time.time', 'time.time', ([], {}), '()\n', (5156, 5158), False, 'import time\n'), ((5289, 5304), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5302, 5304), False, 'import torch\n'), ((7452, 7486), 'csv.writer', 'csv.writer', (['f'], {'lineterminator': '"""\n"""'}), "(f, lineterminator='\\n')\n", (7462, 7486), False, 'import csv\n'), ((4336, 4372), 'torch.autograd.Variable', 'Variable', (['input'], {'requires_grad': '(False)'}), '(input, requires_grad=False)\n', (4344, 4372), False, 'from torch.autograd import Variable\n'), ((4412, 4449), 'torch.autograd.Variable', 'Variable', (['target'], {'requires_grad': '(False)'}), '(target, requires_grad=False)\n', (4420, 4449), False, 'from torch.autograd import Variable\n'), ((6861, 6920), 'skimage.measure.compare_ssim', 'ski_ssim', (['output', 'hr_patch'], {'data_range': '(1)', 'multichannel': '(True)'}), '(output, hr_patch, data_range=1, multichannel=True)\n', (6869, 6920), True, 'from skimage.measure import compare_ssim as ski_ssim\n'), ((7038, 7051), 'numpy.mean', 'np.mean', (['imdf'], {}), '(imdf)\n', (7045, 7051), True, 'import numpy as np\n'), ((7099, 7120), 'math.log10', 'math.log10', (['(1.0 / mse)'], {}), '(1.0 / mse)\n', (7109, 7120), False, 'import math\n'), ((5698, 5734), 'torch.autograd.Variable', 'Variable', (['input'], {'requires_grad': '(False)'}), '(input, requires_grad=False)\n', (5706, 5734), False, 'from torch.autograd import Variable\n'), ((5782, 5819), 'torch.autograd.Variable', 'Variable', (['target'], {'requires_grad': '(False)'}), '(target, requires_grad=False)\n', (5790, 5819), False, 'from torch.autograd import Variable\n')]
|
"""
Script goal,
Open land cover data and build a simple cover map
"""
#==============================================================================
__title__ = "LandCover"
__author__ = "<NAME>"
__version__ = "v1.0(12.03.2021)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
# import geopandas as gpd
import argparse
import datetime as dt
import warnings as warn
import xarray as xr
import bottleneck as bn
import scipy as sp
import glob
import shutil
import time
from dask.diagnostics import ProgressBar
import rasterio
from collections import OrderedDict
# from scipy import stats
# from numba import jit
# from netCDF4 import Dataset, num2date, date2num
# from scipy import stats
# import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
# import seaborn as sns
import matplotlib as mpl
import cartopy.crs as ccrs
import cartopy.feature as cpf
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import make_axes_locatable
import socket
# ========== Import my functions ==========
import myfunctions.corefunctions as cf
import myfunctions.PlotFunctions as pf
# import cartopy.feature as cpf
# from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# # Import debugging packages
# import pdb as ipdb
# import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
def main():
	# ========== Setup the broad information ==========
region = "SIBERIA"
box = [-10.0, 180, 40, 70]
# ========== Load in the different data from glc ==========
path = "./data/LandCover/"
# years = [2000, 2010]
legendfn = [f"{path}glc2000_v1_1/Tiff/Global_Legend.csv", f"{path}gez2010/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv"]
# geotiffn = [f"{path}glc2000_v1_1/Tiff/glc2000_v1_1.tif", f"{path}gez2010/OUTPUT.tif", f"{path}gez2010/IsBorealV3.tif"]
Down = ["MODIS", "esacci", "COPERN_BA"]
res = ["MODIS", "GFED", "TerraClimate", ] #"COPERN_BA", "esacci",
force = False
for dsres in res:
fnout = f"{path}Regridded_forestzone_{dsres}.nc"
if os.path.isfile(fnout) and not force:
print(f"{dsres} has an existing file")
continue
else:
print(dsres)
dataname = ["LandCover", "GlobalEcologicalZones", "DinersteinRegions", "BorealMask"]
if dsres in Down:
datares = "MODIS"
else:
datares = dsres
geotiffn = [f"{path}glc2000_v1_1/Tiff/glc2000_v1_1.tif", f"{path}Dinerstein_Aggregated/Masks/Boreal_climatic_{datares}.tif", f"{path}Dinerstein_Aggregated/Masks/BorealEco_2017_{datares}.tif", f"{path}Dinerstein_Aggregated/Masks/Boreal_buf_{datares}.tif"]
mskfn = "./data/masks/broad/Hansen_GFC-2018-v1.6_%s_ProcessedTo%s.nc" % (region, dsres)
ds_msk = xr.open_dataset(mskfn).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1]))).chunk()
mask = ds_msk.datamask
# out_dic = OrderedDict()
outlist = []
key_dic = OrderedDict()
for dsnx, legfn, tiffn in zip(dataname, legendfn, geotiffn):
print(dsnx)
# +++++ open the dataarray +++++
key_dic[dsnx] = pd.read_csv(legfn)
da = xr.open_rasterio(tiffn).transpose("y", "x", "band").rename({"x":"longitude", "y":"latitude", "band":"time"}).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1]))).chunk()
da["time"] = [pd.Timestamp("2018-12-31")]
if da.longitude.shape > ds_msk.longitude.shape:
print(da.latitude.shape[0], ds_msk.latitude.shape[0])
					print("Coarsening data started at: ", pd.Timestamp.now())
# breakpoint()
# Coarsen/ downscale
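					# Block sizes are the integer ratio of the source to the target grid; the
					# block median of the categorical codes is taken and then rounded.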
latscale = int(da.latitude.shape[0] / ds_msk.latitude.shape[0])
lonscale = int(da.longitude.shape[0] / ds_msk.longitude.shape[0])
da = da.coarsen(latitude=latscale, longitude=lonscale, boundary ="pad").median()
da = da.round()
da = da.reindex_like(mask, method="nearest")
delay = xr.Dataset({dsnx:da}).to_netcdf(f"/tmp/{dsres}_{dsnx}.nc", format = 'NETCDF4', unlimited_dims = ["time"], compute=False)
print(f"Creating temp netcdf for {dsres} {dsnx} at: {pd.Timestamp.now()}")
with ProgressBar():
delay.compute()
# out_dic[dsnx]
outlist.append(f"/tmp/{dsres}_{dsnx}.nc")
da = None
# ========== get the FAO climate zones ==========
# ds = xr.Dataset(out_dic)
ds = xr.open_mfdataset(outlist).transpose('time', 'latitude', 'longitude')
# breakpoint()
GlobalAttributes(ds, dsres, fnameout=fnout)
delayed_obj = ds.to_netcdf(fnout, format = 'NETCDF4', unlimited_dims = ["time"], compute=False)
print(f"Starting write of {dsres} data at: {pd.Timestamp.now()}")
with ProgressBar():
results = delayed_obj.compute()
print(f"{dsres} completed at: {pd.Timestamp.now()}")
if dsres == "MODIS":
for dsin in ["esacci", "COPERN_BA"]:
print(dsin)
mskfn = "./data/masks/broad/Hansen_GFC-2018-v1.6_%s_ProcessedTo%s.nc" % (region, dsin)
ds_msk = xr.open_dataset(mskfn).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1]))).chunk()
mask = ds_msk.datamask
ds_out = ds.reindex_like(mask, method="nearest")
fnout = f"{path}Regridded_forestzone_{dsin}.nc"
delayed_obj = ds_out.to_netcdf(fnout, format = 'NETCDF4', unlimited_dims = ["time"], compute=False)
print(f"Starting write of {dsin} data at: {pd.Timestamp.now()}")
with ProgressBar():
results = delayed_obj.compute()
# breakpoint()
breakpoint()
for dsn in ["TerraClimate","GFED", "MODIS", "esacci", "COPERN_BA"]:
print(dsn)
mskfn = "./data/masks/broad/Hansen_GFC-2018-v1.6_%s_ProcessedTo%s.nc" % (region, dsn)
ds_msk = xr.open_dataset(mskfn).sel(dict(latitude=slice(box[3], box[2]), longitude=slice(box[0], box[1])))
# ds_mod = ds.reindex_like(ds_msk, method="nearest")
# mask = ds_msk.datamask
# # mask = ds_msk.datamask.reindex_like(ds, method="nearest")
# # boreal mask
# title = "FAO Boreal Zone"
# plotmaker(ds_mod.Boreal, title, mask)
# # Tree cover mask
# title = "Needle Leaf Tree species"
# plotmaker(((ds_mod.LandCover == 4)+(ds_mod.LandCover == 5)), title, mask)
# title = "Needle Leaf and mixed fores"
# plotmaker(((ds_mod.LandCover == 6)+(ds_mod.LandCover == 4)+(ds_mod.LandCover == 5)), title, mask)
# title = "Broadleaf forest"
# plotmaker(((ds_mod.LandCover == 1)+(ds_mod.LandCover == 2)+(ds_mod.LandCover == 3)), title, mask)
breakpoint()
breakpoint()
#==============================================================================
# def _lookupkeys():
# dataname = ["LandCover", "GlobalEcologicalZones", "DinersteinRegions", "BorealMask"]
# legendfn = ([f"{path}glc2000_v1_1/Tiff/Global_Legend.csv", f"{path}gez2010/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv", f"{path}Dinerstein_Aggregated/Lookup.csv"])
# for nm, lfn in zip(dataname, legendfn)
def GlobalAttributes(ds, dsn, fnameout=""):
"""
Creates the global attributes for the netcdf file that is being written
these attributes come from :
https://www.unidata.ucar.edu/software/thredds/current/netcdf-java/metadata/DataDiscoveryAttConvention.html
args
ds: xarray ds
			Dataset containing the information being interpreted
fnout: str
filename out
returns:
		attributes: OrderedDict containing the attribute information
"""
# ========== Create the ordered dictionary ==========
if ds is None:
attr = OrderedDict()
else:
attr = ds.attrs
# fetch the references for my publications
# pubs = puplications()
# ========== Fill the Dictionary ==========
# ++++++++++ Highly recomended ++++++++++
attr["FileName"] = fnameout
attr["title"] = "Datamasks"
attr["summary"] = "BorealForestCovermaks_%sData" % (dsn)
attr["Conventions"] = "CF-1.7"
# ++++++++++ Data Provinance ++++++++++
attr["history"] = "%s: Netcdf file created using %s (%s):%s by %s. FRI caluculated using %s data" % (
str(pd.Timestamp.now()), __title__, __file__, __version__, __author__, dsn)
if not ds is None:
attr["history"] += ds.history
attr["creator_name"] = __author__
attr["creator_url"] = "ardenburrell.com"
attr["creator_email"] = __email__
attr["Institution"] = "Woodwell"
attr["date_created"] = str(pd.Timestamp.now())
ds.longitude.attrs['units'] = 'degrees_east'
ds.latitude.attrs['units'] = 'degrees_north'
# ++++++++++ Netcdf Summary infomation ++++++++++
# attr["time_coverage_start"] = str(dt.datetime(ds['time.year'].min(), 1, 1))
# attr["time_coverage_end"] = str(dt.datetime(ds['time.year'].max() , 12, 31))
return attr
def _mode(da):
vals = sp.stats.mode(da, axis=None, nan_policy="omit")
return vals[0][0]
def plotmaker(ds_in, title, mask):
# breakpoint()
latiMid=np.mean([70.0, 40.0])
longMid=np.mean([-10.0, 180.0])
fig, ax = plt.subplots(1, 1, figsize=(20,12), subplot_kw={'projection': ccrs.Orthographic(longMid, latiMid)})
ds_in.where(mask==1).plot(transform=ccrs.PlateCarree(), ax=ax)
coast = cpf.GSHHSFeature(scale="intermediate")
ax.add_feature(cpf.BORDERS, linestyle='--', zorder=102)
ax.add_feature(cpf.LAND, facecolor='dimgrey', alpha=1, zorder=0)
ax.add_feature(coast, zorder=101, alpha=0.5)
# coast_50m = cpf.GSHHSFeature(scale="high")
ax.add_feature(cpf.OCEAN, facecolor="w", alpha=1, zorder=100)
ax.set_title(f"{title}")
plt.show()
#==============================================================================
if __name__ == '__main__':
main()
|
[
"pandas.Timestamp",
"matplotlib.pyplot.show",
"scipy.stats.mode",
"os.getcwd",
"pandas.read_csv",
"xarray.open_rasterio",
"cartopy.feature.GSHHSFeature",
"cartopy.crs.PlateCarree",
"xarray.open_dataset",
"dask.diagnostics.ProgressBar",
"xarray.Dataset",
"os.path.isfile",
"numpy.mean",
"pandas.Timestamp.now",
"collections.OrderedDict",
"cartopy.crs.Orthographic",
"xarray.open_mfdataset",
"os.chdir"
] |
[((705, 716), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (714, 716), False, 'import os\n'), ((9710, 9757), 'scipy.stats.mode', 'sp.stats.mode', (['da'], {'axis': 'None', 'nan_policy': '"""omit"""'}), "(da, axis=None, nan_policy='omit')\n", (9723, 9757), True, 'import scipy as sp\n'), ((9855, 9876), 'numpy.mean', 'np.mean', (['[70.0, 40.0]'], {}), '([70.0, 40.0])\n', (9862, 9876), True, 'import numpy as np\n'), ((9887, 9910), 'numpy.mean', 'np.mean', (['[-10.0, 180.0]'], {}), '([-10.0, 180.0])\n', (9894, 9910), True, 'import numpy as np\n'), ((10100, 10138), 'cartopy.feature.GSHHSFeature', 'cpf.GSHHSFeature', ([], {'scale': '"""intermediate"""'}), "(scale='intermediate')\n", (10116, 10138), True, 'import cartopy.feature as cpf\n'), ((10451, 10461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10459, 10461), True, 'import matplotlib.pyplot as plt\n'), ((503, 514), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (512, 514), False, 'import os\n'), ((570, 587), 'os.chdir', 'os.chdir', (['(p1 + p2)'], {}), '(p1 + p2)\n', (578, 587), False, 'import os\n'), ((3897, 3910), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3908, 3910), False, 'from collections import OrderedDict\n'), ((8401, 8414), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8412, 8414), False, 'from collections import OrderedDict\n'), ((9334, 9352), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (9350, 9352), True, 'import pandas as pd\n'), ((448, 459), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (457, 459), False, 'import os\n'), ((3061, 3082), 'os.path.isfile', 'os.path.isfile', (['fnout'], {}), '(fnout)\n', (3075, 3082), False, 'import os\n'), ((4048, 4066), 'pandas.read_csv', 'pd.read_csv', (['legfn'], {}), '(legfn)\n', (4059, 4066), True, 'import pandas as pd\n'), ((5602, 5615), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (5613, 5615), False, 'from dask.diagnostics import ProgressBar\n'), ((10063, 10081), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (10079, 10081), True, 'import cartopy.crs as ccrs\n'), ((532, 543), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (541, 543), False, 'import os\n'), ((4298, 4324), 'pandas.Timestamp', 'pd.Timestamp', (['"""2018-12-31"""'], {}), "('2018-12-31')\n", (4310, 4324), True, 'import pandas as pd\n'), ((5071, 5084), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (5082, 5084), False, 'from dask.diagnostics import ProgressBar\n'), ((5287, 5313), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['outlist'], {}), '(outlist)\n', (5304, 5313), True, 'import xarray as xr\n'), ((6618, 6640), 'xarray.open_dataset', 'xr.open_dataset', (['mskfn'], {}), '(mskfn)\n', (6633, 6640), True, 'import xarray as xr\n'), ((8977, 8995), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (8993, 8995), True, 'import pandas as pd\n'), ((9987, 10022), 'cartopy.crs.Orthographic', 'ccrs.Orthographic', (['longMid', 'latiMid'], {}), '(longMid, latiMid)\n', (10004, 10022), True, 'import cartopy.crs as ccrs\n'), ((4482, 4500), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (4498, 4500), True, 'import pandas as pd\n'), ((4862, 4884), 'xarray.Dataset', 'xr.Dataset', (['{dsnx: da}'], {}), '({dsnx: da})\n', (4872, 4884), True, 'import xarray as xr\n'), ((5572, 5590), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (5588, 5590), True, 'import pandas as pd\n'), ((5690, 5708), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (5706, 5708), True, 'import pandas as pd\n'), 
((6332, 6345), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (6343, 6345), False, 'from dask.diagnostics import ProgressBar\n'), ((3703, 3725), 'xarray.open_dataset', 'xr.open_dataset', (['mskfn'], {}), '(mskfn)\n', (3718, 3725), True, 'import xarray as xr\n'), ((5040, 5058), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (5056, 5058), True, 'import pandas as pd\n'), ((6300, 6318), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (6316, 6318), True, 'import pandas as pd\n'), ((5904, 5926), 'xarray.open_dataset', 'xr.open_dataset', (['mskfn'], {}), '(mskfn)\n', (5919, 5926), True, 'import xarray as xr\n'), ((4086, 4109), 'xarray.open_rasterio', 'xr.open_rasterio', (['tiffn'], {}), '(tiffn)\n', (4102, 4109), True, 'import xarray as xr\n')]
|
import torch
import matplotlib as mpl
mpl.use('agg')
import numpy as np
import os
import scipy.integrate as integrate
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.lines import Line2D
from matplotlib import rc
def plotPred(args, t, xT, uPred, uTarget, epoch, bidx=0):
'''
Plots a single prediction contour
'''
plt.close("all")
# Create figure
mpl.rcParams['font.family'] = ['serif'] # default is sans-serif
rc('text', usetex=False)
fig = plt.figure(figsize=(15, 8), dpi=150)
ax = []
ax.append(plt.subplot2grid((3, 15), (0, 0), colspan=14))
ax.append(plt.subplot2grid((3, 15), (1, 0), colspan=14))
ax.append(plt.subplot2grid((3, 15), (2, 0), colspan=14))
cmap = "inferno"
c0 = ax[1].imshow(uPred.T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c_max = np.max(uPred.T)
c_min = np.min(uPred.T)
c0.set_clim(vmin=c_min, vmax=c_max)
c0 = ax[0].imshow(uTarget.T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c0.set_clim(vmin=c_min, vmax=c_max)
p0 = ax[0].get_position().get_points().flatten()
p1 = ax[1].get_position().get_points().flatten()
ax_cbar = fig.add_axes([p1[2]+0.015, p1[1], 0.020, p0[3]-p1[1]])
ticks = np.linspace(0, 1, 5)
tickLabels = np.linspace(c_min, c_max, 5)
tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=cmap, orientation='vertical', ticks=ticks)
cbar.set_ticklabels(tickLabels)
cmap = "viridis"
c0 = ax[2].imshow(np.abs(uPred.T - uTarget.T), interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
p0 = ax[2].get_position().get_points().flatten()
ax_cbar = fig.add_axes([p0[2]+0.015, p0[1], 0.020, p0[3]-p0[1]])
ticks = np.linspace(0, 1, 5)
tickLabels = np.linspace(c0.norm.vmin, c0.norm.vmax, 5)
tickLabels = ["{:.2e}".format(t0) for t0 in tickLabels]
cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=cmap, orientation='vertical', ticks=ticks)
cbar.set_ticklabels(tickLabels)
ax[0].set_ylabel('x', fontsize=14)
ax[1].set_ylabel('x', fontsize=14)
ax[2].set_ylabel('x', fontsize=14)
ax[2].set_xlabel('t', fontsize=14)
file_name = args.pred_dir+"/burgerPred-epoch{0:03d}-{1:01d}.png".format(epoch, bidx)
plt.savefig(file_name, bbox_inches='tight')
def plotSamples(args, t, xT, uPred, uTarget, epoch=0):
'''
Plots prediction contour of Baysian model samples
'''
plt.close("all")
# Create figure
mpl.rcParams['font.family'] = ['serif'] # default is sans-serif
# rc('text', usetex=True)
n_sample = uPred.shape[0] + 1
nrow = int(np.sqrt(n_sample))
ncol = 6*nrow + 1
fig = plt.figure(figsize=(20, 10), dpi=150)
ax = []
for i in range(nrow):
for j in range(nrow):
ax.append(plt.subplot2grid((nrow, ncol), (i, 6*j), colspan=5))
cmap = "inferno"
# Target in top left
uTarget = uTarget[:uPred.shape[1]]
c0 = ax[0].imshow(uTarget.T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c_max = np.max(uPred.T)
c_min = np.min(uPred.T)
c0.set_clim(vmin=c_min, vmax=c_max)
# Prediction samples
for i in range(1, len(ax)):
c0 = ax[i].imshow(uPred[i-1].T, interpolation='nearest', cmap=cmap, aspect='auto', extent=[t[0],t[-1],xT[0],xT[-1]])
c0.set_clim(vmin=c_min, vmax=c_max)
p0 = ax[nrow-1].get_position().get_points().flatten()
p1 = ax[-1].get_position().get_points().flatten()
ax_cbar = fig.add_axes([p1[2]+0.01, p1[1], 0.020, p0[3]-p1[1]])
ticks = np.linspace(0, 1, 5)
tickLabels = np.linspace(c_min, c_max, 5)
tickLabels = ["{:02.2f}".format(t0) for t0 in tickLabels]
cbar = mpl.colorbar.ColorbarBase(ax_cbar, cmap=cmap, orientation='vertical', ticks=ticks)
cbar.set_ticklabels(tickLabels)
# Axis labels
for i in range(len(ax)-nrow, len(ax)):
ax[i].set_xlabel('t')
for i in range(nrow):
ax[int(i*nrow)].set_ylabel('x')
file_name = args.pred_dir+"/burgerSamples_epoch{:03d}.png".format(epoch)
plt.savefig(file_name, bbox_inches='tight')
def calcR2score(uPred, uTarget, epoch=0, save=True):
'''
Calculates the total and time dependent average R2 score
Args:
uPred (torch.Tensor): [b x t x d] tensor of model predictions
uTarget (torch.Tensor): [b x t x d] tensor of corresponding target values
epoch (int): current training epoch (for logging)
'''
# Following:
# https://en.wikipedia.org/wiki/Coefficient_of_determination
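    # As a reminder (standard definition, not specific to this code base):
    #   R^2 = 1 - SS_res / SS_tot
    #   SS_res = sum_i (y_i - yhat_i)^2    (residual sum of squares)
    #   SS_tot = sum_i (y_i - ybar)^2      (total sum of squares about the mean)
    # Below, both sums are computed per batch element and the resulting R^2
    # values are averaged over the batch.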
# First total average
ybar = torch.mean(uTarget.view(uTarget.size(0),-1), dim=-1)
ss_tot = torch.sum(torch.pow(uTarget - ybar.unsqueeze(-1).unsqueeze(-1), 2).view(uTarget.size(0), -1), dim=-1)
ss_res = torch.sum(torch.pow(uTarget - uPred, 2).view(uTarget.size(0), -1), dim=-1)
r2_avg = torch.mean(1 - ss_res/ss_tot).cpu().numpy()
# Now time dependent
ybar = torch.mean(uTarget, dim=-1)
ss_tot = torch.sum(torch.pow(uTarget - ybar.unsqueeze(-1), 2), dim=-1)
ss_res = torch.sum(torch.pow(uTarget - uPred, 2), dim=-1)
r2_time = torch.mean(1 - ss_res/ss_tot, dim=0).cpu().numpy()
if(save):
f=open('r2score_time.dat','ab')
np.savetxt(f, np.insert(r2_time, 0, epoch)[np.newaxis,:], delimiter=',')
f.close()
f=open('r2score.dat','ab')
np.savetxt(f, np.insert(r2_avg, 0, epoch)[np.newaxis,:], delimiter=',')
f.close()
|
[
"torch.mean",
"matplotlib.rc",
"numpy.abs",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplot2grid",
"numpy.insert",
"numpy.max",
"matplotlib.use",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.linspace",
"torch.pow",
"matplotlib.colorbar.ColorbarBase",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((38, 52), 'matplotlib.use', 'mpl.use', (['"""agg"""'], {}), "('agg')\n", (45, 52), True, 'import matplotlib as mpl\n'), ((357, 373), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (366, 373), True, 'import matplotlib.pyplot as plt\n'), ((467, 491), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (469, 491), False, 'from matplotlib import rc\n'), ((503, 539), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 8)', 'dpi': '(150)'}), '(figsize=(15, 8), dpi=150)\n', (513, 539), True, 'import matplotlib.pyplot as plt\n'), ((885, 900), 'numpy.max', 'np.max', (['uPred.T'], {}), '(uPred.T)\n', (891, 900), True, 'import numpy as np\n'), ((913, 928), 'numpy.min', 'np.min', (['uPred.T'], {}), '(uPred.T)\n', (919, 928), True, 'import numpy as np\n'), ((1316, 1336), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (1327, 1336), True, 'import numpy as np\n'), ((1354, 1382), 'numpy.linspace', 'np.linspace', (['c_min', 'c_max', '(5)'], {}), '(c_min, c_max, 5)\n', (1365, 1382), True, 'import numpy as np\n'), ((1456, 1543), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax_cbar'], {'cmap': 'cmap', 'orientation': '"""vertical"""', 'ticks': 'ticks'}), "(ax_cbar, cmap=cmap, orientation='vertical', ticks\n =ticks)\n", (1481, 1543), True, 'import matplotlib as mpl\n'), ((1867, 1887), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (1878, 1887), True, 'import numpy as np\n'), ((1905, 1947), 'numpy.linspace', 'np.linspace', (['c0.norm.vmin', 'c0.norm.vmax', '(5)'], {}), '(c0.norm.vmin, c0.norm.vmax, 5)\n', (1916, 1947), True, 'import numpy as np\n'), ((2019, 2106), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax_cbar'], {'cmap': 'cmap', 'orientation': '"""vertical"""', 'ticks': 'ticks'}), "(ax_cbar, cmap=cmap, orientation='vertical', ticks\n =ticks)\n", (2044, 2106), True, 'import matplotlib as mpl\n'), ((2389, 2432), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {'bbox_inches': '"""tight"""'}), "(file_name, bbox_inches='tight')\n", (2400, 2432), True, 'import matplotlib.pyplot as plt\n'), ((2563, 2579), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2572, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2799, 2836), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)', 'dpi': '(150)'}), '(figsize=(20, 10), dpi=150)\n', (2809, 2836), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3212), 'numpy.max', 'np.max', (['uPred.T'], {}), '(uPred.T)\n', (3203, 3212), True, 'import numpy as np\n'), ((3225, 3240), 'numpy.min', 'np.min', (['uPred.T'], {}), '(uPred.T)\n', (3231, 3240), True, 'import numpy as np\n'), ((3701, 3721), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (3712, 3721), True, 'import numpy as np\n'), ((3739, 3767), 'numpy.linspace', 'np.linspace', (['c_min', 'c_max', '(5)'], {}), '(c_min, c_max, 5)\n', (3750, 3767), True, 'import numpy as np\n'), ((3841, 3928), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax_cbar'], {'cmap': 'cmap', 'orientation': '"""vertical"""', 'ticks': 'ticks'}), "(ax_cbar, cmap=cmap, orientation='vertical', ticks\n =ticks)\n", (3866, 3928), True, 'import matplotlib as mpl\n'), ((4200, 4243), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {'bbox_inches': '"""tight"""'}), "(file_name, bbox_inches='tight')\n", (4211, 4243), True, 'import matplotlib.pyplot as plt\n'), 
((5065, 5092), 'torch.mean', 'torch.mean', (['uTarget'], {'dim': '(-1)'}), '(uTarget, dim=-1)\n', (5075, 5092), False, 'import torch\n'), ((566, 611), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 15)', '(0, 0)'], {'colspan': '(14)'}), '((3, 15), (0, 0), colspan=14)\n', (582, 611), True, 'import matplotlib.pyplot as plt\n'), ((627, 672), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 15)', '(1, 0)'], {'colspan': '(14)'}), '((3, 15), (1, 0), colspan=14)\n', (643, 672), True, 'import matplotlib.pyplot as plt\n'), ((688, 733), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(3, 15)', '(2, 0)'], {'colspan': '(14)'}), '((3, 15), (2, 0), colspan=14)\n', (704, 733), True, 'import matplotlib.pyplot as plt\n'), ((1619, 1646), 'numpy.abs', 'np.abs', (['(uPred.T - uTarget.T)'], {}), '(uPred.T - uTarget.T)\n', (1625, 1646), True, 'import numpy as np\n'), ((2748, 2765), 'numpy.sqrt', 'np.sqrt', (['n_sample'], {}), '(n_sample)\n', (2755, 2765), True, 'import numpy as np\n'), ((5191, 5220), 'torch.pow', 'torch.pow', (['(uTarget - uPred)', '(2)'], {}), '(uTarget - uPred, 2)\n', (5200, 5220), False, 'import torch\n'), ((2927, 2980), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(nrow, ncol)', '(i, 6 * j)'], {'colspan': '(5)'}), '((nrow, ncol), (i, 6 * j), colspan=5)\n', (2943, 2980), True, 'import matplotlib.pyplot as plt\n'), ((4905, 4934), 'torch.pow', 'torch.pow', (['(uTarget - uPred)', '(2)'], {}), '(uTarget - uPred, 2)\n', (4914, 4934), False, 'import torch\n'), ((5373, 5401), 'numpy.insert', 'np.insert', (['r2_time', '(0)', 'epoch'], {}), '(r2_time, 0, epoch)\n', (5382, 5401), True, 'import numpy as np\n'), ((5508, 5535), 'numpy.insert', 'np.insert', (['r2_avg', '(0)', 'epoch'], {}), '(r2_avg, 0, epoch)\n', (5517, 5535), True, 'import numpy as np\n'), ((4984, 5015), 'torch.mean', 'torch.mean', (['(1 - ss_res / ss_tot)'], {}), '(1 - ss_res / ss_tot)\n', (4994, 5015), False, 'import torch\n'), ((5245, 5283), 'torch.mean', 'torch.mean', (['(1 - ss_res / ss_tot)'], {'dim': '(0)'}), '(1 - ss_res / ss_tot, dim=0)\n', (5255, 5283), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import range
import utils
import argparse
import time
import os
import sys
import random
import math
import json
import codecs
import numpy as np
from utils import check_cuda_for_var, check_directory
parser = argparse.ArgumentParser(description=\
"Dialog2Vec Generator")
parser.add_argument('--data', type=str,\
help='location of the data corpus(json file)')
parser.add_argument('--validation_p', type=float, default=0.2,
help='percentage of validation data / all data')
parser.add_argument('--seed', type=int, default=55665566,
help='random seed')
parser.add_argument('--only_stat', type=bool, default=False,
help='only do statistic or not')
args = parser.parse_args()
random.seed(args.seed)
my_lang, document_list = utils.build_lang(args.data, dump_torch_variable=False)
# Statistic
dialog_len_count = {}
sentence_count = 0
total_word_count = 0
word_count = {}
for dialog in document_list:
dialog_len = len(dialog)
sentence_count += dialog_len
for sentence in dialog:
total_word_count += len(sentence)
for index in sentence:
word = my_lang.index2word[index]
word_count[word] = word_count.setdefault(word, 0) + 1
dialog_len_count[dialog_len] = dialog_len_count.setdefault(dialog_len, 0) + 1
print("total_word_count ", total_word_count)
print("sentence_count ", sentence_count)
print("dialog_len_count ", dialog_len_count)
print("word_count ", word_count)
if args.only_stat:
sys.exit(0)
#
random.shuffle(document_list)
cut = int(len(document_list) * args.validation_p)
training_data, validation_data = \
document_list[cut:], document_list[:cut]
# Training data for doc2vec
print("Training data for doc2vec")
gensim_train = []
for train_dialog in training_data:
doc = []
for sentence in train_dialog[:-1]:
doc += sentence
gensim_train.append(doc)
np.save("label/gensim_train.npy", gensim_train)
print("Label data for training")
label = []
dialog2vec = []
doc2vec = []
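# For every training dialog, two examples are generated below: a positive one
# (label 1) built from every sentence except the final reply, and a negative
# one (label 0) built from a randomly truncated prefix of the dialog.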
for train_dialog in training_data:
doc = []
dialog = []
for sentence in train_dialog:
if not sentence == train_dialog[-1]:
doc += sentence
if len(sentence) > 1:
dialog.append(my_lang.index2sentence(sentence[:-1]))
dialog2vec.append(dialog[:-1])
doc2vec.append(doc)
label.append(1)
doc = []
dialog = []
for sentence in train_dialog[:random.randint(1, len(train_dialog)-2)]:
doc += sentence
if len(sentence) > 1:
dialog.append(my_lang.index2sentence(sentence[:-1]))
dialog2vec.append(dialog[:-1])
doc2vec.append(doc)
label.append(0)
np.save("label/gensim_train_test.npy", doc2vec)
np.save("label/train_label.npy", label)
with codecs.open("label/dialog2vec_train.json", "w+", encoding="utf-8") as outfile:
json.dump(dialog2vec, outfile, indent=4, ensure_ascii=False)
print("Label data for testing")
label = []
dialog2vec = []
doc2vec = []
for validate_dialog in validation_data:
doc = []
dialog = []
for sentence in validate_dialog:
        if not sentence == validate_dialog[-1]:
doc += sentence
if len(sentence) > 1:
dialog.append(my_lang.index2sentence(sentence[:-1]))
dialog2vec.append(dialog[:-1])
doc2vec.append(doc)
label.append(1)
doc = []
dialog = []
for sentence in validate_dialog[:random.randint(1, len(validate_dialog)-2)]:
doc += sentence
if len(sentence) > 1:
dialog.append(my_lang.index2sentence(sentence[:-1]))
dialog2vec.append(dialog[:-1])
doc2vec.append(doc)
label.append(0)
np.save("label/gensim_test_test.npy", doc2vec)
np.save("label/test_label.npy", label)
with codecs.open("label/dialog2vec_test.json", "w+", encoding="utf-8") as outfile:
json.dump(dialog2vec, outfile, indent=4, ensure_ascii=False)
|
[
"json.dump",
"numpy.save",
"argparse.ArgumentParser",
"utils.build_lang",
"codecs.open",
"random.shuffle",
"random.seed",
"sys.exit"
] |
[((376, 435), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Dialog2Vec Generator"""'}), "(description='Dialog2Vec Generator')\n", (399, 435), False, 'import argparse\n'), ((878, 900), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (889, 900), False, 'import random\n'), ((927, 981), 'utils.build_lang', 'utils.build_lang', (['args.data'], {'dump_torch_variable': '(False)'}), '(args.data, dump_torch_variable=False)\n', (943, 981), False, 'import utils\n'), ((1658, 1687), 'random.shuffle', 'random.shuffle', (['document_list'], {}), '(document_list)\n', (1672, 1687), False, 'import random\n'), ((2043, 2090), 'numpy.save', 'np.save', (['"""label/gensim_train.npy"""', 'gensim_train'], {}), "('label/gensim_train.npy', gensim_train)\n", (2050, 2090), True, 'import numpy as np\n'), ((2813, 2860), 'numpy.save', 'np.save', (['"""label/gensim_train_test.npy"""', 'doc2vec'], {}), "('label/gensim_train_test.npy', doc2vec)\n", (2820, 2860), True, 'import numpy as np\n'), ((2861, 2900), 'numpy.save', 'np.save', (['"""label/train_label.npy"""', 'label'], {}), "('label/train_label.npy', label)\n", (2868, 2900), True, 'import numpy as np\n'), ((3785, 3831), 'numpy.save', 'np.save', (['"""label/gensim_test_test.npy"""', 'doc2vec'], {}), "('label/gensim_test_test.npy', doc2vec)\n", (3792, 3831), True, 'import numpy as np\n'), ((3832, 3870), 'numpy.save', 'np.save', (['"""label/test_label.npy"""', 'label'], {}), "('label/test_label.npy', label)\n", (3839, 3870), True, 'import numpy as np\n'), ((1644, 1655), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1652, 1655), False, 'import sys\n'), ((2906, 2972), 'codecs.open', 'codecs.open', (['"""label/dialog2vec_train.json"""', '"""w+"""'], {'encoding': '"""utf-8"""'}), "('label/dialog2vec_train.json', 'w+', encoding='utf-8')\n", (2917, 2972), False, 'import codecs\n'), ((2989, 3049), 'json.dump', 'json.dump', (['dialog2vec', 'outfile'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(dialog2vec, outfile, indent=4, ensure_ascii=False)\n', (2998, 3049), False, 'import json\n'), ((3876, 3941), 'codecs.open', 'codecs.open', (['"""label/dialog2vec_test.json"""', '"""w+"""'], {'encoding': '"""utf-8"""'}), "('label/dialog2vec_test.json', 'w+', encoding='utf-8')\n", (3887, 3941), False, 'import codecs\n'), ((3958, 4018), 'json.dump', 'json.dump', (['dialog2vec', 'outfile'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(dialog2vec, outfile, indent=4, ensure_ascii=False)\n', (3967, 4018), False, 'import json\n')]
|
import more_itertools as mit
import functools as ftl
from recipes.testing import Expect
from astropy.io.fits.hdu.base import _BaseHDU
from pathlib import Path
from pySHOC import shocCampaign, shocHDU, shocNewHDU, shocBiasHDU, shocFlatHDU
import pytest
import numpy as np
import os
import tempfile as tmp
# TODO: old + new data all modes!!!
# TODO: all combinations of science, bias, dark, flats (+ masters)
# TODO:
# pylint: disable=C0111 # Missing %s docstring
# pylint: disable=R0201 # Method could be a function
# pretty sample images here:
DATA = Path(__file__).parent / 'data'
EX1 = DATA / 'AT2020hat'
CAL = DATA / 'calibration'
#
np.random.seed(12345)
# ---------------------------------- Helpers --------------------------------- #
def list_of_files():
# create text file with list of filenames for test load
fp, filename = tmp.mkstemp('.txt')
for name in EX1.glob('*.fits'):
os.write(fp, f'{name}{os.linesep}'.encode())
os.close(fp)
return filename
# --------------------------------- Fixtures --------------------------------- #
@pytest.fixture
def run():
return shocCampaign.load(EX1)
# run = shocCampaign.load(EX1)
# ----------------------------------- Tests ---------------------------------- #
class TestCampaign:
@pytest.mark.parametrize(
'pointer',
( # single file as a str
f'{EX1}/SHA_20200731.0001.fits',
# single file as a Path object
EX1 / 'SHA_20200731.0001.fits',
# file list
[f'{EX1}/SHA_20200731.0001.fits',
f'{EX1}/SHA_20200731.0002.fits'],
# globbing patterns
f'{EX1}/SHA_20200731.000[12].fits',
f'{EX1}/SHA_20200731.000*.fits',
# directory
EX1, str(EX1),
# pointer to text file with list of filenames
f'@{list_of_files()}'
)
)
def test_load(self, pointer):
run = shocCampaign.load(pointer)
def test_file_helper(self, run):
run.files
run.files.names
run.files.stems
run.files.nrs
@pytest.mark.parametrize(
'index',
( # simple indexing
0,
-1,
# by filename
'SHA_20200731.0007.fits',
'SHA_20200731.0007', # both should work
)
)
def test_single_index(self, run, index):
print(run[index].file.name)
assert isinstance(run[index], shocHDU)
@pytest.mark.parametrize(
'index,expected',
[ # slice
(slice(0, 4, 2),
['SHA_20200731.0001.fits', 'SHA_20200731.0003.fits']),
# sequences of ints
([0, 1, 3, -1],
['SHA_20200731.0001.fits', 'SHA_20200731.0002.fits',
'SHA_20200731.0004.fits', 'SHA_20200731.0022.fits']),
# array of ints
(np.arange(3),
['SHA_20200731.0001.fits', 'SHA_20200731.0002.fits',
'SHA_20200731.0003.fits']),
# boolean array
(np.random.randint(0, 2, 22).astype(bool),
['SHA_20200731.0002.fits', 'SHA_20200731.0003.fits',
'SHA_20200731.0004.fits', 'SHA_20200731.0006.fits',
'SHA_20200731.0009.fits', 'SHA_20200731.0011.fits',
'SHA_20200731.0012.fits', 'SHA_20200731.0014.fits',
'SHA_20200731.0015.fits', 'SHA_20200731.0017.fits',
'SHA_20200731.0018.fits', 'SHA_20200731.0019.fits']),
# by list of filenames
(('SHA_20200731.0007.fits', 'SHA_20200731.0008.fits'),
['SHA_20200731.0007.fits', 'SHA_20200731.0008.fits']),
# by globbing pattern
('SHA*[78].fits',
['SHA_20200731.0007.fits', 'SHA_20200731.0008.fits',
'SHA_20200731.0017.fits', 'SHA_20200731.0018.fits']),
# by brace expansion
('SHA*{7,8}.fits',
['SHA_20200731.0007.fits', 'SHA_20200731.0008.fits',
'SHA_20200731.0017.fits', 'SHA_20200731.0018.fits']),
# by filename sequence slice
('*0731.00[10:22].*',
['SHA_20200731.0010.fits', 'SHA_20200731.0011.fits',
'SHA_20200731.0012.fits', 'SHA_20200731.0013.fits',
'SHA_20200731.0014.fits', 'SHA_20200731.0015.fits',
'SHA_20200731.0016.fits', 'SHA_20200731.0017.fits',
'SHA_20200731.0018.fits', 'SHA_20200731.0019.fits',
'SHA_20200731.0020.fits', 'SHA_20200731.0021.fits'])
]
)
def test_multi_index(self, run, index, expected):
sub = run[index]
assert isinstance(sub, shocCampaign)
assert sub.files.names == expected
def test_pprint(self, run):
print(run, run.table(run), sep='\n\n')
# @pytest.mark.parametrize(
# 'filename,expected',
# [(CAL/'SHA_20200822.0005.fits', shocBiasHDU),
# (CAL/'SHA_20200801.0001.fits', shocFlatHDU),
# (EX1/'SHA_20200731.0022.fits', shocNewHDU)]
# )
# def test_hdu_type(filename, expected):
# obj = _BaseHDU.readfr
# @expected(
# (CAL/'SHA_20200822.0005.fits', shocBiasHDU,
# CAL/'SHA_20200801.0001.fits', shocFlatHDU,
# EX1/'SHA_20200731.0022.fits', shocNewHDU)
# )
def hdu_type(filename):
return _BaseHDU.readfrom(filename).__class__
# print('....', filename)
# print(obj)
# return obj
Expect(hdu_type)(
{CAL/'SHA_20200822.0005.fits': shocBiasHDU,
CAL/'SHA_20200801.0001.fits': shocFlatHDU,
EX1/'SHA_20200731.0022.fits': shocNewHDU},
globals())
# TODO: shocOldHDU, shocMasterBias, shocMasterFlat
# TODO
# def test_select
|
[
"numpy.random.seed",
"recipes.testing.Expect",
"tempfile.mkstemp",
"astropy.io.fits.hdu.base._BaseHDU.readfrom",
"pathlib.Path",
"numpy.random.randint",
"os.close",
"numpy.arange",
"pytest.mark.parametrize",
"pySHOC.shocCampaign.load"
] |
[((648, 669), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (662, 669), True, 'import numpy as np\n'), ((854, 873), 'tempfile.mkstemp', 'tmp.mkstemp', (['""".txt"""'], {}), "('.txt')\n", (865, 873), True, 'import tempfile as tmp\n'), ((967, 979), 'os.close', 'os.close', (['fp'], {}), '(fp)\n', (975, 979), False, 'import os\n'), ((1122, 1144), 'pySHOC.shocCampaign.load', 'shocCampaign.load', (['EX1'], {}), '(EX1)\n', (1139, 1144), False, 'from pySHOC import shocCampaign, shocHDU, shocNewHDU, shocBiasHDU, shocFlatHDU\n'), ((2103, 2195), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""index"""', "(0, -1, 'SHA_20200731.0007.fits', 'SHA_20200731.0007')"], {}), "('index', (0, -1, 'SHA_20200731.0007.fits',\n 'SHA_20200731.0007'))\n", (2126, 2195), False, 'import pytest\n'), ((5383, 5399), 'recipes.testing.Expect', 'Expect', (['hdu_type'], {}), '(hdu_type)\n', (5389, 5399), False, 'from recipes.testing import Expect\n'), ((562, 576), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (566, 576), False, 'from pathlib import Path\n'), ((1944, 1970), 'pySHOC.shocCampaign.load', 'shocCampaign.load', (['pointer'], {}), '(pointer)\n', (1961, 1970), False, 'from pySHOC import shocCampaign, shocHDU, shocNewHDU, shocBiasHDU, shocFlatHDU\n'), ((5279, 5306), 'astropy.io.fits.hdu.base._BaseHDU.readfrom', '_BaseHDU.readfrom', (['filename'], {}), '(filename)\n', (5296, 5306), False, 'from astropy.io.fits.hdu.base import _BaseHDU\n'), ((2882, 2894), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (2891, 2894), True, 'import numpy as np\n'), ((3046, 3073), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(22)'], {}), '(0, 2, 22)\n', (3063, 3073), True, 'import numpy as np\n')]
|
import numpy as np
import open3d as o3d
import os
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--red", type = float, default = 0.5)
parser.add_argument("--blue", type = float, default = 0.4)
parser.add_argument("--green", type = float, default = 0.4)
parser.add_argument("--source_dir", type = str, default = "./scatters")
parser.add_argument("--render", action = "store_true", default = False)
args = parser.parse_args()
# Note: the raw point cloud may contain disturbances (noise) in some cases, so the
# colour thresholding below is followed by statistical outlier removal
def segment_cloth(pcd):
color = np.array(pcd.colors)
mask = (color[:,0] > args.red) * (color[:, 1] < args.green) * (color[:,2] < args.blue)
points = np.asarray(pcd.points)
truncated_pcd = o3d.geometry.PointCloud()
truncated_pcd.points = o3d.utility.Vector3dVector(points[mask])
truncated_pcd.colors = o3d.utility.Vector3dVector(color[mask])
    # keep the filtered cloud: remove_statistical_outlier returns (cloud, inlier_indices)
    truncated_pcd, _ = truncated_pcd.remove_statistical_outlier(nb_neighbors = 20, std_ratio = 0.04)
return truncated_pcd
# Source directory is identical to target directory
files = os.listdir(f"./pointcloud_transformed/{args.source_dir}/")
for f in files:
filename = f"./pointcloud_transformed/{args.source_dir}/{f}"
pcd = o3d.io.read_point_cloud(filename)
cloth_pcd = segment_cloth(pcd)
o3d.io.write_point_cloud(f"./pointcloud_cloth/{args.source_dir}/{f}", cloth_pcd)
if args.render:
o3d.visualization.draw_geometries([cloth_pcd])
|
[
"argparse.ArgumentParser",
"numpy.asarray",
"open3d.geometry.PointCloud",
"open3d.io.read_point_cloud",
"open3d.io.write_point_cloud",
"open3d.visualization.draw_geometries",
"numpy.array",
"open3d.utility.Vector3dVector",
"os.listdir"
] |
[((96, 112), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (110, 112), False, 'from argparse import ArgumentParser\n'), ((1053, 1111), 'os.listdir', 'os.listdir', (['f"""./pointcloud_transformed/{args.source_dir}/"""'], {}), "(f'./pointcloud_transformed/{args.source_dir}/')\n", (1063, 1111), False, 'import os\n'), ((555, 575), 'numpy.array', 'np.array', (['pcd.colors'], {}), '(pcd.colors)\n', (563, 575), True, 'import numpy as np\n'), ((680, 702), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (690, 702), True, 'import numpy as np\n'), ((723, 748), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (746, 748), True, 'import open3d as o3d\n'), ((776, 816), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points[mask]'], {}), '(points[mask])\n', (802, 816), True, 'import open3d as o3d\n'), ((844, 883), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['color[mask]'], {}), '(color[mask])\n', (870, 883), True, 'import open3d as o3d\n'), ((1203, 1236), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['filename'], {}), '(filename)\n', (1226, 1236), True, 'import open3d as o3d\n'), ((1276, 1361), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['f"""./pointcloud_cloth/{args.source_dir}/{f}"""', 'cloth_pcd'], {}), "(f'./pointcloud_cloth/{args.source_dir}/{f}', cloth_pcd\n )\n", (1300, 1361), True, 'import open3d as o3d\n'), ((1385, 1431), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[cloth_pcd]'], {}), '([cloth_pcd])\n', (1418, 1431), True, 'import open3d as o3d\n')]
|
# Initial setup following http://docs.chainer.org/en/stable/tutorial/basic.html
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
import matplotlib.pyplot as plt
# Defining your own neural networks using `Chain` class
class MyChain(Chain):
def __init__(self):
super(MyChain, self).__init__(
            # Setting the first argument to None lets the layer infer its input size from the first batch it sees
l1=L.Linear(None, 30),
l2=L.Linear(None, 30),
l3=L.Linear(None, 1)
)
def __call__(self, x):
h = self.l1(x)
h = self.l2(F.sigmoid(h))
return self.l3(F.sigmoid(h))
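# MyChain above is a small multilayer perceptron (two 30-unit hidden layers with
# sigmoid activations and a scalar output) used below for 1-D regression.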
# Setup a model
model = MyChain()
model_save_path = 'mlp.model'
print('Loading model')
# --- use NPZ format ---
serializers.load_npz(model_save_path, model)
# --- use HDF5 format (need h5py library) ---
# %timeit serializers.load_hdf5(model_save_path, model)
# define target function
def target_func(x):
"""Target function to be predicted"""
return x ** 3 - x ** 2 + x ** -1 + x
# create a vectorised version of target_func that applies it element-wise to numpy arrays
target_func_elementwise = np.frompyfunc(target_func, 1, 1)
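# Note (general numpy behaviour): a ufunc created with np.frompyfunc returns
# object-dtype arrays; cast with .astype(np.float64) if a numeric dtype is needed.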
# define data domain [xmin, xmax]
xmin = -3
xmax = 3
# number of training data
sample_num = 20
# calculate new data from model (predict value)
x_test_data = np.array(np.random.rand(sample_num) * (xmax - xmin) + xmin)  # create `sample_num` random test inputs in [xmin, xmax]
x_test = Variable(x_test_data.reshape(-1, 1).astype(np.float32))
y_test_data = model(x_test).data # this is predicted value
# calculate target function (true value)
x_detail_data = np.array(np.arange(xmin, xmax, 0.1))
y_detail_data = target_func_elementwise(x_detail_data)
plt.clf()
# plot model predict data
plt.scatter(x_test_data, y_test_data, color='k', label='Model predict value')
# plot target function
plt.plot(x_detail_data, y_detail_data, label='True value')
plt.legend(loc='lower right')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"chainer.serializers.load_npz",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.frompyfunc",
"numpy.arange",
"chainer.functions.sigmoid",
"numpy.random.rand",
"chainer.links.Linear"
] |
[((976, 1020), 'chainer.serializers.load_npz', 'serializers.load_npz', (['model_save_path', 'model'], {}), '(model_save_path, model)\n', (996, 1020), False, 'from chainer import datasets, iterators, optimizers, serializers\n'), ((1366, 1398), 'numpy.frompyfunc', 'np.frompyfunc', (['target_func', '(1)', '(1)'], {}), '(target_func, 1, 1)\n', (1379, 1398), True, 'import numpy as np\n'), ((1908, 1917), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1915, 1917), True, 'import matplotlib.pyplot as plt\n'), ((1944, 2021), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_test_data', 'y_test_data'], {'color': '"""k"""', 'label': '"""Model predict value"""'}), "(x_test_data, y_test_data, color='k', label='Model predict value')\n", (1955, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2103), 'matplotlib.pyplot.plot', 'plt.plot', (['x_detail_data', 'y_detail_data'], {'label': '"""True value"""'}), "(x_detail_data, y_detail_data, label='True value')\n", (2053, 2103), True, 'import matplotlib.pyplot as plt\n'), ((2104, 2133), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2114, 2133), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2144), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2142, 2144), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1850), 'numpy.arange', 'np.arange', (['xmin', 'xmax', '(0.1)'], {}), '(xmin, xmax, 0.1)\n', (1833, 1850), True, 'import numpy as np\n'), ((810, 822), 'chainer.functions.sigmoid', 'F.sigmoid', (['h'], {}), '(h)\n', (819, 822), True, 'import chainer.functions as F\n'), ((847, 859), 'chainer.functions.sigmoid', 'F.sigmoid', (['h'], {}), '(h)\n', (856, 859), True, 'import chainer.functions as F\n'), ((1568, 1594), 'numpy.random.rand', 'np.random.rand', (['sample_num'], {}), '(sample_num)\n', (1582, 1594), True, 'import numpy as np\n'), ((641, 659), 'chainer.links.Linear', 'L.Linear', (['None', '(30)'], {}), '(None, 30)\n', (649, 659), True, 'import chainer.links as L\n'), ((676, 694), 'chainer.links.Linear', 'L.Linear', (['None', '(30)'], {}), '(None, 30)\n', (684, 694), True, 'import chainer.links as L\n'), ((711, 728), 'chainer.links.Linear', 'L.Linear', (['None', '(1)'], {}), '(None, 1)\n', (719, 728), True, 'import chainer.links as L\n')]
|
import nibabel as nib
import glob
import os
import numpy as np
import tensorlayer as tl
'''
Before normalization, run N4 bias correction (https://www.ncbi.nlm.nih.gov/pubmed/20378467),
then save the data under folder ./CamCAN_unbiased/CamCAN
'''
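# A minimal sketch of that N4 step, given here only as an assumption about the
# preprocessing (it relies on SimpleITK; file names are illustrative):
#
#   import SimpleITK as sitk
#   img = sitk.ReadImage("sub-XXXX_T1w.nii.gz", sitk.sitkFloat32)
#   head_mask = sitk.OtsuThreshold(img, 0, 1, 200)            # rough head mask
#   corrected = sitk.N4BiasFieldCorrection(img, head_mask)    # N4 bias field correction
#   sitk.WriteImage(corrected, "sub-XXXX_T1w_unbiased.nii.gz")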
modalities = ['T1w', 'T2w']
BraTS_modalities = ['T1w']
folders = ['HGG', 'LGG']
wd = './Data/CamCAN_unbiased/CamCAN'
thumbnail_idx = [60, 70, 80, 90]
for mod in modalities:
wd_mod = os.path.join(wd, str(mod))
os.chdir(wd_mod)
img_files = [i for i in glob.glob("*") if "_unbiased" in i]
for img in img_files:
print(img)
img_data = nib.load(img)
img_data = img_data.get_data()
mask = img.split("_unbiased")[0] + "_brain_mask.nii.gz"
mask_data = nib.load(mask).get_data()
img_data = np.transpose(img_data, [2, 0, 1])
mask_data = np.transpose(mask_data, [2, 0, 1])
idx = [s for s in range(img_data.shape[0]) if mask_data[s].sum() > 1]
img_data = img_data[idx, :, 17:215]
mask_data = mask_data[idx, :, 17:215]
img_data = np.pad(img_data, ((0, 0), (1, 2), (1, 1)), mode='edge')
mask_data = np.pad(mask_data, ((0, 0), (1, 2), (1, 1)), mode='edge')
img_data = np.rot90(img_data, 1, (2, 1))
mask_data = np.rot90(mask_data, 1, (2, 1))
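        # Normalise intensities: z-score using mean/std computed inside the brain
        # mask, then pin the background (global minimum) to a fixed value of -3.5.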
ref_mean = np.mean(img_data[mask_data == 1])
ref_std = np.std(img_data[mask_data == 1])
normed_img = (img_data - ref_mean) / ref_std
normed_img[normed_img == normed_img.min()] = -3.5
x_nif = nib.Nifti1Image(normed_img, np.eye(4))
nib.save(x_nif, os.path.join(img.split("_unbiased")[0] + "_normalized_cropped_mask.nii.gz"))
x_nif = nib.Nifti1Image(mask_data, np.eye(4))
nib.save(x_nif, os.path.join(img.split("_unbiased")[0] + "_mask_cropped_mask.nii.gz"))
tl.visualize.save_images(normed_img[thumbnail_idx, :, :, np.newaxis], [2, 2],
"/scratch_net/bmicdl01/Data/CamCAN_unbiased/preview/" + str(mod)
+ "/" + img.split("_unbiased")[0] + "_normed_img.png")
print("---")
|
[
"numpy.pad",
"nibabel.load",
"numpy.std",
"numpy.transpose",
"numpy.rot90",
"numpy.mean",
"glob.glob",
"numpy.eye",
"os.chdir"
] |
[((468, 484), 'os.chdir', 'os.chdir', (['wd_mod'], {}), '(wd_mod)\n', (476, 484), False, 'import os\n'), ((614, 627), 'nibabel.load', 'nib.load', (['img'], {}), '(img)\n', (622, 627), True, 'import nibabel as nib\n'), ((797, 830), 'numpy.transpose', 'np.transpose', (['img_data', '[2, 0, 1]'], {}), '(img_data, [2, 0, 1])\n', (809, 830), True, 'import numpy as np\n'), ((851, 885), 'numpy.transpose', 'np.transpose', (['mask_data', '[2, 0, 1]'], {}), '(mask_data, [2, 0, 1])\n', (863, 885), True, 'import numpy as np\n'), ((1075, 1130), 'numpy.pad', 'np.pad', (['img_data', '((0, 0), (1, 2), (1, 1))'], {'mode': '"""edge"""'}), "(img_data, ((0, 0), (1, 2), (1, 1)), mode='edge')\n", (1081, 1130), True, 'import numpy as np\n'), ((1151, 1207), 'numpy.pad', 'np.pad', (['mask_data', '((0, 0), (1, 2), (1, 1))'], {'mode': '"""edge"""'}), "(mask_data, ((0, 0), (1, 2), (1, 1)), mode='edge')\n", (1157, 1207), True, 'import numpy as np\n'), ((1227, 1256), 'numpy.rot90', 'np.rot90', (['img_data', '(1)', '(2, 1)'], {}), '(img_data, 1, (2, 1))\n', (1235, 1256), True, 'import numpy as np\n'), ((1277, 1307), 'numpy.rot90', 'np.rot90', (['mask_data', '(1)', '(2, 1)'], {}), '(mask_data, 1, (2, 1))\n', (1285, 1307), True, 'import numpy as np\n'), ((1328, 1361), 'numpy.mean', 'np.mean', (['img_data[mask_data == 1]'], {}), '(img_data[mask_data == 1])\n', (1335, 1361), True, 'import numpy as np\n'), ((1380, 1412), 'numpy.std', 'np.std', (['img_data[mask_data == 1]'], {}), '(img_data[mask_data == 1])\n', (1386, 1412), True, 'import numpy as np\n'), ((513, 527), 'glob.glob', 'glob.glob', (['"""*"""'], {}), "('*')\n", (522, 527), False, 'import glob\n'), ((1570, 1579), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1576, 1579), True, 'import numpy as np\n'), ((1726, 1735), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1732, 1735), True, 'import numpy as np\n'), ((751, 765), 'nibabel.load', 'nib.load', (['mask'], {}), '(mask)\n', (759, 765), True, 'import nibabel as nib\n')]
|
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from pylab import cm
mpl.rcParams['font.family'] = 'STIXGeneral'
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
plt.rcParams['font.size'] = 16
plt.rcParams['figure.figsize'] = [5.6, 4]
plt.rcParams['axes.titlesize'] = 16
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 6
plt.rcParams['legend.fontsize'] = 13
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['axes.linewidth'] = 1
colors = cm.get_cmap('Set1', 9)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.xaxis.set_tick_params(which='major', size=5, width=1,
direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=5, width=1,
direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', right='on')
e = 1.6e-19
x = np.loadtxt('out.dat', unpack=True)
ax.hist(x, color=colors(0), bins=500, histtype='step', density=True)
x = np.loadtxt('out2.dat', unpack=True)
ax.hist(x, color=colors(1), bins=500, histtype='step', density=True)
x = np.loadtxt('out3.dat', unpack=True)
ax.hist(x, color=colors(2), bins=500, histtype='step', density=True)
plt.tight_layout()
# plt.savefig('../figure/1a.pdf')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"pylab.cm.get_cmap",
"matplotlib.pyplot.tight_layout"
] |
[((558, 580), 'pylab.cm.get_cmap', 'cm.get_cmap', (['"""Set1"""', '(9)'], {}), "('Set1', 9)\n", (569, 580), False, 'from pylab import cm\n'), ((588, 600), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (598, 600), True, 'import matplotlib.pyplot as plt\n'), ((1086, 1120), 'numpy.loadtxt', 'np.loadtxt', (['"""out.dat"""'], {'unpack': '(True)'}), "('out.dat', unpack=True)\n", (1096, 1120), True, 'import numpy as np\n'), ((1194, 1229), 'numpy.loadtxt', 'np.loadtxt', (['"""out2.dat"""'], {'unpack': '(True)'}), "('out2.dat', unpack=True)\n", (1204, 1229), True, 'import numpy as np\n'), ((1303, 1338), 'numpy.loadtxt', 'np.loadtxt', (['"""out3.dat"""'], {'unpack': '(True)'}), "('out3.dat', unpack=True)\n", (1313, 1338), True, 'import numpy as np\n'), ((1409, 1427), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1425, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1462, 1472), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1470, 1472), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
#
# JSON osu! map analysis
#
import numpy as np;
def get_map_timing_array(map_json, length=-1, divisor=4):
if length == -1:
length = map_json["obj"][-1]["time"] + 1000; # it has an extra time interval after the last note
if map_json["obj"][-1]["type"] & 8: # spinner end
length = map_json["obj"][-1]["spinnerEndTime"] + 1000;
uts_a = map_json["timing"]["uts"];
out = [];
for i, uts in enumerate(uts_a):
begin_time = uts["beginTime"];
mspb = uts["tickLength"];
if i < len(uts_a)-1:
end_time = uts_a[i+1]["beginTime"];
else:
end_time = length;
arr = np.floor(np.arange(begin_time, end_time, mspb / divisor));
out = out + list(map(lambda f: int(f), arr));
return out;
def get_tick_len(map_json, tick):
uts_a = map_json["timing"]["uts"];
if tick < uts_a[0]["beginTime"]:
return uts_a[0]["tickLength"];
_out = 600;
for uts in uts_a:
if tick >= uts["beginTime"]:
_out = uts["tickLength"];
else:
return _out;
return _out;
def get_slider_len(map_json, tick):
ts_a = map_json["timing"]["ts"];
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_slider_len_ts(ts_a, tick):
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_end_time(note):
if note["type"] & 8:
return note["spinnerEndTime"];
elif note["type"] & 2:
return note["sliderData"]["endTime"];
#elif note["type"] & 128:
# return note["holdEndTime"];
else:
return note["time"];
# edited from uts to ts
def get_all_ticks_and_lengths_from_ts(uts_array, ts_array, end_time, divisor=4):
# Returns array of all timestamps, ticklens and sliderlens.
endtimes = ([uts["beginTime"] for uts in uts_array] + [end_time])[1:];
timestamps = [np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor) for i, uts in enumerate(uts_array)];
ticks_from_uts = [list(range(len(timestamp_group))) for timestamp_group in timestamps];
tick_len = [[uts["tickLength"]] * len(np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor)) for i, uts in enumerate(uts_array)];
# slider_len = [[ts["sliderLength"]] * len(np.arange(ts["beginTime"], endtimes[i], ts["tickLength"] / divisor)) for i, ts in enumerate(ts_array)];
slider_len = [get_slider_len_ts(ts_array, timestamp) for timestamp in np.concatenate(timestamps)];
return np.concatenate(ticks_from_uts), np.round(np.concatenate(timestamps)).astype(int), np.concatenate(tick_len), np.array(slider_len);
def get_end_point(note):
if note["type"] & 8:
return np.array([256, 192]);
elif note["type"] & 2:
return np.array(note["sliderData"]["endpoint"]);
else:
return np.array([note["x"], note["y"]]);
def get_input_vector(note, prev_note):
if note["type"] & 8:
return None;
#elif note["type"] & 2:
# return np.array(note["sliderData"]["dIn"]);
else:
vec = np.array([note["x"], note["y"]]) - get_end_point(prev_note);
return vec / max(0.001, np.sqrt(vec.dot(vec)));
def get_output_vector(note, prev_note):
if note["type"] & 8:
return None;
elif note["type"] & 2:
return np.array(note["sliderData"]["dOut"]);
else:
vec = np.array([note["x"], note["y"]]) - get_end_point(prev_note);
return vec / max(0.001, np.sqrt(vec.dot(vec)));
def get_momentum(note, prev_note, slider_len):
"""
    momentum = distance snap (distance / time / slider length).
    for sliders, takes the smaller of the values measured from the slider end or the slider start to the next note.
"""
v1 = np.array([note["x"], note["y"]]);
v0 = get_end_point(prev_note);
v = v1 - v0;
if note["time"] - get_end_time(prev_note) == 0 or note["time"] - prev_note["time"] == 0:
        # the note occurs at the same time the previous note starts or ends: either a bugged slider end or a double (stacked) note
return 0;
end_type_momentum = np.sqrt(v.dot(v)) / (note["time"] - get_end_time(prev_note)) / slider_len;
# Since slider jumps in maps cause parameters to be learned too high
# we try to deal with slider leniency by using the beginning of slider
v2 = np.array([prev_note["x"], prev_note["y"]]);
v3 = v1 - v2;
start_type_momentum = np.sqrt(v3.dot(v3)) / (note["time"] - prev_note["time"]) / slider_len;
return np.min([end_type_momentum, start_type_momentum]);
def is_uts_begin(map_json, tick):
uts_a = map_json["timing"]["uts"];
begin_times = [uts["beginTime"] for uts in uts_a];
for t in begin_times:
if tick > t - 1 and tick < t + 5:
return True
return False
def get_map_notes(map_json, **kwargs):
"""
Reads JSON map data and creates a list for every tick
Returns:
data = list of data array: [TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3]
flow_data = list of data array: [i, tick, note_type, x, y, vec_in_x, vec_in_y, vec_out_x, vec_out_y, end_x, end_y]
Ex1, Ex2, Ex3 = tickLength/500, BPM/120, sliderLength/150
"""
length = kwargs.get("length", -1);
divisor = kwargs.get("divisor", 4);
tick_times = get_map_timing_array(map_json, length = length, divisor = divisor);
objs = map_json["obj"];
obj_times = list(map(lambda obj: obj["time"], objs));
# 1 for circle, 2 for slider, 3 for spinner
def get_note_type(obj):
if not obj:
return 0;
if obj["type"] & 2:
return 2;
elif obj["type"] & 8:
return 3;
return 1;
po = 0;
note_max_wait_time = kwargs.get("note_max_wait_time", 1000);
start_time = obj_times[0] - note_max_wait_time;
last_obj_time = start_time;
sliding = 0;
slider_end_time = 0;
spinning = 0;
spinner_end_time = 0;
data = [];
flow_data = [];
# constant multipliers and subtractions
tlen_mp = 1/500;
tlen_s = 1;
bpm_mp = 1/120;
bpm_s = 1;
slen_mp = 1/150;
slen_s = 1;
# tick count from start of uninherited timing section
uts_i = 0;
# tick is timestamp here
for i, tick in enumerate(tick_times):
if is_uts_begin(map_json, tick):
uts_i = 0;
else:
uts_i += 1;
# Attach extra vars at the end of each note data row
tlen = get_tick_len(map_json, tick);
bpm = 60000 / tlen;
slen = get_slider_len(map_json, tick);
ex1 = tlen * tlen_mp - tlen_s;
ex2 = bpm * bpm_mp - bpm_s;
ex3 = slen * slen_mp - slen_s;
while obj_times[po] < tick - 5 and po < len(obj_times) - 1:
po += 1;
if obj_times[po] >= tick - 5 and obj_times[po] <= tick + 5: # found note
last_obj_time = tick;
note_type = get_note_type(objs[po]);
# calculate momentum
if po >= 1:
momentum = get_momentum(objs[po], objs[po-1], slen/tlen);
else:
momentum = 0;
# flow data
if po >= 1:
input_vector = get_input_vector(objs[po], objs[po-1]);
output_vector = get_output_vector(objs[po], objs[po-1]);
else:
input_vector = [0, 0];
output_vector = [0, 0];
if input_vector is None or input_vector[0] is None or input_vector[1] is None:
input_vector = [0, 0];
if output_vector is None or output_vector[0] is None or output_vector[1] is None:
output_vector = [0, 0];
# end point
endpoint = get_end_point(objs[po]);
flow_data.append([uts_i, tick, note_type, objs[po]["x"], objs[po]["y"], input_vector[0], input_vector[1], output_vector[0], output_vector[1], endpoint[0], endpoint[1]]);
# put data
if note_type == 1:
spinning = 0;
sliding = 0;
elif note_type == 2:
sliding = 1;
slider_end_time = objs[po]["sliderData"]["endTime"];
elif note_type == 3:
spinning = 1;
spinner_end_time = objs[po]["spinnerEndTime"];
                # because the spinner sometimes runs for more than 3 seconds
last_obj_time = spinner_end_time;
# TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3
data.append([uts_i, tick, 1, note_type, sliding, spinning, momentum, ex1, ex2, ex3]);
elif spinning == 1:
if tick >= spinner_end_time - 5:
spinning = 0;
data.append([uts_i, tick, 1, 5, 0, 0, 0, ex1, ex2, ex3]);
else:
data.append([uts_i, tick, 0, 0, 0, 1, 0, ex1, ex2, ex3]);
elif sliding == 1:
if tick >= slider_end_time - 5:
sliding = 0;
data.append([uts_i, tick, 1, 4, 0, 0, 0, ex1, ex2, ex3]);
else:
data.append([uts_i, tick, 0, 0, 1, 0, 0, ex1, ex2, ex3]);
else: # not found
if tick - last_obj_time < note_max_wait_time and tick >= start_time:
data.append([uts_i, tick, 0, 0, 0, 0, 0, ex1, ex2, ex3]);
return data, flow_data;
|
[
"numpy.min",
"numpy.array",
"numpy.arange",
"numpy.concatenate"
] |
[((4069, 4101), 'numpy.array', 'np.array', (["[note['x'], note['y']]"], {}), "([note['x'], note['y']])\n", (4077, 4101), True, 'import numpy as np\n'), ((4621, 4663), 'numpy.array', 'np.array', (["[prev_note['x'], prev_note['y']]"], {}), "([prev_note['x'], prev_note['y']])\n", (4629, 4663), True, 'import numpy as np\n'), ((4791, 4839), 'numpy.min', 'np.min', (['[end_type_momentum, start_type_momentum]'], {}), '([end_type_momentum, start_type_momentum])\n', (4797, 4839), True, 'import numpy as np\n'), ((2259, 2328), 'numpy.arange', 'np.arange', (["uts['beginTime']", 'endtimes[i]', "(uts['tickLength'] / divisor)"], {}), "(uts['beginTime'], endtimes[i], uts['tickLength'] / divisor)\n", (2268, 2328), True, 'import numpy as np\n'), ((2873, 2903), 'numpy.concatenate', 'np.concatenate', (['ticks_from_uts'], {}), '(ticks_from_uts)\n', (2887, 2903), True, 'import numpy as np\n'), ((2955, 2979), 'numpy.concatenate', 'np.concatenate', (['tick_len'], {}), '(tick_len)\n', (2969, 2979), True, 'import numpy as np\n'), ((2981, 3001), 'numpy.array', 'np.array', (['slider_len'], {}), '(slider_len)\n', (2989, 3001), True, 'import numpy as np\n'), ((3069, 3089), 'numpy.array', 'np.array', (['[256, 192]'], {}), '([256, 192])\n', (3077, 3089), True, 'import numpy as np\n'), ((693, 740), 'numpy.arange', 'np.arange', (['begin_time', 'end_time', '(mspb / divisor)'], {}), '(begin_time, end_time, mspb / divisor)\n', (702, 740), True, 'import numpy as np\n'), ((2833, 2859), 'numpy.concatenate', 'np.concatenate', (['timestamps'], {}), '(timestamps)\n', (2847, 2859), True, 'import numpy as np\n'), ((3133, 3173), 'numpy.array', 'np.array', (["note['sliderData']['endpoint']"], {}), "(note['sliderData']['endpoint'])\n", (3141, 3173), True, 'import numpy as np\n'), ((3200, 3232), 'numpy.array', 'np.array', (["[note['x'], note['y']]"], {}), "([note['x'], note['y']])\n", (3208, 3232), True, 'import numpy as np\n'), ((3425, 3457), 'numpy.array', 'np.array', (["[note['x'], note['y']]"], {}), "([note['x'], note['y']])\n", (3433, 3457), True, 'import numpy as np\n'), ((3671, 3707), 'numpy.array', 'np.array', (["note['sliderData']['dOut']"], {}), "(note['sliderData']['dOut'])\n", (3679, 3707), True, 'import numpy as np\n'), ((2500, 2569), 'numpy.arange', 'np.arange', (["uts['beginTime']", 'endtimes[i]', "(uts['tickLength'] / divisor)"], {}), "(uts['beginTime'], endtimes[i], uts['tickLength'] / divisor)\n", (2509, 2569), True, 'import numpy as np\n'), ((3733, 3765), 'numpy.array', 'np.array', (["[note['x'], note['y']]"], {}), "([note['x'], note['y']])\n", (3741, 3765), True, 'import numpy as np\n'), ((2914, 2940), 'numpy.concatenate', 'np.concatenate', (['timestamps'], {}), '(timestamps)\n', (2928, 2940), True, 'import numpy as np\n')]
|
### IMPORTS
from __future__ import print_function
import os
import fnmatch
import numpy as np
import skimage.data
import cv2
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from PIL import Image
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.optimizers import RMSprop, Adagrad
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, Input
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping
import logging
FORMAT = "[%(lineno)4s : %(funcName)-30s ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
from selective_search import selective_search_bbox
### GLOBALS
# dimensions of our images.
# img_width = 150
# img_height = 150
img_width = 224
img_height = 224
# dataset_path = 'dataset_dogs_cats'
dataset_path = 'dataset'
dataset_train_path=os.path.join(dataset_path, 'train')
dataset_val_path=os.path.join(dataset_path, 'validation')
dataset_test_path=os.path.join(dataset_path, 'test')
# path to the model weights files.
weights_path = 'weights/vgg16_weights.h5'
#top_model_weights_path = 'output/bottleneck_fc_model.h5'
#top_model_weights_path = 'output_6_categ/best-weights-015-0.5636-0.7923.hdf5'
#finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5'
#finetune_model_weights_path = 'output_6_categ/best-weights-finetune-000-0.2325-0.9062.hdf5'
#finetune_model_weights_path = 'output_6_categ_crop/best-weights-finetune-008-0.3453-0.8774.hdf5'
#finetune_model_weights_path = 'output/best-weights-finetune-000-1.5646-0.5217.hdf5'
#finetune_model_weights_path = 'results_36categ/best-weights-finetune-000-1.5646-0.5217.hdf5'
finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5'
#epochs = 50
epochs = 5
#batch_size = 16
#batch_size = 32
batch_size = 1
# Count no. of images(.jpg) in a directory
def get_images_count_recursive(path):
matches = []
logging.debug('path {}'.format(path))
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
# logging.debug('matches {}'.format(matches))
images_count = len(matches)
return images_count
nb_test_samples = get_images_count_recursive(dataset_test_path)
logging.debug('nb_test_samples {}'.format(nb_test_samples))
if not os.path.exists('output'):
os.makedirs('output')
if not os.path.exists('logs'):
os.makedirs('logs')
# TODO: HARDCODING - Should be same as used during training VGG; Else error (None, None, 512)
input_shape = (img_width, img_height, 3)
# Sorted subdirectories list
def get_subdir_list(path):
names=[]
for name in sorted(os.listdir(path)):
if os.path.isdir(os.path.join(path, name)):
names.append(name)
logging.debug('names {}'.format(names))
return names
class_names = get_subdir_list(dataset_train_path)
logging.debug('class_names {}'.format(class_names))
# build the VGG16 network
base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
logging.debug('Model loaded.')
logging.debug('{}'.format(base_model.output_shape)) # (None, None, None, 512) if input_shape not given in applications.VGG16
logging.debug('{}'.format(base_model.output_shape[1:])) # (None, None, 512)
### MODEL 1
# build a classifier model to put on top of the convolutional model
# top_model = Sequential()
# top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
# top_model.add(Dense(256, activation='relu'))
# top_model.add(Dropout(0.5))
# top_model.add(Dense(len(class_names), activation='softmax')) # Binary to Multi classification changes
# #top_model.add(Dense(1, activation='sigmoid'))
# # note that it is necessary to start with a fully-trained
# # classifier, including the top classifier,
# # in order to successfully do fine-tuning
# # top_model.load_weights(top_model_weights_path)
# # add the model on top of the convolutional base
# # base_model.add(top_model) # Not working; AttributeError: 'Model' object has no attribute 'add'
# model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
# logging.debug('{}'.format(model.summary()))
# model.compile(loss='sparse_categorical_crossentropy',
# optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
# metrics=['accuracy'])
### MODEL2
inputs = Input(shape=(base_model.output_shape[1:]))
x_common = Dense(256, activation='relu')(inputs)
## Model Classification
x = Flatten()(x_common)
#x = Dropout(dropout_rate)(x)
predictions_class = Dense(len(class_names), activation='softmax', name='predictions_class')(x)
## Model (Regression) IOU score
x = Flatten()(x_common)
# x = Dense(256, activation='relu')(x)
# x = Dropout(dropout_rate)(x)
predictions_iou = Dense(1, activation='sigmoid', name='predictions_iou')(x)
# This creates a model that includes the Input layer and three Dense layers
#model = Model(inputs=inputs, outputs=[predictions_class(base_model.output), predictions_iou(base_model.output)])
model = Model(inputs=inputs, outputs=[predictions_class, predictions_iou])
logging.debug('model summary {}'.format(model.summary()))
model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
loss={'predictions_class': 'sparse_categorical_crossentropy', 'predictions_iou': 'mean_squared_error'},
metrics=['accuracy'])
model.load_weights(finetune_model_weights_path)
logging.debug('weights loaded: {}'.format(finetune_model_weights_path))
def evaluate_test_dataset():
## Test
test_datagen = ImageDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(
dataset_test_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse', # Binary to Multi classification changes
save_to_dir=None,
shuffle=False)
scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)
logging.debug('model.metrics_names {}'.format(model.metrics_names))
logging.debug('scores {}'.format(scores))
def predict_image_dir():
# Predict
# TODO: Hardcoding
# Put all images in sample_images/test folder
dataset_predict_path='sample_images'
#dataset_predict_path='temp'
logging.debug('dataset_predict_path {}'.format(dataset_predict_path))
predict_datagen = ImageDataGenerator(rescale=1. / 255)
predict_generator = predict_datagen.flow_from_directory(
dataset_predict_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='sparse', # Binary to Multi classification changes
save_to_dir=None,
shuffle=False)
nb_predict_samples = get_images_count_recursive(dataset_predict_path)
logging.debug('nb_predict_samples {}'.format(nb_predict_samples))
prediction = model.predict_generator(predict_generator, nb_predict_samples // batch_size, verbose=1)
logging.debug('\n\nprediction \n{}'.format(prediction))
# Display predictions
matches=[]
for root, dirnames, filenames in os.walk(os.path.join(dataset_predict_path,'test')):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
for index,preds in enumerate(prediction):
logging.debug('\n{}'.format((matches[index])))
for index2, pred in enumerate(preds):
logging.debug('class_names {}'.format(class_names[index2]))
logging.debug('pred {0:6f}'.format(float(pred)))
def pad_and_crop_image(old_im, new_width, new_height):
# old_im = Image.open('someimage.jpg')
old_size = old_im.size
new_size = (new_width, new_height)
new_im = Image.new("RGB", new_size) # this is already black!
    new_im.paste(old_im, ((new_size[0]-old_size[0])//2,
                      (new_size[1]-old_size[1])//2))
# new_im.show()
# new_im.save('someimage.jpg')
return new_im
def predict_image_name(image_path_name):
logging.debug('image_path_name {}'.format(image_path_name))
candidates = selective_search_bbox(image_path_name)
logging.debug('candidates {}'.format(candidates))
image_name = image_path_name.split('/')[-1].split('.')[0]
logging.debug('image_name {}'.format(image_name))
# img = Image.open(image_path_name)
# logging.debug('{} {} {}'.format(img.format, img.size, img.mode))
#img2 = img.crop((0, 0, 100, 100))
# img2.save("img2.jpg")
# img2.show()
#crop_img = img[200:400, 100:300] # Crop from x, y, w, h -> 100, 200, 300, 400
# NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
# img = cv2.imread(image_path_name)
# fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
img_read = Image.open(image_path_name)
logging.debug('{} {} {}'.format(img_read.format, img_read.size, img_read.mode))
# img_read.show()
i=0
for x, y, w, h in (candidates):
# left, upper, right, and lower pixel; The cropped section includes the left column and
# the upper row of pixels and goes up to (but doesn't include) the right column and bottom row of pixels
img_crop = img_read.crop((y, x, y+w, x+h))
img_crop.save('temp/test/'+ image_name + '_' + str(i) + '_cropped_' + '.jpg')
logging.debug('img_crop {} {} {}'.format(img_crop.format, img_crop.size, img_crop.mode))
img_crop_resize = img_crop.resize((img_width, img_height))
img_crop_resize.save('temp/test/'+ image_name + '_' + str(i) + '_cropped_resize' + '.jpg')
logging.debug('img_crop_resize {} {} {}'.format(img_crop_resize.format, img_crop_resize.size, img_crop_resize.mode))
i=i+1
# crop_img = img[x:y, w:h] # Crop from x, y, w, h -> 100, 200, 300, 400
# logging.debug('crop_img {}'.format(crop_img.shape))
# ax.imshow(crop_img)
# # cv2.imshow('cropped', crop_img)
# # cv2.waitKey(0)
# plt.show()
# # Convert Image to array
# img = PIL.Image.open("foo.jpg").convert("L")
# arr = numpy.array(img)
# # Convert array to Image
# img = PIL.Image.fromarray(arr)
# img = cv2.resize(cv2.imread(image_path_name), (224, 224)).astype(np.float32)
# img2.save('temp/test/img_'+str(i)+'.jpg')
# img3 = img2.thumbnail((img_width, img_height))
# logging.debug('img3 {}'.format(type(img3)))
# # img3.save('temp/test/img_'+str(i)+'_resized.jpg')
# logging.debug('{} {} {}'.format(img3.format, img3.size, img3.mode))
# img4 = pad_and_crop_image(img3, img_width, img_height)
# logging.debug('{} {} {}'.format(img4.format, img4.size, img4.mode))
# img4.save('temp/test/img_'+str(i)+'_resized1.jpg')
img=np.array(img_crop_resize).astype(np.float32)
img[:,:,0] -= 103.939
img[:,:,1] -= 116.779
img[:,:,2] -= 123.68
#img = img.transpose((2,0,1))
img = np.expand_dims(img, axis=0)
prediction = model.predict(img, batch_size, verbose=1)
logging.debug('\n\nprediction \n{}'.format(prediction))
for index,preds in enumerate(prediction):
for pred in preds:
logging.debug('pred {0:6f}'.format(float(pred)))
### MAIN ###
#evaluate_test_dataset()
#predict_image_dir()
# #image='dataset/test/Jeans/img_Distressed_Skinny_Jeans_img_00000004.jpg'
# #image='sample_images/test/img_Distressed_Denim_Jeans_img_00000001.jpg'
# image='sample_images/test/img_Acid_Wash_Denim_Romper_img_00000070.jpg'
image='sample_images/test/img_Acid_Wash_-_Skinny_Jeans_img_00000005.jpg'
#image='sample_images/test/img_Boxy_Faux_Fur_Jacket_img_00000001.jpg'
#image='sample_images/test/img_Athletic_Marled_Knit_Joggers_img_00000009.jpg'
predict_image_name(image)
|
[
"keras.preprocessing.image.ImageDataGenerator",
"PIL.Image.new",
"os.walk",
"keras.applications.VGG16",
"keras.layers.Input",
"os.path.join",
"keras.optimizers.SGD",
"os.path.exists",
"keras.layers.Flatten",
"os.listdir",
"selective_search.selective_search_bbox",
"fnmatch.filter",
"logging.debug",
"os.makedirs",
"logging.basicConfig",
"numpy.expand_dims",
"PIL.Image.open",
"keras.layers.Dense",
"numpy.array"
] |
[((647, 702), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': 'FORMAT'}), '(level=logging.DEBUG, format=FORMAT)\n', (666, 702), False, 'import logging\n'), ((948, 983), 'os.path.join', 'os.path.join', (['dataset_path', '"""train"""'], {}), "(dataset_path, 'train')\n", (960, 983), False, 'import os\n'), ((1001, 1041), 'os.path.join', 'os.path.join', (['dataset_path', '"""validation"""'], {}), "(dataset_path, 'validation')\n", (1013, 1041), False, 'import os\n'), ((1060, 1094), 'os.path.join', 'os.path.join', (['dataset_path', '"""test"""'], {}), "(dataset_path, 'test')\n", (1072, 1094), False, 'import os\n'), ((3095, 3182), 'keras.applications.VGG16', 'applications.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': 'input_shape'}), "(weights='imagenet', include_top=False, input_shape=\n input_shape)\n", (3113, 3182), False, 'from keras import applications\n'), ((3178, 3208), 'logging.debug', 'logging.debug', (['"""Model loaded."""'], {}), "('Model loaded.')\n", (3191, 3208), False, 'import logging\n'), ((4743, 4783), 'keras.layers.Input', 'Input', ([], {'shape': 'base_model.output_shape[1:]'}), '(shape=base_model.output_shape[1:])\n', (4748, 4783), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((2078, 2091), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2085, 2091), False, 'import os\n'), ((2451, 2475), 'os.path.exists', 'os.path.exists', (['"""output"""'], {}), "('output')\n", (2465, 2475), False, 'import os\n'), ((2481, 2502), 'os.makedirs', 'os.makedirs', (['"""output"""'], {}), "('output')\n", (2492, 2502), False, 'import os\n'), ((2511, 2533), 'os.path.exists', 'os.path.exists', (['"""logs"""'], {}), "('logs')\n", (2525, 2533), False, 'import os\n'), ((2539, 2558), 'os.makedirs', 'os.makedirs', (['"""logs"""'], {}), "('logs')\n", (2550, 2558), False, 'import os\n'), ((4797, 4826), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (4802, 4826), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((4864, 4873), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4871, 4873), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((5046, 5055), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5053, 5055), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((5154, 5208), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""predictions_iou"""'}), "(1, activation='sigmoid', name='predictions_iou')\n", (5159, 5208), False, 'from keras.layers import Dropout, Flatten, Dense, Input\n'), ((5969, 6006), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (5987, 6006), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6836, 6873), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (6854, 6873), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((8256, 8282), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'new_size'], {}), "('RGB', new_size)\n", (8265, 8282), False, 'from PIL import Image\n'), ((8646, 8684), 'selective_search.selective_search_bbox', 'selective_search_bbox', (['image_path_name'], {}), '(image_path_name)\n', (8667, 8684), False, 'from selective_search import selective_search_bbox\n'), ((9333, 9360), 'PIL.Image.open', 'Image.open', (['image_path_name'], 
{}), '(image_path_name)\n', (9343, 9360), False, 'from PIL import Image\n'), ((2117, 2151), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.jpg"""'], {}), "(filenames, '*.jpg')\n", (2131, 2151), False, 'import fnmatch\n'), ((2789, 2805), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2799, 2805), False, 'import os\n'), ((5582, 5621), 'keras.optimizers.SGD', 'optimizers.SGD', ([], {'lr': '(0.0001)', 'momentum': '(0.9)'}), '(lr=0.0001, momentum=0.9)\n', (5596, 5621), False, 'from keras import optimizers\n'), ((7633, 7675), 'os.path.join', 'os.path.join', (['dataset_predict_path', '"""test"""'], {}), "(dataset_predict_path, 'test')\n", (7645, 7675), False, 'import os\n'), ((7701, 7735), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.jpg"""'], {}), "(filenames, '*.jpg')\n", (7715, 7735), False, 'import fnmatch\n'), ((11488, 11515), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (11502, 11515), True, 'import numpy as np\n'), ((2833, 2857), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (2845, 2857), False, 'import os\n'), ((2180, 2208), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (2192, 2208), False, 'import os\n'), ((7764, 7792), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (7776, 7792), False, 'import os\n'), ((11299, 11324), 'numpy.array', 'np.array', (['img_crop_resize'], {}), '(img_crop_resize)\n', (11307, 11324), True, 'import numpy as np\n')]
|
# Copyright 2020 Graphcore Ltd.
import argparse
import os
import time as time
import numpy as np
import tensorflow as tf
from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils
from tensorflow.python.ipu.scopes import ipu_scope, ipu_shard
import tensorflow_probability as tfp
# Model and sampling parameters
# Note: increasing model size, number of steps, or dataset size may cause out of memory errors
first_layer_size = 40
num_burnin_steps = 100
num_ipus = 2
num_results = 400
num_leapfrog_steps = 1000
useful_features = 22
num_skip_columns = 2
output_file = "output_samples.txt"
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset-dir",
type=str,
default=".",
help="Path to datasets"
)
args = parser.parse_args()
input_file = os.path.join(
args.dataset_dir, "returns_and_features_for_mcmc.txt"
)
# Print the about message
print("\nMCMC sampling example with TensorFlow Probability\n"
" Single precision\n"
f" Number of IPUs {num_ipus} (one MCMC chain per IPU)\n"
f" Number of results per IPU {num_results}\n"
f" Number of burn-in steps {num_burnin_steps}\n"
f" Number of leapfrog steps {num_leapfrog_steps}\n"
f" First layer size {first_layer_size}")
# Load data
raw_data = np.genfromtxt(input_file, skip_header=1,
delimiter="\t", dtype='float32')
# Pre-process data
observed_return_ = raw_data[:, num_skip_columns]
observed_features_ = raw_data[:, num_skip_columns+1:]
num_features = raw_data.shape[1] - num_skip_columns - 1
if useful_features < num_features:
num_features = useful_features
observed_features_ = observed_features_[:, :num_features]
# Model is an MLP with num_features input dims and layer sizes: first_layer_size, 1, 1
num_model_parameters = num_features * first_layer_size + \
first_layer_size + first_layer_size + 3
# Print dataset parameters
print(" Number of data items {}\n"
" Number of features per data item {}\n"
" Number of model parameters {}\n"
.format(raw_data.shape[0],
num_features,
num_model_parameters
))
# Import TensorFlow modules
tfd = tfp.distributions
# Suppress warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Initialize TensorFlow graph and session
tf.reset_default_graph()
config = tf.ConfigProto()
sess = tf.Session(config=config)
# Build the neural network
def bdnn(x, p):
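    # Small MLP whose weights and biases are packed into the flat parameter vector p
    # (nf inputs -> nt hidden units with tanh -> 1 output, followed by a scalar affine)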
nf = num_features
nt = first_layer_size
# Unpack model parameters
w1 = tf.reshape(p[nt+1:nt+nf*nt+1], [nf, nt])
w2 = tf.reshape(p[1:nt+1], [nt, 1])
w3 = p[0]
b1 = p[nt+nf*nt+3:]
b2 = tf.expand_dims(p[nt+nf*nt+2], 0)
b3 = p[nt+nf*nt+1]
# Build layers
x = tf.tanh(tf.nn.xw_plus_b(x, w1, b1))
x = tf.nn.xw_plus_b(x, w2, b2)
x = x * w3 + b3
return tf.squeeze(x)
# Model posterior log probability
def model_log_prob(ret, feat, p):
# Parameters of distributions
prior_scale = 200
studentT_scale = 100
# Features normalization
def normalize_features(f):
return 0.001 * f
# Prior probability distributions on model parameters
rv_p = tfd.Independent(tfd.Normal(loc=0. * tf.ones(shape=[num_model_parameters], dtype=tf.float32),
scale=prior_scale * tf.ones(shape=[num_model_parameters], dtype=tf.float32)),
reinterpreted_batch_ndims=1)
# Likelihood
alpha_bp_estimate = bdnn(normalize_features(feat), p)
rv_observed = tfd.StudentT(
df=2.2, loc=alpha_bp_estimate, scale=studentT_scale)
# Sum of logs
return (rv_p.log_prob(p) +
tf.reduce_sum(rv_observed.log_prob(ret)))
def build_graph(scope_id):
with tf.variable_scope('scope'+scope_id, use_resource=True, reuse=tf.AUTO_REUSE):
# Data items
observed_return = tf.cast(observed_return_, 'float32')
observed_features = tf.cast(observed_features_, 'float32')
# Initial chain state
initial_chain_state = [
0.0 * tf.ones(shape=[num_model_parameters], dtype=tf.float32)
]
# Bijectors
unconstraining_bijectors = [
tfp.bijectors.Identity()
]
# Initialize the step_size
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
step_size = tf.get_variable(
name='step_size',
initializer=tf.constant(.01, dtype=tf.float32),
trainable=False,
use_resource=True
)
# Put the graph into a function so it can be compiled for running on IPU
def hmc_graph():
# Target log probability function
def target_log_prob_fn(*args):
return model_log_prob(observed_return, observed_features, *args)
# Hamiltonian Monte Carlo kernel
hmc_kernel = tfp.mcmc.TransformedTransitionKernel(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
num_leapfrog_steps=num_leapfrog_steps,
step_size=step_size,
step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
target_rate=0.2,
num_adaptation_steps=num_burnin_steps,
decrement_multiplier=0.1),
state_gradients_are_stopped=False),
bijector=unconstraining_bijectors)
# Graph to sample from the chain
return tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=initial_chain_state,
kernel=hmc_kernel)
# Compile the graph
[p], kernel_results = ipu_compiler.compile(hmc_graph, [])
return (p, kernel_results)
# Place the graphs on IPUs
ops = []
for i in range(num_ipus):
with ipu_scope('/device:IPU:'+str(i)):
ops.append(build_graph(scope_id=str(i)))
# Configure IPU
config = utils.create_ipu_config()
# Create num_chips TF devices, with 1 IPU per device
config = utils.auto_select_ipus(config, [1]*num_ipus)
utils.configure_ipu_system(config)
utils.move_variable_initialization_to_cpu()
# Initialize variables
init_g = tf.global_variables_initializer()
sess.run(init_g)
# Warm up
print("\nWarming up...")
sess.run(ops)
print("Done\n")
# Sample
print("Sampling...")
start_time = time.time()
results = sess.run(ops)
end_time = time.time()
print("Done\n")
# Concatenate samples from separate MCMC chains
samples = np.concatenate(list(map(lambda x: x[0], results)), axis=0)
# Write samples to file
np.savetxt(output_file, samples, delimiter='\t')
print("Written {} samples to {}".format(samples.shape[0], output_file))
# Print run time
print("Completed in {0:.2f} seconds\n".format(end_time - start_time))
|
[
"argparse.ArgumentParser",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.get_variable_scope",
"tensorflow.ConfigProto",
"tensorflow_probability.mcmc.sample_chain",
"os.path.join",
"numpy.savetxt",
"numpy.genfromtxt",
"tensorflow.variable_scope",
"tensorflow.python.ipu.ipu_compiler.compile",
"tensorflow.cast",
"tensorflow_probability.bijectors.Identity",
"tensorflow.squeeze",
"tensorflow.ones",
"tensorflow.python.ipu.utils.configure_ipu_system",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow_probability.mcmc.make_simple_step_size_update_policy",
"tensorflow.python.ipu.utils.auto_select_ipus",
"tensorflow.expand_dims",
"time.time",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.nn.xw_plus_b",
"tensorflow.python.ipu.utils.move_variable_initialization_to_cpu",
"tensorflow.python.ipu.utils.create_ipu_config"
] |
[((645, 670), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (668, 670), False, 'import argparse\n'), ((824, 891), 'os.path.join', 'os.path.join', (['args.dataset_dir', '"""returns_and_features_for_mcmc.txt"""'], {}), "(args.dataset_dir, 'returns_and_features_for_mcmc.txt')\n", (836, 891), False, 'import os\n'), ((1330, 1403), 'numpy.genfromtxt', 'np.genfromtxt', (['input_file'], {'skip_header': '(1)', 'delimiter': '"""\t"""', 'dtype': '"""float32"""'}), "(input_file, skip_header=1, delimiter='\\t', dtype='float32')\n", (1343, 1403), True, 'import numpy as np\n'), ((2299, 2361), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (2333, 2361), True, 'import tensorflow as tf\n'), ((2408, 2432), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2430, 2432), True, 'import tensorflow as tf\n'), ((2443, 2459), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2457, 2459), True, 'import tensorflow as tf\n'), ((2468, 2493), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2478, 2493), True, 'import tensorflow as tf\n'), ((6293, 6318), 'tensorflow.python.ipu.utils.create_ipu_config', 'utils.create_ipu_config', ([], {}), '()\n', (6316, 6318), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((6383, 6429), 'tensorflow.python.ipu.utils.auto_select_ipus', 'utils.auto_select_ipus', (['config', '([1] * num_ipus)'], {}), '(config, [1] * num_ipus)\n', (6405, 6429), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((6429, 6463), 'tensorflow.python.ipu.utils.configure_ipu_system', 'utils.configure_ipu_system', (['config'], {}), '(config)\n', (6455, 6463), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((6465, 6508), 'tensorflow.python.ipu.utils.move_variable_initialization_to_cpu', 'utils.move_variable_initialization_to_cpu', ([], {}), '()\n', (6506, 6508), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((6545, 6578), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6576, 6578), True, 'import tensorflow as tf\n'), ((6716, 6727), 'time.time', 'time.time', ([], {}), '()\n', (6725, 6727), True, 'import time as time\n'), ((6765, 6776), 'time.time', 'time.time', ([], {}), '()\n', (6774, 6776), True, 'import time as time\n'), ((6945, 6993), 'numpy.savetxt', 'np.savetxt', (['output_file', 'samples'], {'delimiter': '"""\t"""'}), "(output_file, samples, delimiter='\\t')\n", (6955, 6993), True, 'import numpy as np\n'), ((2636, 2684), 'tensorflow.reshape', 'tf.reshape', (['p[nt + 1:nt + nf * nt + 1]', '[nf, nt]'], {}), '(p[nt + 1:nt + nf * nt + 1], [nf, nt])\n', (2646, 2684), True, 'import tensorflow as tf\n'), ((2687, 2719), 'tensorflow.reshape', 'tf.reshape', (['p[1:nt + 1]', '[nt, 1]'], {}), '(p[1:nt + 1], [nt, 1])\n', (2697, 2719), True, 'import tensorflow as tf\n'), ((2768, 2806), 'tensorflow.expand_dims', 'tf.expand_dims', (['p[nt + nf * nt + 2]', '(0)'], {}), '(p[nt + nf * nt + 2], 0)\n', (2782, 2806), True, 'import tensorflow as tf\n'), ((2901, 2927), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['x', 'w2', 'b2'], {}), '(x, w2, b2)\n', (2916, 2927), True, 'import tensorflow as tf\n'), ((2961, 2974), 'tensorflow.squeeze', 'tf.squeeze', (['x'], {}), '(x)\n', (2971, 2974), True, 'import 
tensorflow as tf\n'), ((2864, 2890), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['x', 'w1', 'b1'], {}), '(x, w1, b1)\n', (2879, 2890), True, 'import tensorflow as tf\n'), ((3887, 3964), 'tensorflow.variable_scope', 'tf.variable_scope', (["('scope' + scope_id)"], {'use_resource': '(True)', 'reuse': 'tf.AUTO_REUSE'}), "('scope' + scope_id, use_resource=True, reuse=tf.AUTO_REUSE)\n", (3904, 3964), True, 'import tensorflow as tf\n'), ((4015, 4051), 'tensorflow.cast', 'tf.cast', (['observed_return_', '"""float32"""'], {}), "(observed_return_, 'float32')\n", (4022, 4051), True, 'import tensorflow as tf\n'), ((4081, 4119), 'tensorflow.cast', 'tf.cast', (['observed_features_', '"""float32"""'], {}), "(observed_features_, 'float32')\n", (4088, 4119), True, 'import tensorflow as tf\n'), ((6033, 6068), 'tensorflow.python.ipu.ipu_compiler.compile', 'ipu_compiler.compile', (['hmc_graph', '[]'], {}), '(hmc_graph, [])\n', (6053, 6068), False, 'from tensorflow.python.ipu import ipu_compiler, ipu_infeed_queue, loops, utils\n'), ((4346, 4370), 'tensorflow_probability.bijectors.Identity', 'tfp.bijectors.Identity', ([], {}), '()\n', (4368, 4370), True, 'import tensorflow_probability as tfp\n'), ((5766, 5906), 'tensorflow_probability.mcmc.sample_chain', 'tfp.mcmc.sample_chain', ([], {'num_results': 'num_results', 'num_burnin_steps': 'num_burnin_steps', 'current_state': 'initial_chain_state', 'kernel': 'hmc_kernel'}), '(num_results=num_results, num_burnin_steps=\n num_burnin_steps, current_state=initial_chain_state, kernel=hmc_kernel)\n', (5787, 5906), True, 'import tensorflow_probability as tfp\n'), ((4205, 4260), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[num_model_parameters]', 'dtype': 'tf.float32'}), '(shape=[num_model_parameters], dtype=tf.float32)\n', (4212, 4260), True, 'import tensorflow as tf\n'), ((4452, 4475), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (4473, 4475), True, 'import tensorflow as tf\n'), ((3332, 3387), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[num_model_parameters]', 'dtype': 'tf.float32'}), '(shape=[num_model_parameters], dtype=tf.float32)\n', (3339, 3387), True, 'import tensorflow as tf\n'), ((3448, 3503), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[num_model_parameters]', 'dtype': 'tf.float32'}), '(shape=[num_model_parameters], dtype=tf.float32)\n', (3455, 3503), True, 'import tensorflow as tf\n'), ((4605, 4640), 'tensorflow.constant', 'tf.constant', (['(0.01)'], {'dtype': 'tf.float32'}), '(0.01, dtype=tf.float32)\n', (4616, 4640), True, 'import tensorflow as tf\n'), ((5385, 5515), 'tensorflow_probability.mcmc.make_simple_step_size_update_policy', 'tfp.mcmc.make_simple_step_size_update_policy', ([], {'target_rate': '(0.2)', 'num_adaptation_steps': 'num_burnin_steps', 'decrement_multiplier': '(0.1)'}), '(target_rate=0.2,\n num_adaptation_steps=num_burnin_steps, decrement_multiplier=0.1)\n', (5429, 5515), True, 'import tensorflow_probability as tfp\n')]
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import json
import time
import argparse
from pathlib import Path
import random
import numpy as np
import tensorflow as tf
tf.autograph.set_verbosity(3) # 0: debug, 1: info, 2: warning, 3: error
from src.models.encoder import Encoder
from src.models.RACL import RACL
from src.utils import (
load_config,
split_documents, read_data, reverse_unk,
decode_results, format_results, dict2html
)
def load_basic_arguments(parser):
# Define arguments
parser.add_argument('--model', default='racl', type=str, help='model name')
parser.add_argument('--max_sentence_len', default=156, type=int, help='maximum number of words in sentence')
parser.add_argument('--embedding_dim', default=768, type=int, help='embedding dimension')
parser.add_argument('--n_interactions', default=6, type=int, help='number of RACL blocks to interact')
parser.add_argument('--n_filters', default=96, type=int, help='number of filters in convolution')
parser.add_argument('--kernel_size', default=11, type=int, help='kernel size in convolution')
parser.add_argument('--random_seed', default=4_10_20, type=int, help='random seed')
parser.add_argument('--include_opinion', default=True, type=bool, help='whether to use opinion for model')
parser.add_argument('--random_type', default='normal', type=str, help='random type: uniform or normal (default)')
parser.add_argument('--ckpt', default=798, type=int, help='checkpoint id to load weights')
opt = parser.parse_args()
opt.n_classes = 3
opt.is_training = False
opt.is_evaluating = False
opt.label_smoothing = False
opt.keep_prob_1, opt.keep_prob_2 = 1., 1.
random.seed(opt.random_seed)
np.random.seed(opt.random_seed)
tf.random.set_seed(opt.random_seed)
return opt
# Samples for prediction
documents = [
# 'dessert was also to die for',
# 'sushi so fresh that it crunches in your mouth',
# 'in fact , this was not a nicoise salad and was barely eatable',
# "the two waitress 's looked like they had been sucking on lemons",
"the absence of halal food - not even for room service",
"la foresto de halalaj manĝaĵoj - eĉ ne por ĉambroservo",
"عدم وجود الطعام الحلال - ولا حتى لخدمة الغرف",
"អវត្ដមាននៃអាហារហាឡាល់ - មិនសូម្បីតែសម្រាប់សេវាកម្មបន្ទប់",
"ການຂາດອາຫານຮາລານ - ບໍ່ແມ່ນແຕ່ ສຳ ລັບການບໍລິການຫ້ອງ",
"халал тағамның болмауы - тіпті бөлме қызметтері үшін де емес",
"отсутствие халяльной еды - даже для обслуживания номеров",
"die afwesigheid van halal-kos - nie eens vir kamerdiens nie",
"l'assenza di cibo halal - nemmeno per il servizio in camera",
"ハラルフードがない-ルームサービスでもない",
"할랄 음식의 부재-룸 서비스조차도",
"la ausencia de comida halal, ni siquiera para el servicio de habitaciones",
"sự vắng mặt của thức ăn halal - thậm chí không có dịch vụ ăn uống tại phòng",
# "Have to travel out in order to get food",
# "Smell of the pillows... smelt like someone odour",
# " Very noisy outside the room, found a cockroaches in bathroom, the condition did not works whole nights, very hot can't sleep",
# "I had to stay here due to holiday inn transferring me here because they were closed for renovations. First I am pist because this hotel stinks of weed, my room was not very clean and due to Covid you would think the room would be super clean but nope wrappers all over the place towels had stains, to top it off I even found bugs in my room. I am disgusted. The service is horrible. “There was never a manager on duty” I even reached out to them in email and still no reply from them so they clearly don’t care. Avoid this hotel there are so many other options by the airport that this one poor excuse for cleanliness and bugs they do not deserve a dime. They don’t fix their problems and a manager is never reachable",
# "First impression is the hotel seem to be in need of an upgrade. The grounds did not feel welcoming on the exterior. The interior had carpet coming up in the hallway, I was on the third floor. It had a bad smell that hits you in the face as soon as you get off the elevator. The rooms was decent with a nice size television, desk and a refrigerator but lacked cleanliness. We couldn't shower because the tubes were GROSS. It looked as if it hadn't been properly cleaned for months! You can see the filth buildup YUCK! This is very concerning considering the month I traveled was during the covid-19 pandemic. If this hotel is not properly cleaning guest rooms than are they really practicing safe measures during a global coronavirus pandemic?",
# "Small rooms, restaurant offers the best of microwaved food and wifi is poor. Staff set engaged, but this establishment needs investment and attention to the the customer experience. Plenty of examples where the site could use a goos cleaning - including the restaurant.",
# "I had a horrible check-in experience at this crown plaza. The manager at night shift was exceptionally rude. Just because it was night and I was tired, I stayed there. I checked out next day and went to The Renaissance across the street.",
# "DIRTY FILTHY DISGUSTING!!! Hair and mold in the bathroom, DIRTY carpeting, smells of cigarette smoke and my daughter woke up with bug bites all over her legs!!! Front desk was an absolute joke! Unprofessional rude and lazy!! Travelers BEWARE!!",
# "Called to say my flight is cancelled because of weather ,can you change to next day or refund.before I could complete the sentence they cancelled my reservation and hung up.i know the hotel room was given to somebody else.i cannot believe the service was from very reputable company like yours",
# "The value for the room and the service was very good but the Furnishings in the room is very outdated and more out. The carpet has been replaced and the linen and the bathtub was spotless. Restaurant bar",
# "The Crowne Plaza is located near the newark airport. The hotel offers a transfer ( i got it on my way back). The rooms are small but the bed is very comfortable. Bathroom regular. Also offers a transfer to the outlet nearby but only in 2 specific times a day.",
# "We stayed one night (thankfully) as there was a lot of noise from airplanes taking off and landing and from traffic on the road nearby. The room was very nice with comfortable bed. The shower was over the bath",
# "I visited this hotel with 6 family members in jan 2020. we reached jetlagged early in the morning to be greeted by an extremely rude lady whose name started with Q. I saw her even mocking a few clients. Rooms were clean. Sleep quality was nice Not many eating options around hotel for breakfast, except the hotel itself. In evening one can walk out towards quay and be delighted with so many restaurants. over all a an average hotel BUT the RUDEST STAFF i have ever seen. STAY AWAY IF YOU ANYOTHER OPTION.",
# "Hotel was very crowded and so called club lounge was so crowded that we couldn't use 20 minute wait for breakfast in main restaurant Hotel room small and basic - not luxury Pool good and hotel location excellent",
# "The hotel is actually <NAME> not <NAME> as the name claims. I had booked a room with a king size bed but they could only give me twin beds on the first night so I had to move rooms on the second day. All of the rooms I saw were tired with very bland decor and badly in need of a refresh. I also experienced a lot of noise from neighbouring rooms",
# "I do no understand why you are charging me USD 100 (66% of original room charge) because I have Netherlands nationality but booked my room stating my residential address in Thailand, where I have lived for the last 13 years",
# "Check in was appalling ! Checked into a deluxe room but was given two single beds!! Went downstairs to speak to reception and they told me only room they have is a smoking room which was not practical!!! Then had to sleep there and next day await a room change!!! Which was chased by us as no one remembered the next day!!",
# "I would not recommend this hotel, it is seriously understaffed the restaurant is small for the size of the hotel which results in the tables being too close together. The restaurant staff tried their best but there just weren't enough of them",
# "nice bar and front desk staff members happy faces they made me feel like a vip. update! hotel is dark and old. bathroom was tiny, dark and poor design. elevator was slow. hotel facilities and staff were excellent",
]
def predict(parser, args):
"""
Predict from command line and return response output as html + json
Parameters
----------
args :
args.config_path : str
path to config yml e.g. /production/model_config.yml
args.log_level: str
'debug', 'info', or 'warning' level for root logger and all handlers
"""
config = load_config(Path(args.config_path))
opt = load_basic_arguments(parser)
for key, value in config["model_params"].items():
print(f"Key: {key} - Value: {value}")
        setattr(opt, key, value)
# Define useful directories
predicts_dir = config["paths"]["predictions"]
artefacts_dir = config["paths"]["artefacts"]
checkpoint_dir = config["paths"]["checkpoint"]
opt.ckpt_path = os.path.join(checkpoint_dir, f"RACL-epoch={opt.ckpt:03d}.h5")
# Split document into sentences
sentences, sent2doc = split_documents(documents)
opt.batch_size = len(sentences)
# Load Tokenizer and Encoder
print(f"\n\n\nLoading Encoder ...")
sbert_version = 'distilUSE'
sbert_dir = os.path.join(artefacts_dir, sbert_version)
encoder = Encoder(sbert_dir)
# Tokenize
start_time = time.time()
embeddings, sentences_mask, position_matrices, tokens_in_doc = read_data(sentences, opt, encoder)
embeddings = np.reshape(embeddings, (opt.batch_size, opt.max_sentence_len, opt.embedding_dim))
tokens_in_doc = reverse_unk(tokens_in_doc, sentences)
end_time = time.time()
time_running = end_time - start_time
run_time = f'\n\n\nTokenize {len(sentences)} samples in {time_running:.2f}s'
print(run_time)
# Load model
model = RACL(opt)
model.load_weights(opt.ckpt_path)
# Predict
start_time = time.time()
aspect_probs, opinion_probs, sentiment_probs = model.predict(
sentence=embeddings,
word_mask=sentences_mask.reshape((opt.batch_size, opt.max_sentence_len)),
position_att=position_matrices.reshape((opt.batch_size, opt.max_sentence_len, opt.max_sentence_len))
)
end_time = time.time()
time_running = end_time - start_time
run_time = f'\n\n\nPredict {len(sentences)} samples in {time_running:.2f}s'
print(run_time)
# Feed results into DataFrame
results_df = decode_results(tokens_in_doc, sent2doc,
aspect_probs, opinion_probs, sentiment_probs)
# Write logs
output_file = os.path.join(predicts_dir, f'case_study_{opt.task}')
print(f'\n\nWriting result to \n\t{output_file}.json\n\t{output_file}.html ...')
doc_results = format_results(results_df)
with open(output_file+'.json', 'w') as f_writer:
json.dump(doc_results, f_writer, indent=4)
dict2html(doc_results, output_file+'.html')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Model Prediction')
parser.add_argument('-c', '--config-path', default='production/model_config.yml', type=str, help='Config path')
args, unk_args = parser.parse_known_args()
predict(parser, args)
##########################################
    # Execution Time on Local Machine:      #
# Tokenize 13 samples in 0.22s #
# Predict 13 samples in 2.27s #
##########################################
|
[
"tensorflow.random.set_seed",
"json.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"src.models.RACL.RACL",
"src.utils.dict2html",
"time.time",
"pathlib.Path",
"src.utils.split_documents",
"random.seed",
"src.models.encoder.Encoder",
"numpy.reshape",
"src.utils.reverse_unk",
"src.utils.decode_results",
"src.utils.format_results",
"os.path.join",
"src.utils.read_data",
"tensorflow.autograph.set_verbosity"
] |
[((282, 311), 'tensorflow.autograph.set_verbosity', 'tf.autograph.set_verbosity', (['(3)'], {}), '(3)\n', (308, 311), True, 'import tensorflow as tf\n'), ((1851, 1879), 'random.seed', 'random.seed', (['opt.random_seed'], {}), '(opt.random_seed)\n', (1862, 1879), False, 'import random\n'), ((1885, 1916), 'numpy.random.seed', 'np.random.seed', (['opt.random_seed'], {}), '(opt.random_seed)\n', (1899, 1916), True, 'import numpy as np\n'), ((1922, 1957), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['opt.random_seed'], {}), '(opt.random_seed)\n', (1940, 1957), True, 'import tensorflow as tf\n'), ((9518, 9579), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'f"""RACL-epoch={opt.ckpt:03d}.h5"""'], {}), "(checkpoint_dir, f'RACL-epoch={opt.ckpt:03d}.h5')\n", (9530, 9579), False, 'import os\n'), ((9646, 9672), 'src.utils.split_documents', 'split_documents', (['documents'], {}), '(documents)\n', (9661, 9672), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((9837, 9879), 'os.path.join', 'os.path.join', (['artefacts_dir', 'sbert_version'], {}), '(artefacts_dir, sbert_version)\n', (9849, 9879), False, 'import os\n'), ((9895, 9913), 'src.models.encoder.Encoder', 'Encoder', (['sbert_dir'], {}), '(sbert_dir)\n', (9902, 9913), False, 'from src.models.encoder import Encoder\n'), ((9950, 9961), 'time.time', 'time.time', ([], {}), '()\n', (9959, 9961), False, 'import time\n'), ((10030, 10064), 'src.utils.read_data', 'read_data', (['sentences', 'opt', 'encoder'], {}), '(sentences, opt, encoder)\n', (10039, 10064), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((10083, 10169), 'numpy.reshape', 'np.reshape', (['embeddings', '(opt.batch_size, opt.max_sentence_len, opt.embedding_dim)'], {}), '(embeddings, (opt.batch_size, opt.max_sentence_len, opt.\n embedding_dim))\n', (10093, 10169), True, 'import numpy as np\n'), ((10186, 10223), 'src.utils.reverse_unk', 'reverse_unk', (['tokens_in_doc', 'sentences'], {}), '(tokens_in_doc, sentences)\n', (10197, 10223), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((10240, 10251), 'time.time', 'time.time', ([], {}), '()\n', (10249, 10251), False, 'import time\n'), ((10430, 10439), 'src.models.RACL.RACL', 'RACL', (['opt'], {}), '(opt)\n', (10434, 10439), False, 'from src.models.RACL import RACL\n'), ((10514, 10525), 'time.time', 'time.time', ([], {}), '()\n', (10523, 10525), False, 'import time\n'), ((10839, 10850), 'time.time', 'time.time', ([], {}), '()\n', (10848, 10850), False, 'import time\n'), ((11050, 11139), 'src.utils.decode_results', 'decode_results', (['tokens_in_doc', 'sent2doc', 'aspect_probs', 'opinion_probs', 'sentiment_probs'], {}), '(tokens_in_doc, sent2doc, aspect_probs, opinion_probs,\n sentiment_probs)\n', (11064, 11139), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((11208, 11260), 'os.path.join', 'os.path.join', (['predicts_dir', 'f"""case_study_{opt.task}"""'], {}), "(predicts_dir, f'case_study_{opt.task}')\n", (11220, 11260), False, 'import os\n'), ((11366, 11392), 'src.utils.format_results', 'format_results', (['results_df'], {}), '(results_df)\n', (11380, 11392), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((11504, 
11549), 'src.utils.dict2html', 'dict2html', (['doc_results', "(output_file + '.html')"], {}), "(doc_results, output_file + '.html')\n", (11513, 11549), False, 'from src.utils import load_config, split_documents, read_data, reverse_unk, decode_results, format_results, dict2html\n'), ((11594, 11649), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Model Prediction"""'}), "(description='Model Prediction')\n", (11617, 11649), False, 'import argparse\n'), ((9114, 9136), 'pathlib.Path', 'Path', (['args.config_path'], {}), '(args.config_path)\n', (9118, 9136), False, 'from pathlib import Path\n'), ((11456, 11498), 'json.dump', 'json.dump', (['doc_results', 'f_writer'], {'indent': '(4)'}), '(doc_results, f_writer, indent=4)\n', (11465, 11498), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 17:41:44 2020
@author: salman
"""
from PIL import Image
import pandas as pd
import numpy as np
import cv2
import os
d={}
data = pd.read_csv('E:\\fyp data\\ADEK-20\\new_se_new\\new.txt', sep="\t")
arr=np.zeros(151)
print(arr)
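# Build a class-id lookup table: arr[original_id] = remapped_id, read from the mapping file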
for point in data.values:
(key,name,val)=point[0],point[-2],point[-1]
arr[key]=val
print(arr)
print(arr)
train_file= pd.read_csv('E:\\fyp data\\ADEK-20\\validation_images.txt', sep="\t")
train_lst=list(train_file["images"])
path="E:\\fyp data\\ADEK-20\\ADEChallengeData2016\\ADEChallengeData2016\\annotations\\validation\\"
saved="E:\\fyp data\\ADEK-20\\new_se_new\\adk_annotations\\validation\\"
for img in train_lst:
imgPath=path+img+'.png'
image=np.array(cv2.imread(imgPath,0))
image=arr[image]
uniques=np.unique(image)
    if len(uniques) > 0:
cv2.imwrite(saved+img+'.png',image)
print("Done")
|
[
"pandas.read_csv",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread",
"numpy.unique"
] |
[((182, 249), 'pandas.read_csv', 'pd.read_csv', (['"""E:\\\\fyp data\\\\ADEK-20\\\\new_se_new\\\\new.txt"""'], {'sep': '"""\t"""'}), "('E:\\\\fyp data\\\\ADEK-20\\\\new_se_new\\\\new.txt', sep='\\t')\n", (193, 249), True, 'import pandas as pd\n'), ((255, 268), 'numpy.zeros', 'np.zeros', (['(151)'], {}), '(151)\n', (263, 268), True, 'import numpy as np\n'), ((408, 477), 'pandas.read_csv', 'pd.read_csv', (['"""E:\\\\fyp data\\\\ADEK-20\\\\validation_images.txt"""'], {'sep': '"""\t"""'}), "('E:\\\\fyp data\\\\ADEK-20\\\\validation_images.txt', sep='\\t')\n", (419, 477), True, 'import pandas as pd\n'), ((818, 834), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (827, 834), True, 'import numpy as np\n'), ((762, 784), 'cv2.imread', 'cv2.imread', (['imgPath', '(0)'], {}), '(imgPath, 0)\n', (772, 784), False, 'import cv2\n'), ((871, 911), 'cv2.imwrite', 'cv2.imwrite', (["(saved + img + '.png')", 'image'], {}), "(saved + img + '.png', image)\n", (882, 911), False, 'import cv2\n')]
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier
from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score
from src.utils import calc_annual_return_vec, print_test_results
from config import basedir
if __name__ == "__main__":
# Read the datasets
train = pd.read_csv(os.path.join(basedir, 'data', 'processed', 'PD_train_continuous.csv'), sep=";")
test = pd.read_csv(os.path.join(basedir, 'data', 'processed', 'PD_test_continuous.csv'), sep=";")
X_train = np.array(train.drop(columns="good_bad"))
y_train = np.array(train["good_bad"])
X_test = np.array(test.drop(columns="good_bad"))
y_test = np.array(test["good_bad"])
print('Length of training set:', len(y_train))
print('Length of testing set: ', len(y_test))
####################################################################################################################
###################################### Random Forest Classification ######################################
####################################################################################################################
reg = GradientBoostingClassifier()
reg.fit(X_train, y_train)
y_train_predict = np.round(reg.predict(X_train), 2)
y_test_predict = np.round(reg.predict(X_test), 2)
y_hat_test = reg.predict(X_test)
print("Accuracy:", accuracy_score(y_test, y_hat_test))
y_hat_test_proba = reg.predict_proba(X_test)[:][:, 1]
predictions = pd.concat([pd.DataFrame(y_test), pd.DataFrame(y_hat_test_proba)], axis=1)
predictions.columns = ["y_test", "y_hat_test_proba"]
fpr, tpr, thresholds = roc_curve(y_test, y_hat_test_proba)
auc = roc_auc_score(y_test, y_hat_test_proba)
plt.figure()
plt.plot(fpr, tpr)
plt.plot(fpr, fpr, linestyle="--", color="k")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.title(f"ROC curve (AUC = {np.round(auc, 2)})")
plt.savefig('../results/PD_GradientBoosting_model_auc.png')
plt.savefig(os.path.join(basedir, 'results', 'roc', 'PD_GradientBoosting.png'))
plt.show()
scores = mean_absolute_error(y_test_predict, y_test)
print('Mean Abs Error: {:.2f}'.format(scores))
####################################################################################################################
########################################### Feature Importance ###########################################
####################################################################################################################
print_FeatureImportance = False
if print_FeatureImportance:
importances = reg.feature_importances_
std = np.std([tree.feature_importances_ for tree in reg.estimators_], axis=0)
indices = np.flip(np.argsort(importances), axis=0)
xaxis = np.linspace(0, len(indices) - 1, len(indices))
names = []
for idx in indices:
names.append(train.columns[idx])
ax = plt.figure()
plt.title("Feature Importance")
plt.bar(xaxis, importances[indices] * 100, color="r", yerr=std[indices] * 100, align="center")
plt.xticks(xaxis, names, rotation=90)
plt.ylabel('%')
plt.tight_layout()
plt.savefig(os.path.join(basedir, 'results', 'roc', 'PD_GradientBoosting_FeatureImportance.png'))
####################################################################################################################
####################################### Evaluating Output Results ########################################
####################################################################################################################
print_results = False
if print_results:
idx = y_test_predict > 15.0
print_test_results(f"Yield (15% < predict):", test[idx])
idx = np.logical_and(y_test_predict > 10.0, y_test_predict < 15.0)
print_test_results(f"Yield (10% < predict < 15%):", test[idx])
idx = np.logical_and(y_test_predict > 5.0, y_test_predict < 10.0)
print_test_results(f"Yield (5% < predict < 10%):", test[idx])
idx = np.logical_and(y_test_predict > 0.0, y_test_predict < 5.0)
print_test_results(f"Yield (0% < predict < 5%):", test[idx])
idx = np.logical_and(y_test_predict > -10.0, y_test_predict < 0.0)
print_test_results(f"Yield (-10% < predict < 0%):", test[idx])
idx = np.logical_and(y_test_predict > -20.0, y_test_predict < -10.0)
print_test_results(f"Yield (-20% < predict < -10%):", test[idx])
idx = y_test_predict < -20.0
print_test_results(f"Yield (-20% > predict):", test[idx])
plt.show(block=True)
|
[
"matplotlib.pyplot.title",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.bar",
"sklearn.metrics.mean_absolute_error",
"numpy.argsort",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.round",
"pandas.DataFrame",
"numpy.std",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"src.utils.print_test_results",
"sklearn.metrics.roc_auc_score",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"numpy.logical_and",
"sklearn.ensemble.GradientBoostingClassifier",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((696, 723), 'numpy.array', 'np.array', (["train['good_bad']"], {}), "(train['good_bad'])\n", (704, 723), True, 'import numpy as np\n'), ((790, 816), 'numpy.array', 'np.array', (["test['good_bad']"], {}), "(test['good_bad'])\n", (798, 816), True, 'import numpy as np\n'), ((1294, 1322), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (1320, 1322), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, GradientBoostingClassifier\n'), ((1797, 1832), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_hat_test_proba'], {}), '(y_test, y_hat_test_proba)\n', (1806, 1832), False, 'from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score\n'), ((1843, 1882), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_hat_test_proba'], {}), '(y_test, y_hat_test_proba)\n', (1856, 1882), False, 'from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score\n'), ((1888, 1900), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1898, 1900), True, 'import matplotlib.pyplot as plt\n'), ((1905, 1923), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1913, 1923), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1973), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'fpr'], {'linestyle': '"""--"""', 'color': '"""k"""'}), "(fpr, fpr, linestyle='--', color='k')\n", (1936, 1973), True, 'import matplotlib.pyplot as plt\n'), ((1978, 2011), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False positive rate"""'], {}), "('False positive rate')\n", (1988, 2011), True, 'import matplotlib.pyplot as plt\n'), ((2016, 2048), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True positive rate"""'], {}), "('True positive rate')\n", (2026, 2048), True, 'import matplotlib.pyplot as plt\n'), ((2108, 2167), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../results/PD_GradientBoosting_model_auc.png"""'], {}), "('../results/PD_GradientBoosting_model_auc.png')\n", (2119, 2167), True, 'import matplotlib.pyplot as plt\n'), ((2256, 2266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2264, 2266), True, 'import matplotlib.pyplot as plt\n'), ((2281, 2324), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test_predict', 'y_test'], {}), '(y_test_predict, y_test)\n', (2300, 2324), False, 'from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score\n'), ((4891, 4911), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (4899, 4911), True, 'import matplotlib.pyplot as plt\n'), ((444, 513), 'os.path.join', 'os.path.join', (['basedir', '"""data"""', '"""processed"""', '"""PD_train_continuous.csv"""'], {}), "(basedir, 'data', 'processed', 'PD_train_continuous.csv')\n", (456, 513), False, 'import os\n'), ((547, 615), 'os.path.join', 'os.path.join', (['basedir', '"""data"""', '"""processed"""', '"""PD_test_continuous.csv"""'], {}), "(basedir, 'data', 'processed', 'PD_test_continuous.csv')\n", (559, 615), False, 'import os\n'), ((1525, 1559), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_hat_test'], {}), '(y_test, y_hat_test)\n', (1539, 1559), False, 'from sklearn.metrics import mean_absolute_error, accuracy_score, roc_curve, roc_auc_score\n'), ((2184, 2250), 'os.path.join', 'os.path.join', (['basedir', '"""results"""', '"""roc"""', '"""PD_GradientBoosting.png"""'], {}), "(basedir, 'results', 'roc', 'PD_GradientBoosting.png')\n", (2196, 2250), False, 
'import os\n'), ((2870, 2941), 'numpy.std', 'np.std', (['[tree.feature_importances_ for tree in reg.estimators_]'], {'axis': '(0)'}), '([tree.feature_importances_ for tree in reg.estimators_], axis=0)\n', (2876, 2941), True, 'import numpy as np\n'), ((3170, 3182), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3180, 3182), True, 'import matplotlib.pyplot as plt\n'), ((3191, 3222), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Importance"""'], {}), "('Feature Importance')\n", (3200, 3222), True, 'import matplotlib.pyplot as plt\n'), ((3231, 3330), 'matplotlib.pyplot.bar', 'plt.bar', (['xaxis', '(importances[indices] * 100)'], {'color': '"""r"""', 'yerr': '(std[indices] * 100)', 'align': '"""center"""'}), "(xaxis, importances[indices] * 100, color='r', yerr=std[indices] * \n 100, align='center')\n", (3238, 3330), True, 'import matplotlib.pyplot as plt\n'), ((3334, 3371), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xaxis', 'names'], {'rotation': '(90)'}), '(xaxis, names, rotation=90)\n', (3344, 3371), True, 'import matplotlib.pyplot as plt\n'), ((3380, 3395), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""%"""'], {}), "('%')\n", (3390, 3395), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3422), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3420, 3422), True, 'import matplotlib.pyplot as plt\n'), ((3986, 4043), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (15% < predict):"""', 'test[idx]'], {}), "(f'Yield (15% < predict):', test[idx])\n", (4004, 4043), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4059, 4119), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > 10.0)', '(y_test_predict < 15.0)'], {}), '(y_test_predict > 10.0, y_test_predict < 15.0)\n', (4073, 4119), True, 'import numpy as np\n'), ((4128, 4191), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (10% < predict < 15%):"""', 'test[idx]'], {}), "(f'Yield (10% < predict < 15%):', test[idx])\n", (4146, 4191), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4207, 4266), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > 5.0)', '(y_test_predict < 10.0)'], {}), '(y_test_predict > 5.0, y_test_predict < 10.0)\n', (4221, 4266), True, 'import numpy as np\n'), ((4275, 4338), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (5% < predict < 10%):"""', 'test[idx]'], {}), "(f'Yield (5% < predict < 10%):', test[idx])\n", (4293, 4338), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4354, 4412), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > 0.0)', '(y_test_predict < 5.0)'], {}), '(y_test_predict > 0.0, y_test_predict < 5.0)\n', (4368, 4412), True, 'import numpy as np\n'), ((4421, 4483), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (0% < predict < 5%):"""', 'test[idx]'], {}), "(f'Yield (0% < predict < 5%):', test[idx])\n", (4439, 4483), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4499, 4559), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > -10.0)', '(y_test_predict < 0.0)'], {}), '(y_test_predict > -10.0, y_test_predict < 0.0)\n', (4513, 4559), True, 'import numpy as np\n'), ((4568, 4630), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (-10% < predict < 0%):"""', 'test[idx]'], {}), "(f'Yield (-10% < predict < 0%):', test[idx])\n", (4586, 4630), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), 
((4646, 4708), 'numpy.logical_and', 'np.logical_and', (['(y_test_predict > -20.0)', '(y_test_predict < -10.0)'], {}), '(y_test_predict > -20.0, y_test_predict < -10.0)\n', (4660, 4708), True, 'import numpy as np\n'), ((4717, 4781), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (-20% < predict < -10%):"""', 'test[idx]'], {}), "(f'Yield (-20% < predict < -10%):', test[idx])\n", (4735, 4781), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((4828, 4885), 'src.utils.print_test_results', 'print_test_results', (['f"""Yield (-20% > predict):"""', 'test[idx]'], {}), "(f'Yield (-20% > predict):', test[idx])\n", (4846, 4885), False, 'from src.utils import calc_annual_return_vec, print_test_results\n'), ((1649, 1669), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {}), '(y_test)\n', (1661, 1669), True, 'import pandas as pd\n'), ((1671, 1701), 'pandas.DataFrame', 'pd.DataFrame', (['y_hat_test_proba'], {}), '(y_hat_test_proba)\n', (1683, 1701), True, 'import pandas as pd\n'), ((2968, 2991), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (2978, 2991), True, 'import numpy as np\n'), ((3443, 3531), 'os.path.join', 'os.path.join', (['basedir', '"""results"""', '"""roc"""', '"""PD_GradientBoosting_FeatureImportance.png"""'], {}), "(basedir, 'results', 'roc',\n 'PD_GradientBoosting_FeatureImportance.png')\n", (3455, 3531), False, 'import os\n'), ((2083, 2099), 'numpy.round', 'np.round', (['auc', '(2)'], {}), '(auc, 2)\n', (2091, 2099), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
def soft_update(target: nn.Module, source: nn.Module, tau):
with torch.no_grad():
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def hard_update(target: nn.Module, source: nn.Module):
with torch.no_grad():
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def compute_target_value(reward, gamma, done, next_q):
q_target = reward + gamma * (1.0 - done) * next_q
return q_target
def to_numpy_or_python_type(tensors):
"""Converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.
    For each tensor, it calls `tensor.detach().cpu().numpy()`. If the result is a scalar value,
it converts it to a Python type, such as a float or int, by calling
`result.item()`.
Numpy scalars are converted, as Python types are often more convenient to deal
with. This is especially useful for bfloat16 Numpy scalars, which don't
support as many operations as other Numpy values.
Args:
tensors: A structure of tensors.
Returns:
`tensors`, but scalar tensors are converted to Python types and non-scalar
tensors are converted to Numpy arrays.
"""
def _to_single_numpy_or_python_type(t):
if isinstance(t, torch.Tensor):
x = t.detach().cpu().numpy()
return x.item() if np.ndim(x) == 0 else x
return t # Don't turn ragged or sparse tensors to NumPy.
import tensorflow as tf
return tf.nest.map_structure(_to_single_numpy_or_python_type, tensors)
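# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Shows how soft_update and compute_target_value are typically combined for a
# target-network update; the layer sizes and hyper-parameters below are
# arbitrary assumptions chosen only to keep the example runnable.
if __name__ == "__main__":
    online = nn.Linear(4, 2)
    target = nn.Linear(4, 2)
    hard_update(target, online)             # start from identical weights
    soft_update(target, online, tau=0.005)  # then track slowly (Polyak averaging)
    q_target = compute_target_value(reward=1.0, gamma=0.99, done=0.0, next_q=0.5)
    print(q_target)  # 1.0 + 0.99 * (1.0 - 0.0) * 0.5 = 1.495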
|
[
"tensorflow.nest.map_structure",
"torch.no_grad",
"numpy.ndim"
] |
[((1652, 1715), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['_to_single_numpy_or_python_type', 'tensors'], {}), '(_to_single_numpy_or_python_type, tensors)\n', (1673, 1715), True, 'import tensorflow as tf\n'), ((125, 140), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (138, 140), False, 'import torch\n'), ((378, 393), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (391, 393), False, 'import torch\n'), ((1523, 1533), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (1530, 1533), True, 'import numpy as np\n')]
|
"""Submodule containing frequency-based models."""
from freqtools.freq_data import OscillatorNoise
import numpy as np
import matplotlib.pyplot as plt
class FreqModel:
"""
Base class for frequency based models, i.e. values (y axis) as a function of
    frequency (x axis). Its functionality is purposefully kept simple and its main
purpose is to implement basic behaviour.
Parameters
----------
*args :
        Placeholder, not used. The respective subclasses have to implement the
        behaviour of positional arguments.
**kwargs :
        All keyword arguments are added as attributes.
"""
def __init__(self, *args, **kwargs):
del args
for key, value in kwargs.items():
setattr(self, key, value)
def values(self, freqs):
raise NotImplementedError("Subclasses have to implement this method.")
def plot(self, freqs, ax=None, xscale="log", yscale="log", ylabel=""):
"""
Plot the model.
Parameters
----------
ax : Axis (optional)
If axis is provided, they will be used for the plot. if not provided, a new
plot will automatically be created.
xscale : {"log" or "linear"}
Scaling of the x axis.
yscale : {"log" or "linear"}
Scaling for the y axis.
ylabel : str
Label for the y axis.
Returns
-------
fig, ax : Figure, Axis
The Figure and Axis handles of the plot that was used.
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
ax.plot(freqs, self.values(freqs), label=self.label)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
ax.set_ylabel(ylabel)
ax.set_xlabel("Frequency / Hz")
plt.grid(True, which="both", ls="-")
return fig, ax
class OscillatorNoiseModel(FreqModel):
"""
A base class holding models of spectral densities of oscillator noise, i.e.
frequency or phase noise. Its main purpose is to make it easy to convert between
ASD(f), PSD(f) and L(f) in terms of both frequency and phase noise. The data is
provided in one of these representations and makes all other representations
available.
Parameters
----------
*args :
        Placeholder, not used. The respective subclasses have to implement the
        behaviour of positional arguments.
n_sided : 1 (optional)
placeholder, for now only one-sided distributions are supported.
label : str
Optional label used for plotting.
**kwargs :
        All keyword arguments are added as attributes.
Attributes
----------
n_sided
label : str
Optional label used for plotting
representation
unit
ylabel
"""
def __init__(self, n_sided=1, label="", representation=None, **kwargs):
_allowed_representations = [
"asd_freq",
"asd_phase",
"psd_freq",
"psd_phase",
"script_L",
]
super().__init__(
label=label,
n_sided=n_sided,
_allowed_representations=list(_allowed_representations),
representation=representation,
**kwargs
)
self._unit_dict = {
"asd_freq": "Hz/$\\sqrt{\\mathrm{Hz}}$",
"asd_phase": "$\\mathrm{rad}/\\sqrt{\\mathrm{Hz}}$",
"psd_freq": "Hz${}^2$/Hz",
"psd_phase": "rad${}^2$/Hz",
"script_L": "dBc/Hz",
}
self._ylabel_dict = {
"asd_freq": "{}-sided ASD",
"asd_phase": "{}-sided ASD",
"psd_freq": "{}-sided PSD",
"psd_phase": "{}-sided PSD",
"script_L": "L(f)",
}
@property
def ylabel(self):
"""y axis label used for plotting; doesn't contain the unit.""" # noqa: D403
return self._ylabel_dict[self.representation].format(self.n_sided)
@property
def unit(self):
"""String containing the unit of `values`"""
return self._unit_dict[self.representation]
@property
def representation(self):
"""The representation of `values`."""
return self._representation
@representation.setter
def representation(self, representation):
assert (
representation in self._allowed_representations
), "representation must be one of {}".format(self._allowed_representations)
self._representation = representation
@property
def n_sided(self):
"""Currently only one-sided distribtuions are supported."""
return self._n_sided
@n_sided.setter
def n_sided(self, new_n):
# FIXME: support for two-sided distributions.
assert new_n == 1, "Only 1-sided distributions are supported as of yet."
self._n_sided = new_n
def values(self, freqs):
"""
Array containing the values of the spectral density model. Maps to one
representation, depending on `representation` attribute.
"""
method = getattr(self, self.representation)
return method(freqs)
def asd_freq(self, freqs):
"""
Amplitude spectral density of the frequency noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
return np.array(freqs) * self.asd_phase(freqs)
def asd_phase(self, freqs):
"""
Amplitude spectral density of the phase noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
return np.sqrt(self.psd_phase(freqs))
def psd_freq(self, freqs):
"""
Power spectral density of the frequency noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
return self.asd_freq(freqs) ** 2
def psd_phase(self, freqs):
"""
Power spectral density of the phase noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
# psd_phase can either be derived from psd_freq or script_L
try:
# convert to linear scale, factor 1/10 in exponent because dBc are used
psd_phase = 10 ** (self.script_L(freqs) / 10)
if self.n_sided == 1:
# one-sided distributions have a factor 2, see Table A1 in [1]
psd_phase *= 2
except AttributeError:
psd_phase = self.psd_freq(freqs) / np.array(freqs) ** 2
return psd_phase
def script_L(self, freqs):
"""
The phase noise L(f) (pronounced "script ell of f").
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated
Returns
-------
1darray
"""
        # see Table A.1 in [1] for the conversion between S_phi(f) and L(f)
L = self.psd_phase(freqs)
if self.n_sided == 1:
L /= 2
L = 10 * np.log10(L) # convert to dBc/Hz
return L
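    # Editor's note (hedged numeric check): a one-sided S_phi(f) of 2e-6 rad^2/Hz
    # maps to L(f) = 10*log10(2e-6 / 2) = -60 dBc/Hz, consistent with Table A.1 in [1].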
def plot(self, freqs, ax=None, xscale="log", yscale="log", ylabel=""):
"""
Plot the spectral density model.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
ax : matplotlib.axes.Axes (optional)
The axes to plot on. If not given, a new figure is created.
xscale : str {"log", "linear"} (optional)
The scale of the x-axis.
yscale : str {"log", "linear"} (optional)
The scale of the y-axis.
ylabel : str (optional)
The label of the y-axis.
"""
if not ylabel:
# automatically create ylabel
ylabel = self.ylabel + " / " + self.unit
fig, ax = super().plot(
freqs, ax=ax, xscale=xscale, yscale=yscale, ylabel=ylabel
)
if not self.representation == "script_L":
ax.set_yscale("log")
return fig, ax
def to_oscillator_noise(self, freqs):
"""
Convert the noise model to a `OscillatorNoise` object.
Parameters
----------
freqs : 1d-array
The Fourier frequencies in Hz.
Returns
-------
oscillator_noise : OscillatorNoise
The model represented as an `OscillatorNoise` object.
"""
oscillator_noise = OscillatorNoise(
freqs,
self.values(freqs),
representation=self.representation,
n_sided=self.n_sided,
divide_by=1,
)
return oscillator_noise
class PowerLawNoise(OscillatorNoiseModel):
r"""
Power law phase and frequency noise models [1] for common noise types:
.. math:: S_\phi = b_{i} \cdot f^{i}
or
    .. math:: S_\nu = d_{i} \cdot f^{i}
Parameters
----------
coeff : float or list of floats
        Coefficient b_i (for phase noise) or d_i (for frequency noise), cp. [1]. Has to
        be a list if `edge_freqs` is set.
exponent : int or list of ints
        The exponent of the power law noise. The noise type depends on the `base`
for a given exponent, cp. [1]. Has to be a list if `edge_freqs` is set.
edge_freqs : list of floats (optional)
Allows to construct composite models that have different noise types for
different frequency ranges. In this case, `coeff` and `exponent` have to be
lists of length `len(edge_freqs) + 1`. The edge frequencies are the frequencies
where the noise type changes.
    Allowed exponents for phase noise:
- -4 : random walk frequency
- -3 : flicker frequency
- -2 : white frequency
- -1 : flicker phase
- 0 : white phase
    Allowed exponents for frequency noise:
- -2 : random walk frequency
- -1 : flicker frequency
- 0 : white frequency
- 1 : flicker phase
- 2 : white phase
base : {'phase', 'freq'}:
determines whether the exponent and coefficient is given in terms of phase or
frequency.
References
----------
[1] <NAME> - Enrico's Chart of Phase Noise and Two-Sample Variances
(http://rubiola.org/pdf-static/Enrico%27s-chart-EFTS.pdf)
"""
def __init__(
self,
coeff=1,
exponent=0,
base="phase",
representation="psd_phase",
edge_freqs=None,
):
assert base in ["phase", "freq"]
if base == "freq":
# express everything in terms of psd_phase
if type(exponent) == list:
exponent = np.array(exponent)
exponent = exponent - 2
_label_dict = {
-4: "random walk frequency",
-3: "flicker frequency",
-2: "white frequency",
-1: "flicker phase",
0: "white phase",
}
try:
label = _label_dict[exponent] + " noise"
except (KeyError, TypeError):
label = "noise model"
super().__init__(
coeff=coeff, exponent=exponent, label=label, representation=representation
)
if edge_freqs:
self.edge_freqs = list(edge_freqs)
self.edge_freqs.append(np.inf)
def psd_phase(self, freqs):
"""
Power spectral density of the phase noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
Returns
-------
1darray :
The power spectral density of the phase noise.
"""
# Implement PSD of phase, all other representations can be calculated by virtue
# of subclassing OscillatorNoiseModel.
# FIXME: Improve the cases
if type(self.coeff) == list:
previous_f_edge = 0
freqs = np.array(freqs)
values = []
for f_edge, coeff, exp in zip(self.edge_freqs, self.coeff, self.exponent):
idx = np.where(np.logical_and(freqs > previous_f_edge, freqs <= f_edge))
new_vals = coeff * freqs[idx] ** exp
values.append(new_vals)
previous_f_edge = f_edge
# flatten the list of lists
values = [item for sublist in values for item in sublist]
if len(values) < len(freqs):
# add the last value
values.append(coeff * freqs[-1] ** exp)
values = np.array(values)
else:
values = self.coeff * freqs**self.exponent
return values
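# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# A white *frequency* noise model: with base="freq", exponent=0 and coeff=100 the
# class internally stores S_phi(f) = 100 / f^2, so L(f) falls by 20 dB per decade.
# The coefficient value is an arbitrary assumption chosen only for illustration.
if __name__ == "__main__":
    _white_freq = PowerLawNoise(
        coeff=100.0, exponent=0, base="freq", representation="script_L"
    )
    _freqs = np.logspace(1, 6, 6)
    print(_white_freq.values(_freqs))  # dBc/Hz; about -3 dBc/Hz at 10 Hz and falling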
class JohnsonNoise(OscillatorNoiseModel):
"""
Johnson Noise model.
Parameters
----------
signal_power : float
Carrier signal power in dBm / Hz
temperature : float (default 300.)
Temperature in kelvin
Attributes
----------
signal_power : float
temperature : float
References
----------
[1] Wikipedia: Johnson–Nyquist noise
(https://en.wikipedia.org/wiki/Johnson%E2%80%93Nyquist_noise)
"""
def __init__(
self,
signal_power,
temperature=300.0,
label="Johnson Noise",
representation=None,
):
super().__init__(temperature=temperature, label=label, n_sided=1)
self.signal_power = signal_power
def script_L(self, freqs):
"""
Calculate the script_L representation of the Johnson noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
Returns
-------
1darray :
The script_L representation of the Johnson noise.
"""
# Implement L(f), all other representations can be calculated by virtue of
# subclassing OscillatorNoiseModel.
kb = 1.380649e-23 # Boltzmann constant in J/K
freqs = np.ones(len(freqs))
        # 1e-3: power referenced to mW; signal power is subtracted below; `freqs` (all ones) sets the length
noise = (
10 * np.log10(4 * kb * self.temperature / 1e-3) * freqs - self.signal_power
)
# subtract 3 dB since above quantity is defined as one-sided according to [1]
noise -= 3
return noise
class PhotonShotNoise(OscillatorNoiseModel):
"""
Shot noise of an optical beatnote
Parameters
----------
signal_power : float
Signal power in dBm / Hz
radiant_sensitivity : float (default 0.3)
Radiant sensitivity of the photodiode in A/W. Default taken for Hamamatsu G4176.
optical_power : float (default 1e-3)
optical power in W
resisitivity : float (default 50)
resistivity in Ohm.
"""
def __init__(
self,
signal_power,
optical_power=1e-3,
radiant_sensitivity=0.3,
representation=None,
resistivity=50,
label="Photon shot noise",
):
super().__init__(
radiant_sensitivity=radiant_sensitivity,
resistivity=resistivity,
label=label,
optical_power=optical_power,
n_sided=1,
)
self.signal_power = signal_power
def script_L(self, freqs):
"""
Calculate the script_L representation of the Johnson noise.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
Returns
-------
1darray :
The script_L representation of the photon shot noise.
"""
e = 1.6e-19 # electron charge in C
freqs = np.ones(len(freqs))
noise = (
10
* np.log10(
2
* e
* self.radiant_sensitivity
* self.optical_power
* self.resistivity
/ 1e-3
)
* freqs
- self.signal_power
)
# FIXME: Assume above expression is a one-sided distribution, but didn't check.
noise -= 3
return noise
class NoiseFloor(OscillatorNoiseModel):
"""
    Used for converting a spectrum analyzer measurement to an oscillator noise model of
    the noise floor by dividing the detection noise by the carrier signal amplitude.
Parameters
----------
signal_power : float
Signal power in dBm / Hz
noise_floor : float
measured noise floor in dBm / Hz
divide_by : int (optional)
        divide-by factor if a prescaler was used for the measurements
Attributes
----------
signal_power : float
Signal power in dBm / Hz
noise_floor : float
measured noise floor in dBm / Hz
divide_by : int
        divide-by factor if a prescaler was used for the measurements
"""
def __init__(
self,
signal_power,
noise_floor,
representation=None,
divide_by=1,
label="Detection noise",
):
super().__init__(label=label, divide_by=divide_by, n_sided=1)
self.signal_power = signal_power
self.noise_floor = noise_floor
def script_L(self, freqs):
"""
Calculate the script_L representation of the noise floor.
Parameters
----------
freqs : list_like
Frequencies where the model is evaluated.
Returns
-------
1darray :
The script_L representation of the noise floor.
"""
freqs = np.ones(len(freqs))
noise = (
freqs * self.noise_floor + 20 * np.log10(self.divide_by) - self.signal_power
)
noise -= 3 # is measured as one-sided distribution
return noise
class BetaLine(OscillatorNoiseModel):
"""
The beta separation line as a function of frequency. It is originally defined for
the single-sided spectral density (in Hz²/Hz).
References
----------
[1] <NAME>., <NAME>., & <NAME>. (2010). Simple approach to the
relation between laser frequency noise and laser line shape.
Applied Optics, 49(25), 4801.
https://doi.org/10.1364/AO.49.004801
"""
def __init__(self, representation="psd_freq", **kwargs):
super().__init__(
representation=representation, label=r"$\beta$ separation line", **kwargs
)
def psd_freq(self, freqs):
"""
The values of the beta separation line in Hz²/Hz as a function of frequency
Parameters
----------
freqs : float or list_like
Frequency in Hz
Returns
-------
1d array :
The values of the beta separation line.
"""
return 8 * np.log(2) * np.array(freqs) / np.pi**2
def intersection(self, density, which="first"):
"""
        Returns the frequency where the PSD and the beta separation line intersect.
Parameters
----------
density : OscillatorNoise
            An OscillatorNoise object. The correct representation (PSD of frequency) will
automatically be used.
which : {'first', 'last'}
if there are more intersections between beta separation line and PSD, this
argument determines whether the lowest (first, default) or highest (last)
intersection frequency should be returned.
Returns
-------
float :
the frequency where the two lines intersect in Hz
"""
psd_vals = density.psd_freq
beta_vals = self.values(density.freqs)
# indices of the intersections, i.e. where the sign of the difference between
# the PSD and the beta separation line switches.
idx = np.argwhere(np.diff(np.sign(psd_vals - beta_vals))).flatten()
first_or_last = {"first": 0, "last": -1}
if idx.size == 0: # array is empty
return np.inf
return density.freqs[idx][first_or_last[which]]
def linewidth(self, density, f_min=1e3, which="first"):
"""
The FWHM linewidth according to equation (10) in [1].
Parameters
----------
density : OscillatorNoise
            An OscillatorNoise object. The correct representation (PSD of frequency) will
automatically be used.
f_min : float
minimum values of the frequency that should be considered in Hz. The
default value for f_min (1e-3) corresponds to 1 ms.
which : {'first', 'last'}
if there are more intersections between beta separation line and PSD, this
argument determines whether the lowest (first, default) or highest (last)
intersection frequency should be returned.
"""
f_max = self.intersection(density, which=which)
idx = np.where(np.logical_and(density.freqs <= f_max, density.freqs >= f_min))
freqs = density.freqs[idx]
psd_vals_over_line = density.values[idx]
# equation (10) in [1]
area = np.trapz(psd_vals_over_line, x=freqs)
fwhm = np.sqrt(8 * np.log(2) * area) # equation (9) in [1]
return fwhm
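# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# The beta separation line evaluates to 8*ln(2)/pi^2 * f, i.e. roughly 0.56 * f
# in Hz^2/Hz, so at f = 1 kHz it is approximately 562 Hz^2/Hz.
if __name__ == "__main__":
    _beta = BetaLine()
    print(_beta.values([1e3]))  # approximately [562]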
class AtomShotNoise(FreqModel):
"""
Atomic shot noise of an atom interferometer gravimeter.
Parameters
----------
n_atoms : float
Number of atoms.
contrast : float
Peak-to-peak contrast of the fringe.
T : float
Interferometer time in seconds.
keff : float
Effective wavevector of the atom interferometer in 1/m.
"""
def __init__(self, n_atoms, contrast, T, keff, **kwargs):
        super().__init__(n_atoms=n_atoms, contrast=contrast, T=T, keff=keff, **kwargs)
def values(self, freqs):
"""Shot noise limit in m/s²."""
sigma_p = 1 / np.sqrt(self.n_atoms) # atomic shot noise
sigma_g = 2 * sigma_p / (self.contrast * self.keff * self.T**2) # in m/s**2
return sigma_g
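# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Representative (assumed) numbers for a Rb atom gravimeter: 1e6 atoms,
# peak-to-peak contrast 0.5, T = 100 ms and keff ~ 1.61e7 rad/m give a
# single-shot limit of roughly 2.5e-8 m/s^2.
if __name__ == "__main__":
    _shot = AtomShotNoise(n_atoms=1e6, contrast=0.5, T=0.1, keff=1.61e7)
    print(_shot.values(None))  # ~2.5e-8 m/s^2; `freqs` is unused by this model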
|
[
"numpy.trapz",
"numpy.log",
"numpy.logical_and",
"matplotlib.pyplot.subplots",
"numpy.array",
"numpy.sign",
"numpy.log10",
"matplotlib.pyplot.grid",
"numpy.sqrt"
] |
[((1841, 1877), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""both"""', 'ls': '"""-"""'}), "(True, which='both', ls='-')\n", (1849, 1877), True, 'import matplotlib.pyplot as plt\n'), ((21402, 21439), 'numpy.trapz', 'np.trapz', (['psd_vals_over_line'], {'x': 'freqs'}), '(psd_vals_over_line, x=freqs)\n', (21410, 21439), True, 'import numpy as np\n'), ((1585, 1599), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1597, 1599), True, 'import matplotlib.pyplot as plt\n'), ((5482, 5497), 'numpy.array', 'np.array', (['freqs'], {}), '(freqs)\n', (5490, 5497), True, 'import numpy as np\n'), ((7389, 7400), 'numpy.log10', 'np.log10', (['L'], {}), '(L)\n', (7397, 7400), True, 'import numpy as np\n'), ((12325, 12340), 'numpy.array', 'np.array', (['freqs'], {}), '(freqs)\n', (12333, 12340), True, 'import numpy as np\n'), ((12942, 12958), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (12950, 12958), True, 'import numpy as np\n'), ((21208, 21270), 'numpy.logical_and', 'np.logical_and', (['(density.freqs <= f_max)', '(density.freqs >= f_min)'], {}), '(density.freqs <= f_max, density.freqs >= f_min)\n', (21222, 21270), True, 'import numpy as np\n'), ((22158, 22179), 'numpy.sqrt', 'np.sqrt', (['self.n_atoms'], {}), '(self.n_atoms)\n', (22165, 22179), True, 'import numpy as np\n'), ((11089, 11107), 'numpy.array', 'np.array', (['exponent'], {}), '(exponent)\n', (11097, 11107), True, 'import numpy as np\n'), ((19128, 19143), 'numpy.array', 'np.array', (['freqs'], {}), '(freqs)\n', (19136, 19143), True, 'import numpy as np\n'), ((12483, 12539), 'numpy.logical_and', 'np.logical_and', (['(freqs > previous_f_edge)', '(freqs <= f_edge)'], {}), '(freqs > previous_f_edge, freqs <= f_edge)\n', (12497, 12539), True, 'import numpy as np\n'), ((14483, 14526), 'numpy.log10', 'np.log10', (['(4 * kb * self.temperature / 0.001)'], {}), '(4 * kb * self.temperature / 0.001)\n', (14491, 14526), True, 'import numpy as np\n'), ((16105, 16200), 'numpy.log10', 'np.log10', (['(2 * e * self.radiant_sensitivity * self.optical_power * self.resistivity /\n 0.001)'], {}), '(2 * e * self.radiant_sensitivity * self.optical_power * self.\n resistivity / 0.001)\n', (16113, 16200), True, 'import numpy as np\n'), ((17988, 18012), 'numpy.log10', 'np.log10', (['self.divide_by'], {}), '(self.divide_by)\n', (17996, 18012), True, 'import numpy as np\n'), ((19116, 19125), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (19122, 19125), True, 'import numpy as np\n'), ((21467, 21476), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (21473, 21476), True, 'import numpy as np\n'), ((6886, 6901), 'numpy.array', 'np.array', (['freqs'], {}), '(freqs)\n', (6894, 6901), True, 'import numpy as np\n'), ((20142, 20171), 'numpy.sign', 'np.sign', (['(psd_vals - beta_vals)'], {}), '(psd_vals - beta_vals)\n', (20149, 20171), True, 'import numpy as np\n')]
|
"""Code for setting up the optimization problem for certification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import numpy as np
from scipy.sparse.linalg import eigs, LinearOperator
import tensorflow as tf
from tensorflow.contrib import autograph
from cleverhans.experimental.certification import utils
# Bound on lowest value of certificate to check for numerical errors
LOWER_CERT_BOUND = -10.0
UPDATE_PARAM_CONSTANT = -0.1
class Optimization(object):
"""Class that sets up and runs the optimization of dual_formulation"""
def __init__(self, dual_formulation_object, sess, optimization_params):
"""Initialize the class variables.
Args:
dual_formulation_object: Instance of DualFormulation that contains the
dual variables and objective
sess: tf session to be used to run
      optimization_params: Dictionary with the following keys:
eig_num_iter - Number of iterations to run for computing minimum eigen
value
eig_learning_rate - Learning rate for minimum eigen value iterations
init_smooth - Starting value of the smoothness parameter (typically
around 0.001)
smooth_decay - The factor by which to decay after every outer loop epoch
optimizer - one of gd, adam, momentum or adagrad
eig_type - The method to compute eigenvalues (TF or SCIPY)
"""
self.sess = sess
self.dual_object = dual_formulation_object
self.params = optimization_params
self.penalty_placeholder = tf.placeholder(tf.float32, shape=[])
# The dimensionality of matrix M is the sum of sizes of all layers + 1
# The + 1 comes due to a row and column of M representing the linear terms
self.eig_init_vec_placeholder = tf.placeholder(
tf.float32, shape=[1 + self.dual_object.dual_index[-1], 1])
self.smooth_placeholder = tf.placeholder(tf.float32, shape=[])
self.eig_num_iter_placeholder = tf.placeholder(tf.int32, shape=[])
self.current_eig_val_estimate = None
# Create graph for optimization
self.prepare_for_optimization()
def tf_min_eig_vec(self):
"""Function for min eigen vector using tf's full eigen decomposition."""
# Full eigen decomposition requires the explicit psd matrix M
_, matrix_m = self.dual_object.get_full_psd_matrix()
[eig_vals, eig_vectors] = tf.self_adjoint_eig(matrix_m)
index = tf.argmin(eig_vals)
return tf.reshape(
eig_vectors[:, index], shape=[eig_vectors.shape[0].value, 1])
def tf_smooth_eig_vec(self):
"""Function that returns smoothed version of min eigen vector."""
_, matrix_m = self.dual_object.get_full_psd_matrix()
# Easier to think in terms of max so negating the matrix
[eig_vals, eig_vectors] = tf.self_adjoint_eig(-matrix_m)
exp_eig_vals = tf.exp(tf.divide(eig_vals, self.smooth_placeholder))
scaling_factor = tf.reduce_sum(exp_eig_vals)
# Multiplying each eig vector by exponential of corresponding eig value
# Scaling factor normalizes the vector to be unit norm
eig_vec_smooth = tf.divide(
tf.matmul(eig_vectors, tf.diag(tf.sqrt(exp_eig_vals))),
tf.sqrt(scaling_factor))
return tf.reshape(
tf.reduce_sum(eig_vec_smooth, axis=1),
shape=[eig_vec_smooth.shape[0].value, 1])
def get_min_eig_vec_proxy(self, use_tf_eig=False):
"""Computes the min eigen value and corresponding vector of matrix M.
Args:
use_tf_eig: Whether to use tf's default full eigen decomposition
Returns:
eig_vec: Minimum absolute eigen value
eig_val: Corresponding eigen vector
"""
if use_tf_eig:
# If smoothness parameter is too small, essentially no smoothing
# Just output the eigen vector corresponding to min
return tf.cond(self.smooth_placeholder < 1E-8,
self.tf_min_eig_vec,
self.tf_smooth_eig_vec)
# Using autograph to automatically handle
# the control flow of minimum_eigen_vector
min_eigen_tf = autograph.to_graph(utils.minimum_eigen_vector)
def _vector_prod_fn(x):
return self.dual_object.get_psd_product(x)
estimated_eigen_vector = min_eigen_tf(
x=self.eig_init_vec_placeholder,
num_steps=self.eig_num_iter_placeholder,
learning_rate=self.params['eig_learning_rate'],
vector_prod_fn=_vector_prod_fn)
return estimated_eigen_vector
def get_scipy_eig_vec(self):
"""Computes scipy estimate of min eigenvalue for matrix M.
Returns:
eig_vec: Minimum absolute eigen value
eig_val: Corresponding eigen vector
"""
if not self.params['has_conv']:
matrix_m = self.sess.run(self.dual_object.matrix_m)
min_eig_vec_val, estimated_eigen_vector = eigs(matrix_m, k=1, which='SR',
tol=1E-4)
min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val
else:
dim = self.dual_object.matrix_m_dimension
input_vector = tf.placeholder(tf.float32, shape=(dim, 1))
output_vector = self.dual_object.get_psd_product(input_vector)
def np_vector_prod_fn(np_vector):
np_vector = np.reshape(np_vector, [-1, 1])
output_np_vector = self.sess.run(output_vector, feed_dict={input_vector:np_vector})
return output_np_vector
linear_operator = LinearOperator((dim, dim), matvec=np_vector_prod_fn)
# Performing shift invert scipy operation when eig val estimate is available
min_eig_vec_val, estimated_eigen_vector = eigs(linear_operator,
k=1, which='SR', tol=1E-4)
min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val
def prepare_for_optimization(self):
"""Create tensorflow op for running one step of descent."""
if self.params['eig_type'] == 'TF':
self.eig_vec_estimate = self.get_min_eig_vec_proxy()
else:
self.eig_vec_estimate = tf.placeholder(tf.float32, shape=(self.dual_object.matrix_m_dimension, 1))
self.stopped_eig_vec_estimate = tf.stop_gradient(self.eig_vec_estimate)
# Eig value is v^\top M v, where v is eigen vector
self.eig_val_estimate = tf.matmul(
tf.transpose(self.stopped_eig_vec_estimate),
self.dual_object.get_psd_product(self.stopped_eig_vec_estimate))
# Penalizing negative of min eigen value because we want min eig value
# to be positive
self.total_objective = (
self.dual_object.unconstrained_objective
+ 0.5 * tf.square(
tf.maximum(-self.penalty_placeholder * self.eig_val_estimate, 0)))
global_step = tf.Variable(0, trainable=False)
# Set up learning rate as a placeholder
self.learning_rate = tf.placeholder(tf.float32, shape=[])
# Set up the optimizer
if self.params['optimizer'] == 'adam':
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
elif self.params['optimizer'] == 'adagrad':
self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate)
elif self.params['optimizer'] == 'momentum':
self.optimizer = tf.train.MomentumOptimizer(
learning_rate=self.learning_rate,
momentum=self.params['momentum_parameter'],
use_nesterov=True)
else:
self.optimizer = tf.train.GradientDescentOptimizer(
learning_rate=self.learning_rate)
# Write out the projection step
self.train_step = self.optimizer.minimize(
self.total_objective, global_step=global_step)
self.sess.run(tf.global_variables_initializer())
# Projecting the dual variables
proj_ops = []
for i in range(self.dual_object.nn_params.num_hidden_layers + 1):
# Lambda_pos is non negative for switch indices,
# Unconstrained for positive indices
# Zero for negative indices
proj_ops.append(self.dual_object.lambda_pos[i].assign(
tf.multiply(self.dual_object.positive_indices[i],
self.dual_object.lambda_pos[i])+
tf.multiply(self.dual_object.switch_indices[i],
tf.nn.relu(self.dual_object.lambda_pos[i]))))
proj_ops.append(self.dual_object.lambda_neg[i].assign(
tf.multiply(self.dual_object.negative_indices[i],
self.dual_object.lambda_neg[i])+
tf.multiply(self.dual_object.switch_indices[i],
tf.nn.relu(self.dual_object.lambda_neg[i]))))
# Lambda_quad is only non zero and positive for switch
proj_ops.append(self.dual_object.lambda_quad[i].assign(
tf.multiply(self.dual_object.switch_indices[i],
tf.nn.relu(self.dual_object.lambda_quad[i]))))
# Lambda_lu is always non negative
proj_ops.append(self.dual_object.lambda_lu[i].assign(
tf.nn.relu(self.dual_object.lambda_lu[i])))
self.proj_step = tf.group(proj_ops)
# Create folder for saving stats if the folder is not None
if (self.params.get('stats_folder') and
not tf.gfile.IsDirectory(self.params['stats_folder'])):
tf.gfile.MkDir(self.params['stats_folder'])
self.current_scipy_eig_val = None
def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val,
penalty_val, learning_rate_val):
"""Run one step of gradient descent for optimization.
Args:
eig_init_vec_val: Start value for eigen value computations
eig_num_iter_val: Number of iterations to run for eigen computations
smooth_val: Value of smoothness parameter
penalty_val: Value of penalty for the current step
learning_rate_val: Value of learning rate
Returns:
found_cert: True is negative certificate is found, False otherwise
"""
# Project onto feasible set of dual variables
if self.current_step != 0 and self.current_step % self.params['projection_steps'] == 0:
current_certificate = self.dual_object.compute_certificate()
tf.logging.info('Inner step: %d, current value of certificate: %f',
self.current_step, current_certificate)
# Sometimes due to either overflow or instability in inverses,
# the returned certificate is large and negative -- keeping a check
if LOWER_CERT_BOUND < current_certificate < 0:
tf.logging.info('Found certificate of robustness!')
return True
# Running step
step_feed_dict = {self.eig_init_vec_placeholder: eig_init_vec_val,
self.eig_num_iter_placeholder: eig_num_iter_val,
self.smooth_placeholder: smooth_val,
self.penalty_placeholder: penalty_val,
self.learning_rate: learning_rate_val}
if self.params['eig_type'] == 'SCIPY':
current_eig_vector, self.current_eig_val_estimate = self.get_scipy_eig_vec()
step_feed_dict.update({
self.eig_vec_estimate: current_eig_vector
})
self.sess.run(self.train_step, feed_dict=step_feed_dict)
[
_, self.current_eig_vec_val, self.current_eig_val_estimate
] = self.sess.run([
self.proj_step,
self.eig_vec_estimate,
self.eig_val_estimate
], feed_dict=step_feed_dict)
if self.current_step % self.params['print_stats_steps'] == 0:
[self.current_total_objective, self.current_unconstrained_objective,
self.current_eig_vec_val,
self.current_eig_val_estimate,
self.current_nu] = self.sess.run(
[self.total_objective,
self.dual_object.unconstrained_objective,
self.eig_vec_estimate,
self.eig_val_estimate,
self.dual_object.nu], feed_dict=step_feed_dict)
stats = {
'total_objective':
float(self.current_total_objective),
'unconstrained_objective':
float(self.current_unconstrained_objective),
'min_eig_val_estimate':
float(self.current_eig_val_estimate)
}
tf.logging.debug('Current inner step: %d, optimization stats: %s',
self.current_step, stats)
if self.params['stats_folder'] is not None:
stats = json.dumps(stats)
filename = os.path.join(self.params['stats_folder'],
str(self.current_step) + '.json')
with tf.gfile.Open(filename) as file_f:
file_f.write(stats)
return False
def run_optimization(self):
"""Run the optimization, call run_one_step with suitable placeholders.
Returns:
True if certificate is found
False otherwise
"""
penalty_val = self.params['init_penalty']
# Don't use smoothing initially - very inaccurate for large dimension
self.smooth_on = False
smooth_val = 0
learning_rate_val = self.params['init_learning_rate']
self.current_outer_step = 1
while self.current_outer_step <= self.params['outer_num_steps']:
tf.logging.info('Running outer step %d with penalty %f',
self.current_outer_step, penalty_val)
# Running inner loop of optimization with current_smooth_val,
# current_penalty as smoothness parameters and penalty respectively
self.current_step = 0
# Run first step with random eig initialization and large number of steps
found_cert = self.run_one_step(
np.random.random(size=(1 + self.dual_object.dual_index[-1], 1)),
self.params['large_eig_num_steps'], smooth_val, penalty_val, learning_rate_val)
if found_cert:
return True
while self.current_step < self.params['inner_num_steps']:
self.current_step = self.current_step + 1
found_cert = self.run_one_step(self.current_eig_vec_val,
self.params['small_eig_num_steps'],
smooth_val, penalty_val,
learning_rate_val)
if found_cert:
          return True
      # Update penalty only if it looks like the current objective is being optimized
if self.current_total_objective < UPDATE_PARAM_CONSTANT:
penalty_val = penalty_val * self.params['beta']
learning_rate_val = learning_rate_val*self.params['learning_rate_decay']
else:
# To get more accurate gradient estimate
self.params['small_eig_num_steps'] = (
1.5 * self.params['small_eig_num_steps'])
# If eigen values seem small enough, turn on smoothing
# useful only when performing full eigen decomposition
if np.abs(self.current_eig_val_estimate) < 0.01:
smooth_val = self.params['smoothness_parameter']
self.current_outer_step = self.current_outer_step + 1
return False
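# --- Hedged standalone sketch (editor's addition, not part of CleverHans) ---
# Mirrors the matrix-free pattern used in get_scipy_eig_vec: wrap a
# matrix-vector product in a LinearOperator and ask scipy's eigs for the
# eigenvalue with smallest real part ('SR'). The 5x5 symmetric matrix is an
# arbitrary assumption used only to keep the example self-contained.
if __name__ == '__main__':
  _dim = 5
  _rng = np.random.RandomState(0)
  _mat = _rng.randn(_dim, _dim)
  _mat = (_mat + _mat.T) / 2.0  # symmetrize so the spectrum is real
  def _np_vector_prod_fn(np_vector):
    return np.matmul(_mat, np.reshape(np_vector, [-1, 1]))
  _op = LinearOperator((_dim, _dim), matvec=_np_vector_prod_fn)
  _min_eig_val, _min_eig_vec = eigs(_op, k=1, which='SR', tol=1E-4)
  print(np.real(_min_eig_val), np.reshape(_min_eig_vec, [-1, 1]).shape)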
|
[
"tensorflow.cond",
"tensorflow.reduce_sum",
"numpy.abs",
"tensorflow.logging.info",
"tensorflow.logging.debug",
"tensorflow.maximum",
"tensorflow.reshape",
"json.dumps",
"tensorflow.multiply",
"tensorflow.argmin",
"tensorflow.Variable",
"tensorflow.divide",
"scipy.sparse.linalg.LinearOperator",
"tensorflow.self_adjoint_eig",
"tensorflow.sqrt",
"tensorflow.contrib.autograph.to_graph",
"tensorflow.nn.relu",
"tensorflow.placeholder",
"numpy.reshape",
"numpy.real",
"tensorflow.gfile.IsDirectory",
"tensorflow.global_variables_initializer",
"tensorflow.stop_gradient",
"tensorflow.train.AdagradOptimizer",
"tensorflow.transpose",
"tensorflow.group",
"tensorflow.train.MomentumOptimizer",
"tensorflow.train.GradientDescentOptimizer",
"scipy.sparse.linalg.eigs",
"tensorflow.gfile.Open",
"numpy.random.random",
"tensorflow.gfile.MkDir",
"tensorflow.train.AdamOptimizer"
] |
[((1580, 1616), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (1594, 1616), True, 'import tensorflow as tf\n'), ((1808, 1882), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[1 + self.dual_object.dual_index[-1], 1]'}), '(tf.float32, shape=[1 + self.dual_object.dual_index[-1], 1])\n', (1822, 1882), True, 'import tensorflow as tf\n'), ((1922, 1958), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (1936, 1958), True, 'import tensorflow as tf\n'), ((1995, 2029), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[]'}), '(tf.int32, shape=[])\n', (2009, 2029), True, 'import tensorflow as tf\n'), ((2403, 2432), 'tensorflow.self_adjoint_eig', 'tf.self_adjoint_eig', (['matrix_m'], {}), '(matrix_m)\n', (2422, 2432), True, 'import tensorflow as tf\n'), ((2445, 2464), 'tensorflow.argmin', 'tf.argmin', (['eig_vals'], {}), '(eig_vals)\n', (2454, 2464), True, 'import tensorflow as tf\n'), ((2476, 2548), 'tensorflow.reshape', 'tf.reshape', (['eig_vectors[:, index]'], {'shape': '[eig_vectors.shape[0].value, 1]'}), '(eig_vectors[:, index], shape=[eig_vectors.shape[0].value, 1])\n', (2486, 2548), True, 'import tensorflow as tf\n'), ((2808, 2838), 'tensorflow.self_adjoint_eig', 'tf.self_adjoint_eig', (['(-matrix_m)'], {}), '(-matrix_m)\n', (2827, 2838), True, 'import tensorflow as tf\n'), ((2932, 2959), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['exp_eig_vals'], {}), '(exp_eig_vals)\n', (2945, 2959), True, 'import tensorflow as tf\n'), ((4062, 4108), 'tensorflow.contrib.autograph.to_graph', 'autograph.to_graph', (['utils.minimum_eigen_vector'], {}), '(utils.minimum_eigen_vector)\n', (4080, 4108), False, 'from tensorflow.contrib import autograph\n'), ((6247, 6286), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['self.eig_vec_estimate'], {}), '(self.eig_vec_estimate)\n', (6263, 6286), True, 'import tensorflow as tf\n'), ((6805, 6836), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (6816, 6836), True, 'import tensorflow as tf\n'), ((6906, 6942), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (6920, 6942), True, 'import tensorflow as tf\n'), ((9045, 9063), 'tensorflow.group', 'tf.group', (['proj_ops'], {}), '(proj_ops)\n', (9053, 9063), True, 'import tensorflow as tf\n'), ((2865, 2909), 'tensorflow.divide', 'tf.divide', (['eig_vals', 'self.smooth_placeholder'], {}), '(eig_vals, self.smooth_placeholder)\n', (2874, 2909), True, 'import tensorflow as tf\n'), ((3199, 3222), 'tensorflow.sqrt', 'tf.sqrt', (['scaling_factor'], {}), '(scaling_factor)\n', (3206, 3222), True, 'import tensorflow as tf\n'), ((3255, 3292), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['eig_vec_smooth'], {'axis': '(1)'}), '(eig_vec_smooth, axis=1)\n', (3268, 3292), True, 'import tensorflow as tf\n'), ((3822, 3912), 'tensorflow.cond', 'tf.cond', (['(self.smooth_placeholder < 1e-08)', 'self.tf_min_eig_vec', 'self.tf_smooth_eig_vec'], {}), '(self.smooth_placeholder < 1e-08, self.tf_min_eig_vec, self.\n tf_smooth_eig_vec)\n', (3829, 3912), True, 'import tensorflow as tf\n'), ((4796, 4839), 'scipy.sparse.linalg.eigs', 'eigs', (['matrix_m'], {'k': '(1)', 'which': '"""SR"""', 'tol': '(0.0001)'}), "(matrix_m, k=1, which='SR', tol=0.0001)\n", (4800, 4839), False, 'from scipy.sparse.linalg import eigs, LinearOperator\n'), ((5113, 5155), 'tensorflow.placeholder', 
'tf.placeholder', (['tf.float32'], {'shape': '(dim, 1)'}), '(tf.float32, shape=(dim, 1))\n', (5127, 5155), True, 'import tensorflow as tf\n'), ((5465, 5517), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(dim, dim)'], {'matvec': 'np_vector_prod_fn'}), '((dim, dim), matvec=np_vector_prod_fn)\n', (5479, 5517), False, 'from scipy.sparse.linalg import eigs, LinearOperator\n'), ((5649, 5699), 'scipy.sparse.linalg.eigs', 'eigs', (['linear_operator'], {'k': '(1)', 'which': '"""SR"""', 'tol': '(0.0001)'}), "(linear_operator, k=1, which='SR', tol=0.0001)\n", (5653, 5699), False, 'from scipy.sparse.linalg import eigs, LinearOperator\n'), ((6136, 6210), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(self.dual_object.matrix_m_dimension, 1)'}), '(tf.float32, shape=(self.dual_object.matrix_m_dimension, 1))\n', (6150, 6210), True, 'import tensorflow as tf\n'), ((6389, 6432), 'tensorflow.transpose', 'tf.transpose', (['self.stopped_eig_vec_estimate'], {}), '(self.stopped_eig_vec_estimate)\n', (6401, 6432), True, 'import tensorflow as tf\n'), ((7037, 7093), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (7059, 7093), True, 'import tensorflow as tf\n'), ((7722, 7755), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7753, 7755), True, 'import tensorflow as tf\n'), ((9242, 9285), 'tensorflow.gfile.MkDir', 'tf.gfile.MkDir', (["self.params['stats_folder']"], {}), "(self.params['stats_folder'])\n", (9256, 9285), True, 'import tensorflow as tf\n'), ((10120, 10232), 'tensorflow.logging.info', 'tf.logging.info', (['"""Inner step: %d, current value of certificate: %f"""', 'self.current_step', 'current_certificate'], {}), "('Inner step: %d, current value of certificate: %f', self.\n current_step, current_certificate)\n", (10135, 10232), True, 'import tensorflow as tf\n'), ((12129, 12226), 'tensorflow.logging.debug', 'tf.logging.debug', (['"""Current inner step: %d, optimization stats: %s"""', 'self.current_step', 'stats'], {}), "('Current inner step: %d, optimization stats: %s', self.\n current_step, stats)\n", (12145, 12226), True, 'import tensorflow as tf\n'), ((13069, 13168), 'tensorflow.logging.info', 'tf.logging.info', (['"""Running outer step %d with penalty %f"""', 'self.current_outer_step', 'penalty_val'], {}), "('Running outer step %d with penalty %f', self.\n current_outer_step, penalty_val)\n", (13084, 13168), True, 'import tensorflow as tf\n'), ((4926, 4950), 'numpy.real', 'np.real', (['min_eig_vec_val'], {}), '(min_eig_vec_val)\n', (4933, 4950), True, 'import numpy as np\n'), ((4973, 5016), 'numpy.reshape', 'np.reshape', (['estimated_eigen_vector', '[-1, 1]'], {}), '(estimated_eigen_vector, [-1, 1])\n', (4983, 5016), True, 'import numpy as np\n'), ((5286, 5316), 'numpy.reshape', 'np.reshape', (['np_vector', '[-1, 1]'], {}), '(np_vector, [-1, 1])\n', (5296, 5316), True, 'import numpy as np\n'), ((5786, 5810), 'numpy.real', 'np.real', (['min_eig_vec_val'], {}), '(min_eig_vec_val)\n', (5793, 5810), True, 'import numpy as np\n'), ((5833, 5876), 'numpy.reshape', 'np.reshape', (['estimated_eigen_vector', '[-1, 1]'], {}), '(estimated_eigen_vector, [-1, 1])\n', (5843, 5876), True, 'import numpy as np\n'), ((7165, 7224), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (7190, 7224), True, 'import tensorflow as tf\n'), ((9184, 
9233), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (["self.params['stats_folder']"], {}), "(self.params['stats_folder'])\n", (9204, 9233), True, 'import tensorflow as tf\n'), ((10455, 10506), 'tensorflow.logging.info', 'tf.logging.info', (['"""Found certificate of robustness!"""'], {}), "('Found certificate of robustness!')\n", (10470, 10506), True, 'import tensorflow as tf\n'), ((12311, 12328), 'json.dumps', 'json.dumps', (['stats'], {}), '(stats)\n', (12321, 12328), False, 'import json\n'), ((13484, 13547), 'numpy.random.random', 'np.random.random', ([], {'size': '(1 + self.dual_object.dual_index[-1], 1)'}), '(size=(1 + self.dual_object.dual_index[-1], 1))\n', (13500, 13547), True, 'import numpy as np\n'), ((14669, 14706), 'numpy.abs', 'np.abs', (['self.current_eig_val_estimate'], {}), '(self.current_eig_val_estimate)\n', (14675, 14706), True, 'import numpy as np\n'), ((3166, 3187), 'tensorflow.sqrt', 'tf.sqrt', (['exp_eig_vals'], {}), '(exp_eig_vals)\n', (3173, 3187), True, 'import tensorflow as tf\n'), ((6720, 6784), 'tensorflow.maximum', 'tf.maximum', (['(-self.penalty_placeholder * self.eig_val_estimate)', '(0)'], {}), '(-self.penalty_placeholder * self.eig_val_estimate, 0)\n', (6730, 6784), True, 'import tensorflow as tf\n'), ((7297, 7425), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'self.learning_rate', 'momentum': "self.params['momentum_parameter']", 'use_nesterov': '(True)'}), "(learning_rate=self.learning_rate, momentum=self.\n params['momentum_parameter'], use_nesterov=True)\n", (7323, 7425), True, 'import tensorflow as tf\n'), ((7485, 7552), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (7518, 7552), True, 'import tensorflow as tf\n'), ((8979, 9020), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.dual_object.lambda_lu[i]'], {}), '(self.dual_object.lambda_lu[i])\n', (8989, 9020), True, 'import tensorflow as tf\n'), ((12469, 12492), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['filename'], {}), '(filename)\n', (12482, 12492), True, 'import tensorflow as tf\n'), ((8085, 8171), 'tensorflow.multiply', 'tf.multiply', (['self.dual_object.positive_indices[i]', 'self.dual_object.lambda_pos[i]'], {}), '(self.dual_object.positive_indices[i], self.dual_object.\n lambda_pos[i])\n', (8096, 8171), True, 'import tensorflow as tf\n'), ((8387, 8473), 'tensorflow.multiply', 'tf.multiply', (['self.dual_object.negative_indices[i]', 'self.dual_object.lambda_neg[i]'], {}), '(self.dual_object.negative_indices[i], self.dual_object.\n lambda_neg[i])\n', (8398, 8473), True, 'import tensorflow as tf\n'), ((8821, 8864), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.dual_object.lambda_quad[i]'], {}), '(self.dual_object.lambda_quad[i])\n', (8831, 8864), True, 'import tensorflow as tf\n'), ((8270, 8312), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.dual_object.lambda_pos[i]'], {}), '(self.dual_object.lambda_pos[i])\n', (8280, 8312), True, 'import tensorflow as tf\n'), ((8572, 8614), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.dual_object.lambda_neg[i]'], {}), '(self.dual_object.lambda_neg[i])\n', (8582, 8614), True, 'import tensorflow as tf\n')]
|
"""Test NMS.
Run the examples described in `ONNX docs`_.
.. _ONNX docs: https://github.com/onnx/onnx/blob/main/docs/Operators.md#NonMaxSuppression
"""
# import pytest
import numpy as np
import box_utils._c.box_nms as box_nms
def test_nms_suppress_by_iou():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array([[[
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array(
[[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_suppress_by_IOU_and_scores():
"""Test NMS - suppress by IoU and scores."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array(
[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.4]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_single_box():
"""Test NMS - single box."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0]
]]).astype(np.float32)
scores = np.array([[[0.9]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_identical_boxes():
"""Test NMS - identical boxes."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
]]).astype(np.float32)
scores = np.array([[[
0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9
]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_limit_output_size():
"""Test NMS - limit output size."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array([[[
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_two_batches():
"""Test NMS - two batches."""
# --
boxes = np.array([[[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]],
[[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]]]).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]],
[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_two_classes():
"""Test NMS - two classes."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array([[
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3],
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_center_point_box_format():
"""Test NMS - center-point box format."""
# --
boxes = np.array([[
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.6, 1.0, 1.0],
[0.5, 0.4, 1.0, 1.0],
[0.5, 10.5, 1.0, 1.0],
[0.5, 10.6, 1.0, 1.0],
[0.5, 100.5, 1.0, 1.0]
]]).astype(np.float32)
scores = np.array([[
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
# --
result = box_nms.xywh_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_flipped_coordinates():
"""Test NMS - flipped coordinates."""
# --
boxes = np.array([[
[1.0, 1.0, 0.0, 0.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, 0.9, 1.0, -0.1],
[0.0, 10.0, 1.0, 11.0],
[1.0, 10.1, 0.0, 11.1],
[1.0, 101.0, 0.0, 100.0]
]]).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
# ---------------------------------------------------------
# box_nms can also be called without the batch and/or class dimensions.
# ---------------------------------------------------------
def test_nms_suppress_by_iou_nobatch():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([[
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array(
[[0, 3], [0, 0], [0, 5]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_suppress_by_iou_noclass():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([3, 0, 5]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_suppress_by_iou_notopk():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]).astype(np.float32)
max_output_boxes_per_class = np.array([-1]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([3, 0, 5]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_two_classes_nobatch():
"""Test NMS - two classes."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3],
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 3], [0, 0], [1, 3], [1, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_center_point_box_format_nobatch():
"""Test NMS - center-point box format."""
# --
boxes = np.array([
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.6, 1.0, 1.0],
[0.5, 0.4, 1.0, 1.0],
[0.5, 10.5, 1.0, 1.0],
[0.5, 10.6, 1.0, 1.0],
[0.5, 100.5, 1.0, 1.0]
]).astype(np.float32)
scores = np.array([
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 3], [0, 0], [0, 5]]).astype(np.int64)
# --
result = box_nms.xywh_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_center_point_box_format_noclass():
"""Test NMS - center-point box format."""
# --
boxes = np.array([
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.6, 1.0, 1.0],
[0.5, 0.4, 1.0, 1.0],
[0.5, 10.5, 1.0, 1.0],
[0.5, 10.6, 1.0, 1.0],
[0.5, 100.5, 1.0, 1.0]
]).astype(np.float32)
scores = np.array(
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([3, 0, 5]).astype(np.int64)
# --
result = box_nms.xywh_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
|
[
"box_utils._c.box_nms.ltrb_nms",
"numpy.testing.assert_array_equal",
"box_utils._c.box_nms.xywh_nms",
"numpy.array"
] |
[((928, 1032), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (944, 1032), True, 'import box_utils._c.box_nms as box_nms\n'), ((1051, 1106), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (1080, 1106), True, 'import numpy as np\n'), ((1809, 1913), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (1825, 1913), True, 'import box_utils._c.box_nms as box_nms\n'), ((1932, 1987), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (1961, 1987), True, 'import numpy as np\n'), ((2452, 2556), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (2468, 2556), True, 'import box_utils._c.box_nms as box_nms\n'), ((2575, 2630), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (2604, 2630), True, 'import numpy as np\n'), ((3435, 3539), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (3451, 3539), True, 'import box_utils._c.box_nms as box_nms\n'), ((3558, 3613), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (3587, 3613), True, 'import numpy as np\n'), ((4298, 4402), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (4314, 4402), True, 'import box_utils._c.box_nms as box_nms\n'), ((4421, 4476), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (4450, 4476), True, 'import numpy as np\n'), ((5536, 5640), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (5552, 5640), True, 'import box_utils._c.box_nms as box_nms\n'), ((5659, 5714), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (5688, 5714), True, 'import numpy as np\n'), ((6460, 6564), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (6476, 6564), True, 'import box_utils._c.box_nms as box_nms\n'), ((6583, 6638), 'numpy.testing.assert_array_equal', 
'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (6612, 6638), True, 'import numpy as np\n'), ((7350, 7454), 'box_utils._c.box_nms.xywh_nms', 'box_nms.xywh_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (7366, 7454), True, 'import box_utils._c.box_nms as box_nms\n'), ((7473, 7528), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (7502, 7528), True, 'import numpy as np\n'), ((8219, 8323), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (8235, 8323), True, 'import box_utils._c.box_nms as box_nms\n'), ((8342, 8397), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (8371, 8397), True, 'import numpy as np\n'), ((9257, 9361), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (9273, 9361), True, 'import box_utils._c.box_nms as box_nms\n'), ((9380, 9435), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (9409, 9435), True, 'import numpy as np\n'), ((10105, 10209), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (10121, 10209), True, 'import box_utils._c.box_nms as box_nms\n'), ((10228, 10283), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (10257, 10283), True, 'import numpy as np\n'), ((10953, 11057), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (10969, 11057), True, 'import box_utils._c.box_nms as box_nms\n'), ((11076, 11131), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (11105, 11131), True, 'import numpy as np\n'), ((11869, 11973), 'box_utils._c.box_nms.ltrb_nms', 'box_nms.ltrb_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (11885, 11973), True, 'import box_utils._c.box_nms as box_nms\n'), ((11992, 12047), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (12021, 12047), True, 'import numpy as np\n'), ((12754, 12858), 'box_utils._c.box_nms.xywh_nms', 'box_nms.xywh_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], 
iou_threshold[0],\n max_output_boxes_per_class[0])\n', (12770, 12858), True, 'import box_utils._c.box_nms as box_nms\n'), ((12877, 12932), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (12906, 12932), True, 'import numpy as np\n'), ((13613, 13717), 'box_utils._c.box_nms.xywh_nms', 'box_nms.xywh_nms', (['boxes', 'scores', 'score_threshold[0]', 'iou_threshold[0]', 'max_output_boxes_per_class[0]'], {}), '(boxes, scores, score_threshold[0], iou_threshold[0],\n max_output_boxes_per_class[0])\n', (13629, 13717), True, 'import box_utils._c.box_nms as box_nms\n'), ((13736, 13791), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'selected_indices'], {}), '(result, selected_indices)\n', (13765, 13791), True, 'import numpy as np\n'), ((321, 484), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]]])\n', (329, 484), True, 'import numpy as np\n'), ((561, 607), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (569, 607), True, 'import numpy as np\n'), ((669, 682), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (677, 682), True, 'import numpy as np\n'), ((720, 735), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (728, 735), True, 'import numpy as np\n'), ((777, 792), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (785, 792), True, 'import numpy as np\n'), ((835, 878), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [0, 0, 5]]'], {}), '([[0, 0, 3], [0, 0, 0], [0, 0, 5]])\n', (843, 878), True, 'import numpy as np\n'), ((1222, 1385), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]]])\n', (1230, 1385), True, 'import numpy as np\n'), ((1462, 1508), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (1470, 1508), True, 'import numpy as np\n'), ((1570, 1583), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (1578, 1583), True, 'import numpy as np\n'), ((1621, 1636), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (1629, 1636), True, 'import numpy as np\n'), ((1678, 1693), 'numpy.array', 'np.array', (['[0.4]'], {}), '([0.4])\n', (1686, 1693), True, 'import numpy as np\n'), ((1736, 1768), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0]]'], {}), '([[0, 0, 3], [0, 0, 0]])\n', (1744, 1768), True, 'import numpy as np\n'), ((2071, 2105), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0]]])\n', (2079, 2105), True, 'import numpy as np\n'), ((2152, 2171), 'numpy.array', 'np.array', (['[[[0.9]]]'], {}), '([[[0.9]]])\n', (2160, 2171), True, 'import numpy as np\n'), ((2224, 2237), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (2232, 2237), True, 'import numpy as np\n'), ((2275, 2290), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (2283, 2290), True, 'import numpy as np\n'), ((2332, 2347), 'numpy.array', 
'np.array', (['[0.0]'], {}), '([0.0])\n', (2340, 2347), True, 'import numpy as np\n'), ((2390, 2411), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (2398, 2411), True, 'import numpy as np\n'), ((2724, 2969), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, \n 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, \n 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, \n 1.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0],\n [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0,\n 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, \n 1.0, 1.0]]])\n', (2732, 2969), True, 'import numpy as np\n'), ((3076, 3140), 'numpy.array', 'np.array', (['[[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]]'], {}), '([[[0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]])\n', (3084, 3140), True, 'import numpy as np\n'), ((3207, 3220), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (3215, 3220), True, 'import numpy as np\n'), ((3258, 3273), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (3266, 3273), True, 'import numpy as np\n'), ((3315, 3330), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (3323, 3330), True, 'import numpy as np\n'), ((3373, 3394), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (3381, 3394), True, 'import numpy as np\n'), ((3711, 3874), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]]])\n', (3719, 3874), True, 'import numpy as np\n'), ((3951, 3997), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (3959, 3997), True, 'import numpy as np\n'), ((4059, 4072), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (4067, 4072), True, 'import numpy as np\n'), ((4110, 4125), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (4118, 4125), True, 'import numpy as np\n'), ((4167, 4182), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (4175, 4182), True, 'import numpy as np\n'), ((4225, 4257), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0]]'], {}), '([[0, 0, 3], [0, 0, 0]])\n', (4233, 4257), True, 'import numpy as np\n'), ((4562, 4877), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]], [[\n 0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0,\n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]], [[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, \n 0.9], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0,\n 101.0]]])\n', (4570, 4877), True, 'import numpy as np\n'), ((5111, 5198), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]], [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]], [[0.9, 0.75, 0.6, 0.95, 0.5, \n 0.3]]])\n', (5119, 5198), True, 'import numpy as np\n'), ((5266, 5279), 'numpy.array', 'np.array', 
(['[2]'], {}), '([2])\n', (5274, 5279), True, 'import numpy as np\n'), ((5317, 5332), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (5325, 5332), True, 'import numpy as np\n'), ((5374, 5389), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (5382, 5389), True, 'import numpy as np\n'), ((5432, 5486), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]'], {}), '([[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]])\n', (5440, 5486), True, 'import numpy as np\n'), ((5800, 5963), 'numpy.array', 'np.array', (['[[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]]'], {}), '([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9\n ], [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, \n 101.0]]])\n', (5808, 5963), True, 'import numpy as np\n'), ((6040, 6125), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]\n )\n', (6048, 6125), True, 'import numpy as np\n'), ((6190, 6203), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (6198, 6203), True, 'import numpy as np\n'), ((6241, 6256), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (6249, 6256), True, 'import numpy as np\n'), ((6298, 6313), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (6306, 6313), True, 'import numpy as np\n'), ((6356, 6410), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]'], {}), '([[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]])\n', (6364, 6410), True, 'import numpy as np\n'), ((6748, 6900), 'numpy.array', 'np.array', (['[[[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0], [0.5, \n 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]]]'], {}), '([[[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]]])\n', (6756, 6900), True, 'import numpy as np\n'), ((6983, 7029), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (6991, 7029), True, 'import numpy as np\n'), ((7091, 7104), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (7099, 7104), True, 'import numpy as np\n'), ((7142, 7157), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (7150, 7157), True, 'import numpy as np\n'), ((7199, 7214), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (7207, 7214), True, 'import numpy as np\n'), ((7257, 7300), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [0, 0, 5]]'], {}), '([[0, 0, 3], [0, 0, 0], [0, 0, 5]])\n', (7265, 7300), True, 'import numpy as np\n'), ((7630, 7793), 'numpy.array', 'np.array', (['[[[1.0, 1.0, 0.0, 0.0], [0.0, 0.1, 1.0, 1.1], [0.0, 0.9, 1.0, -0.1], [0.0, \n 10.0, 1.0, 11.0], [1.0, 10.1, 0.0, 11.1], [1.0, 101.0, 0.0, 100.0]]]'], {}), '([[[1.0, 1.0, 0.0, 0.0], [0.0, 0.1, 1.0, 1.1], [0.0, 0.9, 1.0, -0.1\n ], [0.0, 10.0, 1.0, 11.0], [1.0, 10.1, 0.0, 11.1], [1.0, 101.0, 0.0, \n 100.0]]])\n', (7638, 7793), True, 'import numpy as np\n'), ((7870, 7916), 'numpy.array', 'np.array', (['[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]'], {}), '([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]])\n', (7878, 7916), True, 'import numpy as np\n'), ((7969, 7982), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (7977, 7982), True, 'import numpy as np\n'), ((8020, 8035), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (8028, 
8035), True, 'import numpy as np\n'), ((8077, 8092), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (8085, 8092), True, 'import numpy as np\n'), ((8135, 8178), 'numpy.array', 'np.array', (['[[0, 0, 3], [0, 0, 0], [0, 0, 5]]'], {}), '([[0, 0, 3], [0, 0, 0], [0, 0, 5]])\n', (8143, 8178), True, 'import numpy as np\n'), ((8663, 8818), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]])\n', (8671, 8818), True, 'import numpy as np\n'), ((8901, 8945), 'numpy.array', 'np.array', (['[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]'], {}), '([[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]])\n', (8909, 8945), True, 'import numpy as np\n'), ((9007, 9020), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (9015, 9020), True, 'import numpy as np\n'), ((9058, 9073), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (9066, 9073), True, 'import numpy as np\n'), ((9115, 9130), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (9123, 9130), True, 'import numpy as np\n'), ((9173, 9207), 'numpy.array', 'np.array', (['[[0, 3], [0, 0], [0, 5]]'], {}), '([[0, 3], [0, 0], [0, 5]])\n', (9181, 9207), True, 'import numpy as np\n'), ((9537, 9692), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]])\n', (9545, 9692), True, 'import numpy as np\n'), ((9775, 9817), 'numpy.array', 'np.array', (['[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]'], {}), '([0.9, 0.75, 0.6, 0.95, 0.5, 0.3])\n', (9783, 9817), True, 'import numpy as np\n'), ((9879, 9892), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (9887, 9892), True, 'import numpy as np\n'), ((9930, 9945), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (9938, 9945), True, 'import numpy as np\n'), ((9987, 10002), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (9995, 10002), True, 'import numpy as np\n'), ((10045, 10064), 'numpy.array', 'np.array', (['[3, 0, 5]'], {}), '([3, 0, 5])\n', (10053, 10064), True, 'import numpy as np\n'), ((10384, 10539), 'numpy.array', 'np.array', (['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]])\n', (10392, 10539), True, 'import numpy as np\n'), ((10622, 10664), 'numpy.array', 'np.array', (['[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]'], {}), '([0.9, 0.75, 0.6, 0.95, 0.5, 0.3])\n', (10630, 10664), True, 'import numpy as np\n'), ((10726, 10740), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (10734, 10740), True, 'import numpy as np\n'), ((10778, 10793), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (10786, 10793), True, 'import numpy as np\n'), ((10835, 10850), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (10843, 10850), True, 'import numpy as np\n'), ((10893, 10912), 'numpy.array', 'np.array', (['[3, 0, 5]'], {}), '([3, 0, 5])\n', (10901, 10912), True, 'import numpy as np\n'), ((11225, 11380), 'numpy.array', 'np.array', 
(['[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9], [0.0, \n 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]]'], {}), '([[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1], [0.0, -0.1, 1.0, 0.9],\n [0.0, 10.0, 1.0, 11.0], [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]])\n', (11233, 11380), True, 'import numpy as np\n'), ((11463, 11541), 'numpy.array', 'np.array', (['[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]'], {}), '([[0.9, 0.75, 0.6, 0.95, 0.5, 0.3], [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]])\n', (11471, 11541), True, 'import numpy as np\n'), ((11611, 11624), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (11619, 11624), True, 'import numpy as np\n'), ((11662, 11677), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (11670, 11677), True, 'import numpy as np\n'), ((11719, 11734), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (11727, 11734), True, 'import numpy as np\n'), ((11777, 11819), 'numpy.array', 'np.array', (['[[0, 3], [0, 0], [1, 3], [1, 0]]'], {}), '([[0, 3], [0, 0], [1, 3], [1, 0]])\n', (11785, 11819), True, 'import numpy as np\n'), ((12165, 12315), 'numpy.array', 'np.array', (['[[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0], [0.5, \n 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]]'], {}), '([[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]])\n', (12173, 12315), True, 'import numpy as np\n'), ((12398, 12442), 'numpy.array', 'np.array', (['[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]'], {}), '([[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]])\n', (12406, 12442), True, 'import numpy as np\n'), ((12504, 12517), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (12512, 12517), True, 'import numpy as np\n'), ((12555, 12570), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (12563, 12570), True, 'import numpy as np\n'), ((12612, 12627), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (12620, 12627), True, 'import numpy as np\n'), ((12670, 12704), 'numpy.array', 'np.array', (['[[0, 3], [0, 0], [0, 5]]'], {}), '([[0, 3], [0, 0], [0, 5]])\n', (12678, 12704), True, 'import numpy as np\n'), ((13050, 13200), 'numpy.array', 'np.array', (['[[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0], [0.5, \n 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]]'], {}), '([[0.5, 0.5, 1.0, 1.0], [0.5, 0.6, 1.0, 1.0], [0.5, 0.4, 1.0, 1.0],\n [0.5, 10.5, 1.0, 1.0], [0.5, 10.6, 1.0, 1.0], [0.5, 100.5, 1.0, 1.0]])\n', (13058, 13200), True, 'import numpy as np\n'), ((13283, 13325), 'numpy.array', 'np.array', (['[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]'], {}), '([0.9, 0.75, 0.6, 0.95, 0.5, 0.3])\n', (13291, 13325), True, 'import numpy as np\n'), ((13387, 13400), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (13395, 13400), True, 'import numpy as np\n'), ((13438, 13453), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (13446, 13453), True, 'import numpy as np\n'), ((13495, 13510), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (13503, 13510), True, 'import numpy as np\n'), ((13553, 13572), 'numpy.array', 'np.array', (['[3, 0, 5]'], {}), '([3, 0, 5])\n', (13561, 13572), True, 'import numpy as np\n')]
|
# ------------------------------------------------------------------
# PyTorch implementation of
# "ROAM: Recurrently Optimizing Tracking Model", CVPR, 2020
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------
import config
import numpy as np
import os
import torch
import torchvision.transforms as trans
import json
from torch.utils.data import Dataset
from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader
import cv2
class BaseDataset(Dataset):
def __init__(self, train_dir, val_dir=None, is_train=True):
if is_train:
self._seq_dir = train_dir
with open(os.path.join(train_dir, 'train.json'), 'r') as f:
self._seqs = json.load(f)
else:
np.random.seed(123)
self._seq_dir = val_dir
with open(os.path.join(val_dir, 'val.json'), 'r') as f:
self._seqs = json.load(f)
self.n_seq = len(self._seqs)
        print('number of sequences: {}'.format(self.n_seq))
self.is_train = is_train
self.seq_id = -1
self.preprocessor = trans.Compose(
[trans.ToTensor(),
trans.Normalize(mean=config.mean, std=config.std)])
self.pad_value = np.array(config.mean)
base_target_sz = np.array([config.base_target_sz, config.base_target_sz])
self.base_window_sz = get_search_size(base_target_sz, config.search_scale)
window_cell_sz = np.ceil(self.base_window_sz / config.cell_sz)
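        # force an odd number of cells so the search window has a well-defined center cell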
self.window_cell_sz = window_cell_sz - (window_cell_sz % 2) + 1
def __len__(self):
return len(self._seqs)
def __getitem__(self, seq_id):
# uniformly choosing video frames
seq_name = self._seqs[seq_id]['seq_name']
start_frame = self._seqs[seq_id]['start_frame']
end_frame = self._seqs[seq_id]['end_frame']
bboxes = self._seqs[seq_id]['gt_bboxes']
visible = self._seqs[seq_id]['visible']
visible_frame_idxes = np.arange(0, end_frame - start_frame + 1)[visible]
seq_len = len(visible_frame_idxes)
clip_len = config.look_ahead * config.time_step + 1
assert seq_len >= clip_len
start_idx = np.random.randint(seq_len - clip_len + 1) if self.is_train else 0
selected_idxes = [visible_frame_idxes[idx] for idx in range(start_idx, start_idx + clip_len)]
# build training examples for initial frame
patches, label_maps, gt_bboxes = [], [], []
img_path = self._get_img_path(seq_name, start_frame, selected_idxes[0])
init_image = default_loader(img_path)
init_bbox = np.array(bboxes[selected_idxes[0]])
for ratio in config.aug_init_ratios:
for scale in config.aug_init_scales:
# aspect ratio augmentation
height, width = init_image.shape[0: 2]
sw, sh = int(width * ratio), int(height / ratio)
image_resized = cv2.resize(init_image, (sw, sh))
bbox_reiszed = init_bbox * np.array([ratio, 1 / ratio, ratio, 1 / ratio])
# scale changes augmentation
search_scale = config.search_scale / scale
# generate training examples
patch, label_map, bbox_on_patch = self._generate_training_examples(image_resized, bbox_reiszed, search_scale)
patches.append(patch)
label_maps.append(label_map)
gt_bboxes.append(bbox_on_patch)
# build training examples for subsequent frames.
for i, idx in enumerate(selected_idxes[1:]):
img_path = self._get_img_path(seq_name, start_frame, idx)
image = default_loader(img_path)
bbox = np.array(bboxes[idx])
# aspect ratio augmentation
height, width = image.shape[0: 2]
ratio = np.random.uniform(config.aug_ratios_range[0], config.aug_ratios_range[1])
sw, sh = int(width * ratio), int(height / ratio)
image = cv2.resize(image, (sw, sh))
bbox = bbox * np.array([ratio, 1 / ratio, ratio, 1 / ratio])
# scale changes augmentation
obj_scale = np.random.uniform(config.aug_scales_range[0], config.aug_scales_range[1])
search_scale = config.search_scale/obj_scale
# generate training examples
patch, label_map, bbox_on_patch = self._generate_training_examples(image, bbox, search_scale)
patches.append(patch)
label_maps.append(label_map)
gt_bboxes.append(bbox_on_patch)
patches = torch.stack(patches, 0)
label_maps = torch.stack(label_maps, 0)
gt_bboxes = torch.stack(gt_bboxes, 0)
return patches, label_maps, gt_bboxes
def _get_img_path(self, seq_name, start_frame, index):
raise NotImplementedError
def _generate_training_examples(self, image, bbox, search_scale):
# get roi patches
window_sz = get_search_size(bbox[2:], search_scale)
scale = self.base_window_sz[0] / window_sz[0]
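        # translation augmentation: jitter the crop center by a random pixel offset,
        # expressed at the base-window scale (hence the division by `scale` below)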
offset = np.random.randint(-config.offset_range, config.offset_range, 2)
center = bbox[:2] + np.floor(bbox[2:] / 2) - offset/scale
patch = get_search_patch(image, center, self.pad_value, window_sz, self.base_window_sz)
patch = self.preprocessor(patch)
# get label maps
scaled_bbox = np.round(bbox * scale)
target_cell_sz = np.ceil(scaled_bbox[2:] / config.cell_sz)
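        # the Gaussian label bandwidth scales with the target size (in cells), and its
        # peak is shifted by the same offset used to jitter the crop center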
output_sigma = target_cell_sz * config.output_sigma_factor
map_center = np.floor(self.window_cell_sz / 2) + offset //config.cell_sz
label_map = gaussian_shaped_labels(output_sigma, self.window_cell_sz, map_center)
label_map = torch.from_numpy(label_map[None, :]).float()
# get gt bboxes
bbox_center = np.zeros(2, np.float32) + offset
bbox_on_patch = np.concatenate([bbox_center, scaled_bbox[2:]])
bbox_on_patch = torch.from_numpy(bbox_on_patch).float()
return patch, label_map, bbox_on_patch
class VIDataset(BaseDataset):
def __init__(self, root_dir, is_train):
train_dir = os.path.join(root_dir, 'Data/ILSVRC/Data/VID/train')
val_dir = os.path.join(root_dir, 'Data/ILSVRC/Data/VID/val')
super(VIDataset, self).__init__(train_dir, val_dir, is_train)
def _get_img_path(self, seq_name, start_frame, index):
img_path = self._seq_dir + ('/%s/%06d.JPEG' % (seq_name, (start_frame - 1) + index))
return img_path
|
[
"numpy.random.seed",
"numpy.floor",
"utils.get_search_patch",
"numpy.random.randint",
"numpy.arange",
"torchvision.transforms.Normalize",
"os.path.join",
"numpy.round",
"cv2.resize",
"numpy.ceil",
"utils.get_search_size",
"utils.gaussian_shaped_labels",
"utils.default_loader",
"numpy.concatenate",
"torch.from_numpy",
"numpy.random.uniform",
"json.load",
"torch.stack",
"numpy.zeros",
"numpy.array",
"torchvision.transforms.ToTensor"
] |
[((1309, 1330), 'numpy.array', 'np.array', (['config.mean'], {}), '(config.mean)\n', (1317, 1330), True, 'import numpy as np\n'), ((1357, 1413), 'numpy.array', 'np.array', (['[config.base_target_sz, config.base_target_sz]'], {}), '([config.base_target_sz, config.base_target_sz])\n', (1365, 1413), True, 'import numpy as np\n'), ((1444, 1496), 'utils.get_search_size', 'get_search_size', (['base_target_sz', 'config.search_scale'], {}), '(base_target_sz, config.search_scale)\n', (1459, 1496), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((1522, 1567), 'numpy.ceil', 'np.ceil', (['(self.base_window_sz / config.cell_sz)'], {}), '(self.base_window_sz / config.cell_sz)\n', (1529, 1567), True, 'import numpy as np\n'), ((2643, 2667), 'utils.default_loader', 'default_loader', (['img_path'], {}), '(img_path)\n', (2657, 2667), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((2688, 2723), 'numpy.array', 'np.array', (['bboxes[selected_idxes[0]]'], {}), '(bboxes[selected_idxes[0]])\n', (2696, 2723), True, 'import numpy as np\n'), ((4653, 4676), 'torch.stack', 'torch.stack', (['patches', '(0)'], {}), '(patches, 0)\n', (4664, 4676), False, 'import torch\n'), ((4698, 4724), 'torch.stack', 'torch.stack', (['label_maps', '(0)'], {}), '(label_maps, 0)\n', (4709, 4724), False, 'import torch\n'), ((4745, 4770), 'torch.stack', 'torch.stack', (['gt_bboxes', '(0)'], {}), '(gt_bboxes, 0)\n', (4756, 4770), False, 'import torch\n'), ((5029, 5068), 'utils.get_search_size', 'get_search_size', (['bbox[2:]', 'search_scale'], {}), '(bbox[2:], search_scale)\n', (5044, 5068), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((5140, 5203), 'numpy.random.randint', 'np.random.randint', (['(-config.offset_range)', 'config.offset_range', '(2)'], {}), '(-config.offset_range, config.offset_range, 2)\n', (5157, 5203), True, 'import numpy as np\n'), ((5286, 5365), 'utils.get_search_patch', 'get_search_patch', (['image', 'center', 'self.pad_value', 'window_sz', 'self.base_window_sz'], {}), '(image, center, self.pad_value, window_sz, self.base_window_sz)\n', (5302, 5365), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((5455, 5477), 'numpy.round', 'np.round', (['(bbox * scale)'], {}), '(bbox * scale)\n', (5463, 5477), True, 'import numpy as np\n'), ((5503, 5544), 'numpy.ceil', 'np.ceil', (['(scaled_bbox[2:] / config.cell_sz)'], {}), '(scaled_bbox[2:] / config.cell_sz)\n', (5510, 5544), True, 'import numpy as np\n'), ((5713, 5782), 'utils.gaussian_shaped_labels', 'gaussian_shaped_labels', (['output_sigma', 'self.window_cell_sz', 'map_center'], {}), '(output_sigma, self.window_cell_sz, map_center)\n', (5735, 5782), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((5952, 5998), 'numpy.concatenate', 'np.concatenate', (['[bbox_center, scaled_bbox[2:]]'], {}), '([bbox_center, scaled_bbox[2:]])\n', (5966, 5998), True, 'import numpy as np\n'), ((6207, 6259), 'os.path.join', 'os.path.join', (['root_dir', '"""Data/ILSVRC/Data/VID/train"""'], {}), "(root_dir, 'Data/ILSVRC/Data/VID/train')\n", (6219, 6259), False, 'import os\n'), ((6278, 6328), 'os.path.join', 'os.path.join', (['root_dir', '"""Data/ILSVRC/Data/VID/val"""'], {}), "(root_dir, 'Data/ILSVRC/Data/VID/val')\n", (6290, 6328), False, 'import os\n'), ((857, 876), 'numpy.random.seed', 'np.random.seed', 
(['(123)'], {}), '(123)\n', (871, 876), True, 'import numpy as np\n'), ((2060, 2101), 'numpy.arange', 'np.arange', (['(0)', '(end_frame - start_frame + 1)'], {}), '(0, end_frame - start_frame + 1)\n', (2069, 2101), True, 'import numpy as np\n'), ((2269, 2310), 'numpy.random.randint', 'np.random.randint', (['(seq_len - clip_len + 1)'], {}), '(seq_len - clip_len + 1)\n', (2286, 2310), True, 'import numpy as np\n'), ((3744, 3768), 'utils.default_loader', 'default_loader', (['img_path'], {}), '(img_path)\n', (3758, 3768), False, 'from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader\n'), ((3788, 3809), 'numpy.array', 'np.array', (['bboxes[idx]'], {}), '(bboxes[idx])\n', (3796, 3809), True, 'import numpy as np\n'), ((3916, 3989), 'numpy.random.uniform', 'np.random.uniform', (['config.aug_ratios_range[0]', 'config.aug_ratios_range[1]'], {}), '(config.aug_ratios_range[0], config.aug_ratios_range[1])\n', (3933, 3989), True, 'import numpy as np\n'), ((4071, 4098), 'cv2.resize', 'cv2.resize', (['image', '(sw, sh)'], {}), '(image, (sw, sh))\n', (4081, 4098), False, 'import cv2\n'), ((4237, 4310), 'numpy.random.uniform', 'np.random.uniform', (['config.aug_scales_range[0]', 'config.aug_scales_range[1]'], {}), '(config.aug_scales_range[0], config.aug_scales_range[1])\n', (4254, 4310), True, 'import numpy as np\n'), ((5633, 5666), 'numpy.floor', 'np.floor', (['(self.window_cell_sz / 2)'], {}), '(self.window_cell_sz / 2)\n', (5641, 5666), True, 'import numpy as np\n'), ((5895, 5918), 'numpy.zeros', 'np.zeros', (['(2)', 'np.float32'], {}), '(2, np.float32)\n', (5903, 5918), True, 'import numpy as np\n'), ((818, 830), 'json.load', 'json.load', (['f'], {}), '(f)\n', (827, 830), False, 'import json\n'), ((1010, 1022), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1019, 1022), False, 'import json\n'), ((1201, 1217), 'torchvision.transforms.ToTensor', 'trans.ToTensor', ([], {}), '()\n', (1215, 1217), True, 'import torchvision.transforms as trans\n'), ((1232, 1281), 'torchvision.transforms.Normalize', 'trans.Normalize', ([], {'mean': 'config.mean', 'std': 'config.std'}), '(mean=config.mean, std=config.std)\n', (1247, 1281), True, 'import torchvision.transforms as trans\n'), ((3014, 3046), 'cv2.resize', 'cv2.resize', (['init_image', '(sw, sh)'], {}), '(init_image, (sw, sh))\n', (3024, 3046), False, 'import cv2\n'), ((4125, 4171), 'numpy.array', 'np.array', (['[ratio, 1 / ratio, ratio, 1 / ratio]'], {}), '([ratio, 1 / ratio, ratio, 1 / ratio])\n', (4133, 4171), True, 'import numpy as np\n'), ((5232, 5254), 'numpy.floor', 'np.floor', (['(bbox[2:] / 2)'], {}), '(bbox[2:] / 2)\n', (5240, 5254), True, 'import numpy as np\n'), ((5803, 5839), 'torch.from_numpy', 'torch.from_numpy', (['label_map[None, :]'], {}), '(label_map[None, :])\n', (5819, 5839), False, 'import torch\n'), ((6023, 6054), 'torch.from_numpy', 'torch.from_numpy', (['bbox_on_patch'], {}), '(bbox_on_patch)\n', (6039, 6054), False, 'import torch\n'), ((739, 776), 'os.path.join', 'os.path.join', (['train_dir', '"""train.json"""'], {}), "(train_dir, 'train.json')\n", (751, 776), False, 'import os\n'), ((935, 968), 'os.path.join', 'os.path.join', (['val_dir', '"""val.json"""'], {}), "(val_dir, 'val.json')\n", (947, 968), False, 'import os\n'), ((3090, 3136), 'numpy.array', 'np.array', (['[ratio, 1 / ratio, ratio, 1 / ratio]'], {}), '([ratio, 1 / ratio, ratio, 1 / ratio])\n', (3098, 3136), True, 'import numpy as np\n')]
|
"""Creates a custom kinematics body with two links and one joint
"""
from openravepy import *
from numpy import eye, array, zeros
env = Environment() # create openrave environment
env.SetViewer('qtcoin') # attach viewer (optional)
with env:
robot=RaveCreateRobot(env,'')
robot.SetName('camera')
linkinfo=KinBody.LinkInfo()
linkinfo._name='camerabase'
ginfo=KinBody.GeometryInfo()
ginfo._type=GeometryType.Box
ginfo._vGeomData=[0.1,0.1,0.1] # box extents
ginfo._vDiffuseColor=[0,0,1]
ginfo._t = eye(4)
linkinfo._vgeometryinfos = [ginfo]
camera1info=Robot.AttachedSensorInfo()
camera1info._linkname='camerabase'
camera1info._name = 'ensenson10'
camera1info._sensorname = 'base_pinhole_camera'
camera1info._trelative = eye(4)
camera1info._trelative[0:3,3] = [0,0,0.1]
camera1info._sensorgeometry = CameraGeomData()
camera1info._sensorgeometry.width = 640
camera1info._sensorgeometry.height = 480
camera1info._sensorgeometry.intrinsics.K = array([[640.0,0,320],[0,640,240],[0,0,1]])
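    # 640x480 pinhole model: fx = fy = 640 px with the principal point at the image center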
camera1info._sensorgeometry.intrinsics.distortion_coeffs = zeros(5)
camera1info._sensorgeometry.intrinsics.distortion_model = 'opencv'
camera1info._sensorgeometry.intrinsics.focal_length = 0.05
robot.Init([linkinfo],[],[],[])
env.Add(robot)
robot.AddAttachedSensor(camera1info,True)
|
[
"numpy.zeros",
"numpy.eye",
"numpy.array"
] |
[((533, 539), 'numpy.eye', 'eye', (['(4)'], {}), '(4)\n', (536, 539), False, 'from numpy import eye, array, zeros\n'), ((780, 786), 'numpy.eye', 'eye', (['(4)'], {}), '(4)\n', (783, 786), False, 'from numpy import eye, array, zeros\n'), ((1020, 1070), 'numpy.array', 'array', (['[[640.0, 0, 320], [0, 640, 240], [0, 0, 1]]'], {}), '([[640.0, 0, 320], [0, 640, 240], [0, 0, 1]])\n', (1025, 1070), False, 'from numpy import eye, array, zeros\n'), ((1126, 1134), 'numpy.zeros', 'zeros', (['(5)'], {}), '(5)\n', (1131, 1134), False, 'from numpy import eye, array, zeros\n')]
|
# main imports
import numpy as np
import pandas as pd
import json
import os, sys, argparse, subprocess
# model imports
from keras.models import model_from_json
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from joblib import dump, load
# image processing imports
from PIL import Image
import ipfml.iqa.fr as fr
from ipfml import metrics
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
n_samples_image_name_postfix = "_samples_mean.png"
reference_image_name_postfix = "_1000_samples_mean.png"
def write_result(_scene_name, _data_file, _model_path, _n, _reconstructed_path, _iqa):
# prepare data to get score information
dataset=np.loadtxt(_data_file, delimiter=';')
y = dataset[:,0]
X = dataset[:,1:]
y=np.reshape(y, (-1,1))
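    # scale features and targets to [0, 1] before the train/test split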
    scaler_x = MinMaxScaler()
    scaler_y = MinMaxScaler()
    xscale = scaler_x.fit_transform(X)
    yscale = scaler_y.fit_transform(y)
_, X_test, _, y_test = train_test_split(xscale, yscale)
# prepare image path to compare
n_samples_image_path = os.path.join(cfg.reconstructed_folder, _scene_name + '_' + _n + n_samples_image_name_postfix)
reference_image_path = os.path.join(cfg.reconstructed_folder, _scene_name + reference_image_name_postfix)
if not os.path.exists(n_samples_image_path):
# call sub process to create 'n' samples img
print("Creation of 'n' samples image : ", n_samples_image_path)
subprocess.run(["python", "reconstruct/reconstruct_scene_mean.py", "--scene", _scene_name, "--n", _n, "--image_name", n_samples_image_path.split('/')[-1]])
if not os.path.exists(reference_image_path):
# call sub process to create 'reference' img
print("Creation of reference image : ", reference_image_path)
subprocess.run(["python", "reconstruct/reconstruct_scene_mean.py", "--scene", _scene_name, "--n", str(1000), "--image_name", reference_image_path.split('/')[-1]])
# load the trained model
with open(_model_path, 'r') as f:
json_model = json.load(f)
model = model_from_json(json_model)
model.load_weights(_model_path.replace('.json', '.h5'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# get coefficient of determination score on test set
y_predicted = model.predict(X_test)
len_shape, _ = y_predicted.shape
y_predicted = y_predicted.reshape(len_shape)
coeff = metrics.coefficient_of_determination(y_test, y_predicted)
# Get data information
reference_image = Image.open(reference_image_path)
reconstructed_image = Image.open(_reconstructed_path)
n_samples_image = Image.open(n_samples_image_path)
# Load expected IQA comparison
try:
fr_iqa = getattr(fr, _iqa)
except AttributeError:
raise NotImplementedError("FR IQA `{}` not implement `{}`".format(fr.__name__, _iqa))
mse_ref_reconstructed_samples = fr_iqa(reference_image, reconstructed_image)
mse_reconstructed_n_samples = fr_iqa(n_samples_image, reconstructed_image)
model_name = _model_path.split('/')[-1].replace('.json', '')
if not os.path.exists(cfg.results_information_folder):
os.makedirs(cfg.results_information_folder)
# save score into models_comparisons_keras.csv file
with open(cfg.global_result_filepath_keras, "a") as f:
f.write(model_name + ';' + str(len(y)) + ';' + str(coeff[0]) + ';' + str(mse_reconstructed_n_samples) + ';' + str(mse_ref_reconstructed_samples) + '\n')
def main():
parser = argparse.ArgumentParser(description="Train model and saved it")
parser.add_argument('--scene', type=str, help='Scene name to reconstruct', choices=cfg.scenes_list)
parser.add_argument('--data', type=str, help='Filename of dataset')
parser.add_argument('--model_path', type=str, help='Json model file path')
parser.add_argument('--n', type=str, help='Number of pixel values approximated to keep')
parser.add_argument('--image_path', type=str, help="The image reconstructed to compare with")
parser.add_argument('--iqa', type=str, help='Image to compare', choices=['ssim', 'mse', 'rmse', 'mae', 'psnr'])
args = parser.parse_args()
param_scene_name = args.scene
param_data_file = args.data
param_n = args.n
param_model_path = args.model_path
param_image_path = args.image_path
param_iqa = args.iqa
write_result(param_scene_name, param_data_file, param_model_path, param_n, param_image_path, param_iqa)
if __name__ == "__main__":
main()
|
[
"ipfml.metrics.coefficient_of_determination",
"json.load",
"argparse.ArgumentParser",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.MinMaxScaler",
"os.path.exists",
"sys.path.insert",
"PIL.Image.open",
"keras.models.model_from_json",
"numpy.loadtxt",
"numpy.reshape",
"os.path.join"
] |
[((423, 445), 'sys.path.insert', 'sys.path.insert', (['(0)', '""""""'], {}), "(0, '')\n", (438, 445), False, 'import os, sys, argparse, subprocess\n'), ((779, 816), 'numpy.loadtxt', 'np.loadtxt', (['_data_file'], {'delimiter': '""";"""'}), "(_data_file, delimiter=';')\n", (789, 816), True, 'import numpy as np\n'), ((868, 890), 'numpy.reshape', 'np.reshape', (['y', '(-1, 1)'], {}), '(y, (-1, 1))\n', (878, 890), True, 'import numpy as np\n'), ((903, 917), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (915, 917), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1046, 1078), 'sklearn.model_selection.train_test_split', 'train_test_split', (['xscale', 'yscale'], {}), '(xscale, yscale)\n', (1062, 1078), False, 'from sklearn.model_selection import train_test_split\n'), ((1143, 1240), 'os.path.join', 'os.path.join', (['cfg.reconstructed_folder', "(_scene_name + '_' + _n + n_samples_image_name_postfix)"], {}), "(cfg.reconstructed_folder, _scene_name + '_' + _n +\n n_samples_image_name_postfix)\n", (1155, 1240), False, 'import os, sys, argparse, subprocess\n'), ((1264, 1350), 'os.path.join', 'os.path.join', (['cfg.reconstructed_folder', '(_scene_name + reference_image_name_postfix)'], {}), '(cfg.reconstructed_folder, _scene_name +\n reference_image_name_postfix)\n', (1276, 1350), False, 'import os, sys, argparse, subprocess\n'), ((2574, 2631), 'ipfml.metrics.coefficient_of_determination', 'metrics.coefficient_of_determination', (['y_test', 'y_predicted'], {}), '(y_test, y_predicted)\n', (2610, 2631), False, 'from ipfml import metrics\n'), ((2682, 2714), 'PIL.Image.open', 'Image.open', (['reference_image_path'], {}), '(reference_image_path)\n', (2692, 2714), False, 'from PIL import Image\n'), ((2741, 2772), 'PIL.Image.open', 'Image.open', (['_reconstructed_path'], {}), '(_reconstructed_path)\n', (2751, 2772), False, 'from PIL import Image\n'), ((2795, 2827), 'PIL.Image.open', 'Image.open', (['n_samples_image_path'], {}), '(n_samples_image_path)\n', (2805, 2827), False, 'from PIL import Image\n'), ((3675, 3738), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train model and saved it"""'}), "(description='Train model and saved it')\n", (3698, 3738), False, 'import os, sys, argparse, subprocess\n'), ((1359, 1395), 'os.path.exists', 'os.path.exists', (['n_samples_image_path'], {}), '(n_samples_image_path)\n', (1373, 1395), False, 'import os, sys, argparse, subprocess\n'), ((1698, 1734), 'os.path.exists', 'os.path.exists', (['reference_image_path'], {}), '(reference_image_path)\n', (1712, 1734), False, 'import os, sys, argparse, subprocess\n'), ((2120, 2132), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2129, 2132), False, 'import json\n'), ((2149, 2176), 'keras.models.model_from_json', 'model_from_json', (['json_model'], {}), '(json_model)\n', (2164, 2176), False, 'from keras.models import model_from_json\n'), ((3268, 3314), 'os.path.exists', 'os.path.exists', (['cfg.results_information_folder'], {}), '(cfg.results_information_folder)\n', (3282, 3314), False, 'import os, sys, argparse, subprocess\n'), ((3324, 3367), 'os.makedirs', 'os.makedirs', (['cfg.results_information_folder'], {}), '(cfg.results_information_folder)\n', (3335, 3367), False, 'import os, sys, argparse, subprocess\n')]
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from nlp.chatbot.dataset import data_utils
from nltk.translate.bleu_score import sentence_bleu
from tqdm import tqdm
import os,sys
import numpy as np
from nlp.chatbot import model as s2s_model
def test_bleu(count, args):
    print('Preparing data')
bucket_dbs = data_utils.read_bucket_dbs(args.buckets_dir)
buckets = data_utils.buckets
bucket_sizes = []
for i in range(len(buckets)):
bucket_size = bucket_dbs[i].size
bucket_sizes.append(bucket_size)
        print('bucket {} contains {} samples'.format(i, bucket_size))
total_size = sum(bucket_sizes)
    print('{} samples in total'.format(total_size))
if count <= 0:
count = total_size
buckets_scale = [
sum(bucket_sizes[:i + 1]) / total_size
for i in range(len(bucket_sizes))
]
with tf.Session() as sess:
model = s2s_model.create_model(sess, True)
model.batch_size = 1
sess.run(tf.initialize_all_variables())
model.saver.restore(sess, os.path.join(args.model_dir, args.model_name))
total_score = 0.0
for i in tqdm(range(count)):
random_number = np.random.random_sample()
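            # pick a bucket with probability proportional to its share of the data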
bucket_id = min([
i for i in range(len(buckets_scale))
if buckets_scale[i] > random_number
])
data, _ = model.get_batch_data(
bucket_dbs,
bucket_id
)
encoder_inputs, decoder_inputs, decoder_weights = model.get_batch(
bucket_dbs,
bucket_id,
data
)
_, _, output_logits = model.step(
sess,
encoder_inputs,
decoder_inputs,
decoder_weights,
bucket_id,
True
)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
ask, _ = data[0]
all_answers = bucket_dbs[bucket_id].all_answers(ask)
ret = data_utils.indice_sentence(outputs)
if not ret:
continue
references = [list(x) for x in all_answers]
score = sentence_bleu(
references,
list(ret),
weights=(1.0,)
)
total_score += score
        print('BLEU: {:.2f} in {} samples'.format(total_score / count * 10, count))
def test(args):
class TestBucket(object):
def __init__(self, sentence):
self.sentence = sentence
def random(self):
            return self.sentence, ''
buckets = data_utils.buckets
with tf.Session() as sess:
model = s2s_model.create_model(sess, True)
model.batch_size = 1
sess.run(tf.initialize_all_variables())
model.saver.restore(sess, os.path.join(args.model_dir, args.model_name))
sys.stdout.write("> ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
bucket_id = min([
b for b in range(len(buckets))
if buckets[b][0] > len(sentence)
])
data, _ = model.get_batch_data(
{bucket_id: TestBucket(sentence)},
bucket_id
)
encoder_inputs, decoder_inputs, decoder_weights = model.get_batch(
{bucket_id: TestBucket(sentence)},
bucket_id,
data
)
_, _, output_logits = model.step(
sess,
encoder_inputs,
decoder_inputs,
decoder_weights,
bucket_id,
True
)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
ret = data_utils.indice_sentence(outputs)
print(ret)
print("> ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
|
[
"nlp.chatbot.model.create_model",
"sys.stdout.write",
"numpy.random.random_sample",
"numpy.argmax",
"tensorflow.Session",
"nlp.chatbot.dataset.data_utils.indice_sentence",
"nlp.chatbot.dataset.data_utils.read_bucket_dbs",
"sys.stdout.flush",
"tensorflow.initialize_all_variables",
"os.path.join",
"sys.stdin.readline"
] |
[((307, 351), 'nlp.chatbot.dataset.data_utils.read_bucket_dbs', 'data_utils.read_bucket_dbs', (['args.buckets_dir'], {}), '(args.buckets_dir)\n', (333, 351), False, 'from nlp.chatbot.dataset import data_utils\n'), ((833, 845), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (843, 845), True, 'import tensorflow as tf\n'), ((871, 905), 'nlp.chatbot.model.create_model', 's2s_model.create_model', (['sess', '(True)'], {}), '(sess, True)\n', (893, 905), True, 'from nlp.chatbot import model as s2s_model\n'), ((2650, 2662), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2660, 2662), True, 'import tensorflow as tf\n'), ((2688, 2722), 'nlp.chatbot.model.create_model', 's2s_model.create_model', (['sess', '(True)'], {}), '(sess, True)\n', (2710, 2722), True, 'from nlp.chatbot import model as s2s_model\n'), ((2889, 2911), 'sys.stdout.write', 'sys.stdout.write', (['"""> """'], {}), "('> ')\n", (2905, 2911), False, 'import os, sys\n'), ((2920, 2938), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2936, 2938), False, 'import os, sys\n'), ((2958, 2978), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (2976, 2978), False, 'import os, sys\n'), ((952, 981), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (979, 981), True, 'import tensorflow as tf\n'), ((1017, 1062), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.model_name'], {}), '(args.model_dir, args.model_name)\n', (1029, 1062), False, 'import os, sys\n'), ((1156, 1181), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (1179, 1181), True, 'import numpy as np\n'), ((2033, 2068), 'nlp.chatbot.dataset.data_utils.indice_sentence', 'data_utils.indice_sentence', (['outputs'], {}), '(outputs)\n', (2059, 2068), False, 'from nlp.chatbot.dataset import data_utils\n'), ((2769, 2798), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2796, 2798), True, 'import tensorflow as tf\n'), ((2834, 2879), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.model_name'], {}), '(args.model_dir, args.model_name)\n', (2846, 2879), False, 'import os, sys\n'), ((3797, 3832), 'nlp.chatbot.dataset.data_utils.indice_sentence', 'data_utils.indice_sentence', (['outputs'], {}), '(outputs)\n', (3823, 3832), False, 'from nlp.chatbot.dataset import data_utils\n'), ((3900, 3918), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3916, 3918), False, 'import os, sys\n'), ((3942, 3962), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (3960, 3962), False, 'import os, sys\n'), ((1867, 1891), 'numpy.argmax', 'np.argmax', (['logit'], {'axis': '(1)'}), '(logit, axis=1)\n', (1876, 1891), True, 'import numpy as np\n'), ((3725, 3749), 'numpy.argmax', 'np.argmax', (['logit'], {'axis': '(1)'}), '(logit, axis=1)\n', (3734, 3749), True, 'import numpy as np\n')]
|
import numpy as np
import pyaudio
from pyaudio import PyAudio
from queue import Queue
import struct
from time import sleep
def get_steinberg_device_idx(pa: PyAudio) -> int:
"""
looks up the steinberg device index
"""
for i in range(pa.get_device_count()):
name = pa.get_device_info_by_index(i)['name']
if 'steinberg' in name.lower():
return i
raise Exception("Couldn't find steinberg audio device")
class Recorder:
def __init__(self, chunk_size=512, channels=1):
# for some reason, when chunk size is 1024 we observe some
        # non-random discontinuities in the signal every 1024*3 samples,
# which leads to very noticeable transients in the spectrogram
self.format = pyaudio.paFloat32
self.chunk_size = chunk_size
self.channels = channels
self.pa = PyAudio()
self.frame_queue = Queue()
self.device_idx = get_steinberg_device_idx(self.pa)
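        # record at the device's default sample rate as reported by PortAudio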
self.sr = int(self.pa.get_device_info_by_index(self.device_idx)['defaultSampleRate'])
def _get_callback(self):
def cb(input_data, frame_cnt, time_info, status_flags):
self.frame_queue.put(input_data)
return (None, pyaudio.paContinue)
return cb
def start_record(self):
self.stream = self.pa.open(
input_device_index=self.device_idx,
rate=self.sr,
format=self.format,
channels=self.channels,
input=True,
stream_callback=self._get_callback(),
frames_per_buffer=self.chunk_size)
def stop_record(self):
self.stream.stop_stream()
# unpacker = struct.Struct('f' * self.chunk_size)
# input_data = None # TODO
# output = []
# output += unpacker.unpack(input_data)
def read_queue(self):
s = struct.Struct('f'*self.chunk_size)
y = []
while not self.frame_queue.empty():
y += s.unpack(self.frame_queue.get())
return np.array(y)
if __name__ == '__main__':
r = Recorder()
r.start_record()
sleep(2)
r.stop_record()
print(r.read_queue())
|
[
"struct.Struct",
"time.sleep",
"numpy.array",
"pyaudio.PyAudio",
"queue.Queue"
] |
[((2126, 2134), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2131, 2134), False, 'from time import sleep\n'), ((859, 868), 'pyaudio.PyAudio', 'PyAudio', ([], {}), '()\n', (866, 868), False, 'from pyaudio import PyAudio\n'), ((896, 903), 'queue.Queue', 'Queue', ([], {}), '()\n', (901, 903), False, 'from queue import Queue\n'), ((1882, 1918), 'struct.Struct', 'struct.Struct', (["('f' * self.chunk_size)"], {}), "('f' * self.chunk_size)\n", (1895, 1918), False, 'import struct\n'), ((2041, 2052), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2049, 2052), True, 'import numpy as np\n')]
|
import tensorflow as tf
from nets.network import Network
import numpy as np
# !! The default data format used here is NHWC !!
# TODO: scope
def conv_bn(X, inChannel, outChannel, kernel, istrain, stride=1, name=None):
out = tf.layers.conv2d(X, outChannel, kernel, stride, 'same', use_bias=False, name=name)
out = tf.layers.batch_normalization(out, training=istrain,
name=name.replace('res', 'bn').replace('conv1', 'bn_conv1'))
return out
def conv_bn_relu(X, inChannel, outChannel, kernel, istrain, stride=1, use_bias=False, name=None):
out = conv_bn(X, inChannel, outChannel, kernel, istrain, stride=stride, name=name)
out = tf.nn.relu(out)
return out
def residual_conv(X, inChannel, interChannel, outputChannel, transition, istrain=False, name=None):
if transition:
init_stride = 2
else:
init_stride = 1
if inChannel == outputChannel:
skip_out = X
else:
skip_out = conv_bn(X, inChannel, outputChannel, 1, istrain,
stride=init_stride, name=name+'_branch1')
conv_out = conv_bn_relu(X, inChannel, interChannel, 1, istrain, name=name+'_branch2a')
conv_out = conv_bn_relu(conv_out, interChannel, interChannel, 3, istrain,
stride=init_stride, name=name+'_branch2b')
conv_out = conv_bn(conv_out, interChannel, outputChannel, 1, istrain, name=name+'_branch2c')
out = tf.nn.relu(skip_out + conv_out)
return out
def residual_block(X, inChannel, interChannel, outputChannel, numLayers,
transition=True, istrain=False, number_name=True, name=None):
if number_name and numLayers > 3:
names = [name+'a'] + [name+'b'+str(i+1) for i in range(numLayers-1)]
else:
names = [name+chr(ord('a')+i) for i in range(numLayers)]
out = residual_conv(X, inChannel, interChannel, outputChannel,
transition=transition, istrain=istrain, name=names[0])
for i in range(numLayers - 1):
out = residual_conv(out, outputChannel, interChannel, outputChannel,
transition=False, istrain=istrain, name=names[i+1])
return out
class ResNet(Network):
def __init__(self, scope=None, istrain=False, reuse=False):
super(ResNet, self).__init__(scope)
self.reuse = reuse
self.istrain = istrain
def _build_resnet(self, numBlock1, numBlock2, numBlock3, numBlock4):
number_name = (self._scope != 'resnet50')
self.conv1 = conv_bn_relu(self.input, 3, 64, 7, istrain=self.istrain, stride=2, name='conv1')
self.pool1 = tf.layers.max_pooling2d(self.conv1, 3, 2, padding='same')
self.conv2 = residual_block(self.pool1, 64, 64, 256, numBlock1, transition=False,
istrain=self.istrain, number_name=number_name, name='res2')
self.conv3 = residual_block(self.conv2, 256, 128, 512, numBlock2,
istrain=self.istrain, number_name=number_name, name='res3')
self.conv4 = residual_block(self.conv3, 512, 256, 1024, numBlock3,
istrain=self.istrain, number_name=number_name, name='res4')
self.conv5 = residual_block(self.conv4, 1024, 512, 2048, numBlock4,
istrain=self.istrain, number_name=number_name, name='res5')
self.pool5 = tf.layers.average_pooling2d(self.conv5, 7, 1)
self.pool5_flat = tf.layers.flatten(self.pool5)
self.scores = tf.layers.dense(self.pool5_flat, 1000, name='fc1000')
return self.scores
def find_key_name(self, var):
key, name = var.name.split('/')[-2:]
if 'kernel' in name or 'weight' in name:
name = 'weights'
elif 'bias' in name:
name = 'biases'
elif 'mean' in name:
name = 'mean'
elif 'variance' in name:
name = 'variance'
elif 'gamma' in name:
name = 'scale'
elif 'beta' in name:
name = 'offset'
else:
raise Exception('%s is not defined in official resnet deploy.txt'%name)
return key, name
def load(self, sess, pretrained_file):
data = np.load(pretrained_file).item()
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self._scope)
for var in variables:
key, name = self.find_key_name(var)
sess.run(var.assign(data[key][name]))
class ResNet50(ResNet):
def __init__(self, *args, **kwargs):
super(ResNet50, self).__init__('resnet50', *args, **kwargs)
def _build_network(self):
return self._build_resnet(3, 4, 6, 3)
class ResNet101(ResNet):
def __init__(self, *args, **kwargs):
super(ResNet101, self).__init__('resnet101', *args, **kwargs)
def _build_network(self):
return self._build_resnet(3, 4, 23, 3)
class ResNet152(ResNet):
def __init__(self, *args, **kwargs):
super(ResNet152, self).__init__('resnet152', *args, **kwargs)
def _build_network(self):
return self._build_resnet(3, 8, 36, 3)
|
[
"numpy.load",
"tensorflow.nn.relu",
"tensorflow.get_collection",
"tensorflow.layers.dense",
"tensorflow.layers.flatten",
"tensorflow.layers.average_pooling2d",
"tensorflow.layers.conv2d",
"tensorflow.layers.max_pooling2d"
] |
[((228, 314), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['X', 'outChannel', 'kernel', 'stride', '"""same"""'], {'use_bias': '(False)', 'name': 'name'}), "(X, outChannel, kernel, stride, 'same', use_bias=False,\n name=name)\n", (244, 314), True, 'import tensorflow as tf\n'), ((661, 676), 'tensorflow.nn.relu', 'tf.nn.relu', (['out'], {}), '(out)\n', (671, 676), True, 'import tensorflow as tf\n'), ((1424, 1455), 'tensorflow.nn.relu', 'tf.nn.relu', (['(skip_out + conv_out)'], {}), '(skip_out + conv_out)\n', (1434, 1455), True, 'import tensorflow as tf\n'), ((2607, 2664), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', (['self.conv1', '(3)', '(2)'], {'padding': '"""same"""'}), "(self.conv1, 3, 2, padding='same')\n", (2630, 2664), True, 'import tensorflow as tf\n'), ((3386, 3431), 'tensorflow.layers.average_pooling2d', 'tf.layers.average_pooling2d', (['self.conv5', '(7)', '(1)'], {}), '(self.conv5, 7, 1)\n', (3413, 3431), True, 'import tensorflow as tf\n'), ((3458, 3487), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['self.pool5'], {}), '(self.pool5)\n', (3475, 3487), True, 'import tensorflow as tf\n'), ((3510, 3563), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.pool5_flat', '(1000)'], {'name': '"""fc1000"""'}), "(self.pool5_flat, 1000, name='fc1000')\n", (3525, 3563), True, 'import tensorflow as tf\n'), ((4272, 4339), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'self._scope'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=self._scope)\n', (4289, 4339), True, 'import tensorflow as tf\n'), ((4220, 4244), 'numpy.load', 'np.load', (['pretrained_file'], {}), '(pretrained_file)\n', (4227, 4244), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# This is the part of the codes used for the article entitled "A Deep Learning
# Approach for Assessment of Regional Wall Motion Abnormality from
# Echocardiographic Images" for JACC CV imaging.
#
# Before using this code, please prepare image data at "./data_folder" dir.
#
# ./data_folder/train/Norm
# ./data_folder/train/LAD
# ./data_folder/train/LCXD
# ./data_folder/train/RCA
#
# ./data_folder/test/Norm
# ./data_folder/test/LAD
# ./data_folder/test/LCX
# ./data_folder/test/RCA
#
# Each dir should have echocardiographic images (.png is recommended and .jpg
# acceptable) that contains endo-diastolic, mid-systolic, and endo-systolic
# phases. We put endo-diastolic for red color image channel, mid-systolic for
# Green and endo-systolic for Blue image channle with Python3.5 programming
# language with PIL and numpy libraries.
#
# This code was used with
# OS: Ubuntu 14.04LTS
# Programming language: Python 3.5 Anaconda
# Deep Learning library: tensorflow-gpu 1.4.1, Keras 2.1.5
# CUDA toolkit 8.0, CuDNN v5.1
# Python libraries: numpy 1.14.2, Pillow 5.0.0
#
#
# If NeuralNet == "Xception":
#   this code takes about 4 min for training (100 epochs, 320 train/valid)
# with core i7 6850K, RAM 256GB, NVMe SSD w 3.5" HDD, 1080ti.
import os, keras
import numpy as np
from datetime import datetime
from PIL import Image
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adam
# to select which neuralnetwork to use
#NeuralNet = 'VGG16' # ILSVRC image classification top-1 accuracy of 0.715
#NeuralNet = 'VGG19' # ILSVRC image classification top-1 accuracy of 0.727
NeuralNet = 'ResNet50' # ILSVRC image classification top-1 accuracy of 0.759
#NeuralNet = 'DenseNet201' # ILSVRC image classification top-1 accuracy of 0.770
#NeuralNet = 'InceptionV3' # ILSVRC image classification top-1 accuracy of 0.788
#NeuralNet = 'Xception' # ILSVRC image classification top-1 accuracy of 0.790
#NeuralNet = 'IncResV2' # ILSVRC image classification top-1 accuracy of 0.804
# making training data
image_list = []
label_list = []
for dir_name in os.listdir("data_folder/train"):
dir_train = "data_folder/train/" + dir_name
label = 0
if dir_name == "LAD":
label = 0
elif dir_name == "LCX":
label = 1
elif dir_name == "RCA":
label = 2
elif dir_name == "Norm":
label = 3
for file_name in os.listdir(dir_train):
label_list.append(label)
filepath = dir_train + "/" + file_name
if NeuralNet == 'Xception':
image = np.array(Image.open(filepath).resize((128, 128)))
else:
image = np.array(Image.open(filepath).resize((224, 224)))
image_list.append(image / 255)
image_list = np.array(image_list)
label_list = to_categorical(label_list)
#making neural network
if NeuralNet == 'VGG16':
print('NeuralNetwork: VGG16.\nILSVRC top-1 accuracy of 0.715')
DCNN = keras.applications.vgg16.VGG16(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'VGG19':
    print('NeuralNetwork: VGG19.\nILSVRC top-1 accuracy of 0.727')
DCNN = keras.applications.vgg19.VGG19(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'ResNet50':
print('NeuralNetwork: ResNet50.\nILSVRC top-1 accuracy of 0.759')
DCNN = keras.applications.resnet50.ResNet50(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'DenseNet201':
print('NeuralNetwork: DenseNet201.\nILSVRC top-1 accuracy of 0.770')
DCNN = keras.applications.densenet.DenseNet201(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'InceptionV3':
print('NeuralNetwork: InceptionV3.\nILSVRC top-1 accuracy of 0.788')
DCNN = keras.applications.inception_v3.InceptionV3(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'Xception':
print('NeuralNetwork: Xception.\nILSVRC top-1 accuracy of 0.790')
DCNN = keras.applications.xception.Xception(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'IncResV2':
print('NeuralNetwork: Inception-ResNet-V2.\nILSVRC top-1 accuracy of 0.804')
DCNN = keras.applications.inception_resnet_v2.InceptionResNetV2(
include_top=True, input_tensor=None, pooling=None, classes=1000)
else:
print('error, no neural network.')
opt = Adam(lr = 0.0001)
model = Sequential()
model.add((DCNN))
model.add(Dropout(0.5))
model.add(Dense(4))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy",
optimizer=opt, metrics=["accuracy"])
#training
print('training')
model.fit(image_list, label_list,
epochs=100, batch_size=16, validation_split=0.2)
#saving post-trained model
prefix = datetime.now().strftime("%Y"+"_"+"%m%d"+"_"+"%H%M")
save_name = NeuralNet + '_' + prefix + '.h5'
model.save_weights(save_name)
print('saving post-trained model:', save_name)
print('finished training.')
print('finished: train_DCNN.py')
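# Illustrative helper (added sketch, not part of the original study code): the
# header comment describes packing the end-diastolic, mid-systolic and
# end-systolic frames into the R, G and B channels of one image. A minimal
# version of that packing step could look like this; the function name, file
# paths and grayscale conversion are assumptions for illustration only.
def pack_phases_to_rgb(ed_path, ms_path, es_path, out_path, size=(224, 224)):
    ed = np.array(Image.open(ed_path).convert('L').resize(size))  # end-diastole -> R
    ms = np.array(Image.open(ms_path).convert('L').resize(size))  # mid-systole  -> G
    es = np.array(Image.open(es_path).convert('L').resize(size))  # end-systole  -> B
    rgb = np.stack([ed, ms, es], axis=-1).astype(np.uint8)
    Image.fromarray(rgb).save(out_path)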
|
[
"keras.applications.xception.Xception",
"keras.applications.inception_resnet_v2.InceptionResNetV2",
"keras.layers.Activation",
"keras.layers.Dropout",
"keras.optimizers.Adam",
"PIL.Image.open",
"keras.applications.vgg19.VGG19",
"keras.applications.resnet50.ResNet50",
"keras.utils.np_utils.to_categorical",
"keras.layers.Dense",
"numpy.array",
"keras.applications.densenet.DenseNet201",
"keras.applications.inception_v3.InceptionV3",
"keras.applications.vgg16.VGG16",
"keras.models.Sequential",
"datetime.datetime.now",
"os.listdir"
] |
[((2248, 2279), 'os.listdir', 'os.listdir', (['"""data_folder/train"""'], {}), "('data_folder/train')\n", (2258, 2279), False, 'import os, keras\n'), ((2895, 2915), 'numpy.array', 'np.array', (['image_list'], {}), '(image_list)\n', (2903, 2915), True, 'import numpy as np\n'), ((2929, 2955), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['label_list'], {}), '(label_list)\n', (2943, 2955), False, 'from keras.utils.np_utils import to_categorical\n'), ((4609, 4624), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (4613, 4624), False, 'from keras.optimizers import Adam\n'), ((4636, 4648), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4646, 4648), False, 'from keras.models import Sequential\n'), ((2549, 2570), 'os.listdir', 'os.listdir', (['dir_train'], {}), '(dir_train)\n', (2559, 2570), False, 'import os, keras\n'), ((3083, 3183), 'keras.applications.vgg16.VGG16', 'keras.applications.vgg16.VGG16', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None, pooling\n =None, classes=1000)\n', (3113, 3183), False, 'import os, keras\n'), ((4677, 4689), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4684, 4689), False, 'from keras.layers import Activation, Dense, Dropout\n'), ((4701, 4709), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (4706, 4709), False, 'from keras.layers import Activation, Dense, Dropout\n'), ((4721, 4742), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (4731, 4742), False, 'from keras.layers import Activation, Dense, Dropout\n'), ((3293, 3393), 'keras.applications.vgg19.VGG19', 'keras.applications.vgg19.VGG19', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None, pooling\n =None, classes=1000)\n', (3323, 3393), False, 'import os, keras\n'), ((5001, 5015), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5013, 5015), False, 'from datetime import datetime\n'), ((3509, 3614), 'keras.applications.resnet50.ResNet50', 'keras.applications.resnet50.ResNet50', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None,\n pooling=None, classes=1000)\n', (3545, 3614), False, 'import os, keras\n'), ((3737, 3845), 'keras.applications.densenet.DenseNet201', 'keras.applications.densenet.DenseNet201', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None,\n pooling=None, classes=1000)\n', (3776, 3845), False, 'import os, keras\n'), ((3968, 4081), 'keras.applications.inception_v3.InceptionV3', 'keras.applications.inception_v3.InceptionV3', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=\n None, pooling=None, classes=1000)\n', (4011, 4081), False, 'import os, keras\n'), ((2717, 2737), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (2727, 2737), False, 'from PIL import Image\n'), ((2801, 2821), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (2811, 2821), False, 'from PIL import Image\n'), ((4197, 4302), 'keras.applications.xception.Xception', 'keras.applications.xception.Xception', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True, input_tensor=None,\n pooling=None, 
classes=1000)\n', (4233, 4302), False, 'import os, keras\n'), ((4430, 4555), 'keras.applications.inception_resnet_v2.InceptionResNetV2', 'keras.applications.inception_resnet_v2.InceptionResNetV2', ([], {'include_top': '(True)', 'input_tensor': 'None', 'pooling': 'None', 'classes': '(1000)'}), '(include_top=True,\n input_tensor=None, pooling=None, classes=1000)\n', (4486, 4555), False, 'import os, keras\n')]
|
import os
import numpy as np
import torch
import torch.nn as nn
from .pu_net import PUNet
class SORDefense(nn.Module):
"""Statistical outlier removal as defense.
"""
def __init__(self, k=2, alpha=1.1):
"""SOR defense.
Args:
k (int, optional): kNN. Defaults to 2.
            alpha (float, optional): \mu + \alpha * std. Defaults to 1.1.
"""
super(SORDefense, self).__init__()
self.k = k
self.alpha = alpha
def outlier_removal(self, x):
"""Removes large kNN distance points.
Args:
x (torch.FloatTensor): batch input pc, [B, K, 3]
Returns:
torch.FloatTensor: pc after outlier removal, [B, N, 3]
"""
pc = x.clone().detach().double()
B, K = pc.shape[:2]
pc = pc.transpose(2, 1) # [B, 3, K]
inner = -2. * torch.matmul(pc.transpose(2, 1), pc) # [B, K, K]
xx = torch.sum(pc ** 2, dim=1, keepdim=True) # [B, 1, K]
dist = xx + inner + xx.transpose(2, 1) # [B, K, K]
assert dist.min().item() >= -1e-6
# the min is self so we take top (k + 1)
neg_value, _ = (-dist).topk(k=self.k + 1, dim=-1) # [B, K, k + 1]
value = -(neg_value[..., 1:]) # [B, K, k]
value = torch.mean(value, dim=-1) # [B, K]
mean = torch.mean(value, dim=-1) # [B]
std = torch.std(value, dim=-1) # [B]
threshold = mean + self.alpha * std # [B]
bool_mask = (value <= threshold[:, None]) # [B, K]
sel_pc = [x[i][bool_mask[i]] for i in range(B)]
return sel_pc
def forward(self, x):
with torch.no_grad():
x = self.outlier_removal(x)
return x
class DUPNet(nn.Module):
def __init__(self, sor_k=2, sor_alpha=1.1,
npoint=1024, up_ratio=4):
super(DUPNet, self).__init__()
self.npoint = npoint
self.sor = SORDefense(k=sor_k, alpha=sor_alpha)
self.pu_net = PUNet(npoint=self.npoint, up_ratio=up_ratio,
use_normal=False, use_bn=False, use_res=False)
def process_data(self, pc, npoint=None):
"""Process point cloud data to be suitable for
PU-Net input.
We do two things:
sample npoint or duplicate to npoint.
Args:
pc (torch.FloatTensor): list input, [(N_i, 3)] from SOR.
Need to pad or trim to [B, self.npoint, 3].
"""
if npoint is None:
npoint = self.npoint
B = len(pc)
proc_pc = torch.zeros((B, npoint, 3)).float().cuda()
for pc_idx in range(B):
one_pc = pc[pc_idx]
# [N_i, 3]
N = len(one_pc)
if N > npoint:
# random sample some of them
idx = np.random.choice(N, npoint, replace=False)
idx = torch.from_numpy(idx).long().cuda()
one_pc = one_pc[idx]
elif N < npoint:
# just duplicate to the number
duplicated_pc = one_pc
num = npoint // N - 1
for i in range(num):
duplicated_pc = torch.cat([
duplicated_pc, one_pc
], dim=0)
num = npoint - len(duplicated_pc)
# random sample the remaining
idx = np.random.choice(N, num, replace=False)
idx = torch.from_numpy(idx).long().cuda()
one_pc = torch.cat([
duplicated_pc, one_pc[idx]
], dim=0)
proc_pc[pc_idx] = one_pc
return proc_pc
def forward(self, x):
# import pdb; pdb.set_trace()
with torch.no_grad():
x = self.sor(x) # a list of pc
x = self.process_data(x) # to batch input
x = self.pu_net(x) # [B, N * r, 3]
return x
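# Minimal smoke-test sketch (added for illustration; the batch size and point
# count are made-up numbers). DUPNet itself is not exercised here because it
# expects CUDA tensors and pretrained PU-Net weights, so only the CPU-friendly
# SOR defense defined above is run.
def _demo_sor_defense():
    toy_pc = torch.randn(2, 1024, 3)   # [B, K, 3] batch of random point clouds
    sor = SORDefense(k=2, alpha=1.1)
    cleaned = sor(toy_pc)             # list of [N_i, 3] tensors, N_i <= 1024
    return [tuple(pc.shape) for pc in cleaned]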
|
[
"torch.mean",
"torch.cat",
"torch.std",
"numpy.random.choice",
"torch.zeros",
"torch.no_grad",
"torch.sum",
"torch.from_numpy"
] |
[((938, 977), 'torch.sum', 'torch.sum', (['(pc ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(pc ** 2, dim=1, keepdim=True)\n', (947, 977), False, 'import torch\n'), ((1284, 1309), 'torch.mean', 'torch.mean', (['value'], {'dim': '(-1)'}), '(value, dim=-1)\n', (1294, 1309), False, 'import torch\n'), ((1335, 1360), 'torch.mean', 'torch.mean', (['value'], {'dim': '(-1)'}), '(value, dim=-1)\n', (1345, 1360), False, 'import torch\n'), ((1382, 1406), 'torch.std', 'torch.std', (['value'], {'dim': '(-1)'}), '(value, dim=-1)\n', (1391, 1406), False, 'import torch\n'), ((1643, 1658), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1656, 1658), False, 'import torch\n'), ((3726, 3741), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3739, 3741), False, 'import torch\n'), ((2810, 2852), 'numpy.random.choice', 'np.random.choice', (['N', 'npoint'], {'replace': '(False)'}), '(N, npoint, replace=False)\n', (2826, 2852), True, 'import numpy as np\n'), ((3380, 3419), 'numpy.random.choice', 'np.random.choice', (['N', 'num'], {'replace': '(False)'}), '(N, num, replace=False)\n', (3396, 3419), True, 'import numpy as np\n'), ((3503, 3549), 'torch.cat', 'torch.cat', (['[duplicated_pc, one_pc[idx]]'], {'dim': '(0)'}), '([duplicated_pc, one_pc[idx]], dim=0)\n', (3512, 3549), False, 'import torch\n'), ((2558, 2585), 'torch.zeros', 'torch.zeros', (['(B, npoint, 3)'], {}), '((B, npoint, 3))\n', (2569, 2585), False, 'import torch\n'), ((3174, 3215), 'torch.cat', 'torch.cat', (['[duplicated_pc, one_pc]'], {'dim': '(0)'}), '([duplicated_pc, one_pc], dim=0)\n', (3183, 3215), False, 'import torch\n'), ((2875, 2896), 'torch.from_numpy', 'torch.from_numpy', (['idx'], {}), '(idx)\n', (2891, 2896), False, 'import torch\n'), ((3442, 3463), 'torch.from_numpy', 'torch.from_numpy', (['idx'], {}), '(idx)\n', (3458, 3463), False, 'import torch\n')]
|
## @ingroup Methods-Weights-Correlations-FLOPS
# operating_items.py
#
# Created: May 2020, <NAME>
# Modified:
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Units, Data
import numpy as np
## @ingroup Methods-Weights-Correlations-FLOPS
def operating_items_FLOPS(vehicle):
""" Calculate the weight of operating items, including:
- crew
- baggage
- unusable fuel
- engine oil
- passenger service
- ammunition and non-fixed weapons
- cargo containers
Assumptions:
If no tanks are specified, 5 fuel tanks are assumed (includes main and auxiliary tanks)
        If the number of coach seats is not defined, then it is assumed that 5%
        of the seats are first class and an additional 10% are business class.
If the number of coach seats is defined, then the additional seats are 1/4 first class
and 3/4 business class
Source:
The Flight Optimization System Weight Estimation Method
Inputs:
vehicle - data dictionary with vehicle properties [dimensionless]
-.networks: data dictionary containing all propulsion properties
-.number_of_engines: number of engines
-.sealevel_static_thrust: thrust at sea level [N]
-.reference_area: wing surface area [m^2]
-.mass_properties.max_takeoff: MTOW [kilograms]
-.passengers: number of passengers in aircraft
-.design_mach_number: design mach number for cruise flight
-.design_range: design range of aircraft [nmi]
-.mass_properties.cargo: weight of cargo carried [kilograms]
Outputs:
output - data dictionary with weights [kilograms]
- output.oper_items: unusable fuel, engine oil, passenger service weight and cargo containers
- output.flight_crew: flight crew weight
- output.flight_attendants: flight attendants weight
- output.total: total operating items weight
Properties Used:
N/A
"""
network_name = list(vehicle.networks.keys())[0]
networks = vehicle.networks[network_name]
NENG = networks.number_of_engines
THRUST = networks.sealevel_static_thrust * 1 / Units.lbf
SW = vehicle.reference_area / Units.ft ** 2
FMXTOT = vehicle.mass_properties.max_zero_fuel / Units.lbs
DESRNG = vehicle.design_range / Units.nmi
VMAX = vehicle.design_mach_number
NTANK = 0 # number of fuel tanks
for wing in vehicle.wings:
NTANK += len(wing.Fuel_Tanks)
for fuselage in vehicle.fuselages:
NTANK += len(fuselage.Fuel_Tanks)
if NTANK == 0:
NTANK = 5
WUF = 11.5 * NENG * THRUST ** 0.2 + 0.07 * SW + 1.6 * NTANK * FMXTOT ** 0.28 # unusable fuel weight
WOIL = 0.082 * NENG * THRUST ** 0.65 # engine oil weight
if hasattr(vehicle.fuselages['fuselage'], 'number_coach_seats'):
NPT = vehicle.fuselages['fuselage'].number_coach_seats # number of economy passengers
        NPF = (vehicle.passengers - NPT) / 4. # number of first class passengers
        NPB = vehicle.passengers - NPF - NPT # number of business passengers
else:
NPF = vehicle.passengers / 20.
NPB = vehicle.passengers / 10.
NPT = vehicle.passengers - NPF - NPB
vehicle.NPF = NPF
vehicle.NPB = NPB
vehicle.NPT = NPT
WSRV = (5.164 * NPF + 3.846 * NPB + 2.529 * NPT) * (DESRNG / VMAX) ** 0.255 # passenger service weight
WCON = 175 * np.ceil(vehicle.mass_properties.cargo / Units.lbs * 1. / 950) # cargo container weight
if vehicle.passengers >= 150:
NFLCR = 3 # number of flight crew
NGALC = 1 + np.floor(vehicle.passengers / 250.) # number of galley crew
else:
NFLCR = 2
NGALC = 0
if vehicle.passengers < 51:
NFLA = 1 # number of flight attendants, NSTU in FLOPS
else:
NFLA = 1 + np.floor(vehicle.passengers / 40.)
WFLAAB = NFLA * 155 + NGALC * 200 # flight attendant weight, WSTUAB in FLOPS
WFLCRB = NFLCR * 225 # flight crew and baggage weight
output = Data()
output.operating_items_less_crew = WUF * Units.lbs + WOIL * Units.lbs + WSRV * Units.lbs + WCON * Units.lbs
output.flight_crew = WFLCRB * Units.lbs
output.flight_attendants = WFLAAB * Units.lbs
output.total = output.operating_items_less_crew + output.flight_crew + \
output.flight_attendants
return output
|
[
"numpy.floor",
"numpy.ceil",
"SUAVE.Core.Data"
] |
[((4650, 4656), 'SUAVE.Core.Data', 'Data', ([], {}), '()\n', (4654, 4656), False, 'from SUAVE.Core import Units, Data\n'), ((4016, 4078), 'numpy.ceil', 'np.ceil', (['(vehicle.mass_properties.cargo / Units.lbs * 1.0 / 950)'], {}), '(vehicle.mass_properties.cargo / Units.lbs * 1.0 / 950)\n', (4023, 4078), True, 'import numpy as np\n'), ((4202, 4238), 'numpy.floor', 'np.floor', (['(vehicle.passengers / 250.0)'], {}), '(vehicle.passengers / 250.0)\n', (4210, 4238), True, 'import numpy as np\n'), ((4433, 4468), 'numpy.floor', 'np.floor', (['(vehicle.passengers / 40.0)'], {}), '(vehicle.passengers / 40.0)\n', (4441, 4468), True, 'import numpy as np\n')]
|
import os
import numpy as np
import tensorflow as tf
def save_weights_resnet152_10channel():
# Initialize configuration
required_input_shape = (7, 7, 10, 64)
output_file_prefix = "resnet152_10channel"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights
new_weights[:, :, 3:6, :] = input_layer_weights
new_weights[:, :, 6:9, :] = input_layer_weights
weights[0] = new_weights
# Save the new weights
np.save(os.path.join(os.getcwd(), 'data', output_file_prefix + "_mask_weights.npy"), weights)
def save_weights_resnet152_6channel(allpretrained=False):
# Initialize configuration
required_input_shape = (7, 7, 6, 64)
output_file_prefix = "resnet152_6channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_opticalflow_weights.npy"), weights)
def save_weights_resnet152_15channel(allpretrained=False):
# Initialize configuration
required_input_shape = (7, 7, 15, 64)
output_file_prefix = "resnet152_15channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights # First image.
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights # OpticalFlow-1_2.
new_weights[:, :, 6:9, :] = input_layer_weights # Second image.
if allpretrained:
new_weights[:, :, 9:12, :] = input_layer_weights # OpticalFlow-2_3.
new_weights[:, :, 12:15, :] = input_layer_weights # Third image.
# Reassign new weights.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_opticalflow_weights.npy"), weights)
def save_weights_resnet152_13channel(allpretrained=False):
"""
image_1 (3) + image_2 (3) + image_3 (3) + opticalflow_average (3) + MOG2_mask (1)
"""
# Initialize configuration
required_input_shape = (7, 7, 13, 64)
output_file_prefix = "resnet152_13channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights # First image.
new_weights[:, :, 3:6, :] = input_layer_weights # Second image.
new_weights[:, :, 6:9, :] = input_layer_weights # Third image.
if allpretrained:
new_weights[:, :, 9:12, :] = input_layer_weights # OpticalFlow-average.
# Mask always uses newly initialized weights.
# Reassign new weights.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_hybrid_weights.npy"), weights)
def save_weights_resnet152_16channel(allpretrained=False):
"""
image_1 (3) + opticalflow_1 (3) + image_2 (3) + opticalflow_2 (3) + image_3 (3) + MOG2_mask (1)
"""
# Initialize configuration
required_input_shape = (7, 7, 16, 64)
output_file_prefix = "resnet152_16channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights # First image.
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights # OpticalFlow-1_2.
new_weights[:, :, 6:9, :] = input_layer_weights # Second image.
if allpretrained:
new_weights[:, :, 9:12, :] = input_layer_weights # OpticalFlow-2_3.
new_weights[:, :, 12:15, :] = input_layer_weights # Third image.
# Mask always uses newly initialized weights.
# Reassign new weights.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_hybrid_weights.npy"), weights)
def save_weights_resnet152_7channel(allpretrained=False):
# Initialize configuration
required_input_shape = (7, 7, 7, 64)
output_file_prefix = "resnet152_7channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights
# 7th Channel for Mask uses the randomly initialized weights. Therefore, leave it as it is.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_maskopticalflow_weights.npy"), weights)
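# Usage sketch (added for illustration): generate the adapted 6-channel weights
# with the helper above and check the first convolution kernel's shape. The
# relative output path simply mirrors the np.save call inside
# save_weights_resnet152_6channel and is an assumption, not a guaranteed layout.
def _demo_check_6channel_weights():
    save_weights_resnet152_6channel(allpretrained=True)
    path = os.path.join(
        "..", "data", "resnet152_6channel_allpretrained_opticalflow_weights.npy")
    weights = np.load(path, allow_pickle=True)
    print("first conv kernel shape:", weights[0].shape)  # expected (7, 7, 6, 64)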
|
[
"tensorflow.keras.applications.ResNet152",
"os.getcwd",
"os.path.join",
"numpy.random.normal"
] |
[((280, 350), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (311, 350), True, 'import tensorflow as tf\n'), ((645, 693), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (661, 693), True, 'import numpy as np\n'), ((1335, 1405), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (1366, 1405), True, 'import tensorflow as tf\n'), ((1700, 1748), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (1716, 1748), True, 'import numpy as np\n'), ((2368, 2438), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (2399, 2438), True, 'import tensorflow as tf\n'), ((2733, 2781), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (2749, 2781), True, 'import numpy as np\n'), ((3836, 3906), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (3867, 3906), True, 'import tensorflow as tf\n'), ((4201, 4249), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (4217, 4249), True, 'import numpy as np\n'), ((5319, 5389), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (5350, 5389), True, 'import tensorflow as tf\n'), ((5684, 5732), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (5700, 5732), True, 'import numpy as np\n'), ((6782, 6852), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (6813, 6852), True, 'import tensorflow as tf\n'), ((7147, 7195), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.001)', 'required_input_shape'], {}), '(0, 0.001, required_input_shape)\n', (7163, 7195), True, 'import numpy as np\n'), ((1948, 2023), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_opticalflow_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_opticalflow_weights.npy')\n", (1960, 2023), False, 'import os\n'), ((3313, 3388), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_opticalflow_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_opticalflow_weights.npy')\n", (3325, 3388), False, 'import os\n'), ((4787, 4857), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_hybrid_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_hybrid_weights.npy')\n", (4799, 4857), False, 'import os\n'), ((6370, 6440), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_hybrid_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_hybrid_weights.npy')\n", (6382, 
6440), False, 'import os\n'), ((7492, 7571), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', "(output_file_prefix + '_maskopticalflow_weights.npy')"], {}), "('..', 'data', output_file_prefix + '_maskopticalflow_weights.npy')\n", (7504, 7571), False, 'import os\n'), ((931, 942), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (940, 942), False, 'import os\n')]
|
'''
=========================================
Inference for Non-Linear Gaussian Systems
=========================================
This module contains the Unscented Kalman Filter (Wan, <NAME> 2000)
for state estimation in systems with non-Gaussian noise and non-linear dynamics
'''
from collections import namedtuple
import numpy as np
from numpy import ma
from scipy import linalg
from .utils import array1d, array2d, check_random_state, get_params, preprocess_arguments
from .standard import _last_dims, _determine_dimensionality, _arg_or_default
# represents a collection of sigma points and their associated weights. one
# point per row
SigmaPoints = namedtuple(
'SigmaPoints',
['points', 'weights_mean', 'weights_covariance']
)
# represents mean and covariance of a multivariate normal distribution
Moments = namedtuple('Moments', ['mean', 'covariance'])
def points2moments(points, sigma_noise=None):
'''Calculate estimated mean and covariance of sigma points
Parameters
----------
points : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
SigmaPoints object containing points and weights
sigma_noise : [n_dim_state, n_dim_state] array
additive noise covariance matrix, if any
Returns
-------
moments : Moments object of size [n_dim_state]
Mean and covariance estimated using points
'''
(points, weights_mu, weights_sigma) = points
mu = points.T.dot(weights_mu)
points_diff = points.T - mu[:, np.newaxis]
sigma = points_diff.dot(np.diag(weights_sigma)).dot(points_diff.T)
if sigma_noise is not None:
sigma = sigma + sigma_noise
return Moments(mu.ravel(), sigma)
def moments2points(moments, alpha=None, beta=None, kappa=None):
'''Calculate "sigma points" used in Unscented Kalman Filter
Parameters
----------
moments : [n_dim] Moments object
mean and covariance of a multivariate normal
alpha : float
Spread of the sigma points. Typically 1e-3.
beta : float
Used to "incorporate prior knowledge of the distribution of the state".
        2 is optimal if the state is normally distributed.
kappa : float
        a secondary scaling parameter, typically set to 3 - n_dim
Returns
-------
points : [2*n_dim+1, n_dim] SigmaPoints
sigma points and associated weights
'''
(mu, sigma) = moments
n_dim = len(mu)
mu = array2d(mu, dtype=float)
if alpha is None:
alpha = 1.0
if beta is None:
beta = 0.0
if kappa is None:
kappa = 3.0 - n_dim
# compute sqrt(sigma)
sigma2 = linalg.cholesky(sigma).T
# Calculate scaling factor for all off-center points
lamda = (alpha * alpha) * (n_dim + kappa) - n_dim
c = n_dim + lamda
# calculate the sigma points; that is,
# mu
# mu + each column of sigma2 * sqrt(c)
# mu - each column of sigma2 * sqrt(c)
# Each column of points is one of these.
points = np.tile(mu.T, (1, 2 * n_dim + 1))
points[:, 1:(n_dim + 1)] += sigma2 * np.sqrt(c)
points[:, (n_dim + 1):] -= sigma2 * np.sqrt(c)
# Calculate weights
weights_mean = np.ones(2 * n_dim + 1)
weights_mean[0] = lamda / c
weights_mean[1:] = 0.5 / c
weights_cov = np.copy(weights_mean)
weights_cov[0] = lamda / c + (1 - alpha * alpha + beta)
return SigmaPoints(points.T, weights_mean, weights_cov)
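# Small self-check (added for illustration, not part of the original module):
# converting a Gaussian's moments to sigma points and back recovers the
# original mean and covariance, since the unscented transform is exact for the
# identity map. The numbers below are arbitrary.
def _demo_moments_roundtrip():
    mu = np.array([1.0, -2.0])
    sigma = np.array([[2.0, 0.3], [0.3, 1.0]])
    recovered = points2moments(moments2points(Moments(mu, sigma)))
    assert np.allclose(recovered.mean, mu)
    assert np.allclose(recovered.covariance, sigma)
    return recovered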
def unscented_transform(points, f=None, points_noise=None, sigma_noise=None):
'''Apply the Unscented Transform to a set of points
Apply f to points (with secondary argument points_noise, if available),
then approximate the resulting mean and covariance. If sigma_noise is
available, treat it as additional variance due to additive noise.
Parameters
----------
points : [n_points, n_dim_state] SigmaPoints
points to pass into f's first argument and associated weights if f is
defined. If f is unavailable, then f is assumed to be the identity
function.
f : [n_dim_state, n_dim_state_noise] -> [n_dim_state] function
transition function from time t to time t+1, if available.
points_noise : [n_points, n_dim_state_noise] array
points to pass into f's second argument, if any
sigma_noise : [n_dim_state, n_dim_state] array
covariance matrix for additive noise, if any
Returns
-------
points_pred : [n_points, n_dim_state] SigmaPoints
points transformed by f with same weights
moments_pred : [n_dim_state] Moments
moments associated with points_pred
'''
n_points, n_dim_state = points.points.shape
(points, weights_mean, weights_covariance) = points
# propagate points through f
if f is not None:
if points_noise is None:
points_pred = [f(points[i]) for i in range(n_points)]
else:
points_noise = points_noise.points
points_pred = [f(points[i], points_noise[i]) for i in range(n_points)]
else:
points_pred = points
# make each row a predicted point
points_pred = np.vstack(points_pred)
points_pred = SigmaPoints(points_pred, weights_mean, weights_covariance)
# calculate approximate mean, covariance
moments_pred = points2moments(points_pred, sigma_noise)
return (points_pred, moments_pred)
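# Small demo (added for illustration, not part of the original module):
# propagate sigma points through a simple non-linear map and read off the
# approximated moments of the result; the toy distribution is arbitrary.
def _demo_unscented_transform():
    moments = Moments(np.array([0.5]), np.array([[0.04]]))
    points = moments2points(moments)
    _, moments_pred = unscented_transform(points, f=np.sin)
    return moments_pred  # approximate mean/covariance of sin(x), x ~ N(0.5, 0.04)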
def unscented_correct(cross_sigma, moments_pred, obs_moments_pred, z):
'''Correct predicted state estimates with an observation
Parameters
----------
cross_sigma : [n_dim_state, n_dim_obs] array
cross-covariance between the state at time t given all observations
from timesteps [0, t-1] and the observation at time t
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t given observations from
timesteps [0, t-1]
obs_moments_pred : [n_dim_obs] Moments
mean and covariance of observation at time t given observations from
times [0, t-1]
z : [n_dim_obs] array
observation at time t
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t given observations from time
steps [0, t]
'''
mu_pred, sigma_pred = moments_pred
obs_mu_pred, obs_sigma_pred = obs_moments_pred
n_dim_state = len(mu_pred)
n_dim_obs = len(obs_mu_pred)
if not np.any(ma.getmask(z)):
# calculate Kalman gain
K = cross_sigma.dot(linalg.pinv(obs_sigma_pred))
# correct mu, sigma
mu_filt = mu_pred + K.dot(z - obs_mu_pred)
sigma_filt = sigma_pred - K.dot(cross_sigma.T)
else:
# no corrections to be made
mu_filt = mu_pred
sigma_filt = sigma_pred
return Moments(mu_filt, sigma_filt)
def augmented_points(momentses):
'''Calculate sigma points for augmented UKF
Parameters
----------
momentses : list of Moments
means and covariances for multiple multivariate normals
Returns
-------
pointses : list of Points
sigma points for each element of momentses
'''
# stack everything together
means, covariances = zip(*momentses)
mu_aug = np.concatenate(means)
sigma_aug = linalg.block_diag(*covariances)
moments_aug = Moments(mu_aug, sigma_aug)
# turn augmented representation into sigma points
points_aug = moments2points(moments_aug)
# unstack everything
dims = [len(m) for m in means]
result = []
start = 0
for i in range(len(dims)):
end = start + dims[i]
part = SigmaPoints(
points_aug.points[:, start:end],
points_aug.weights_mean,
points_aug.weights_covariance
)
result.append(part)
start = end
# return
return result
def augmented_unscented_filter_points(mean_state, covariance_state,
covariance_transition,
covariance_observation):
"""Extract sigma points using augmented state representation
Primarily used as a pre-processing step before predicting and updating in
the Augmented UKF.
Parameters
----------
mean_state : [n_dim_state] array
mean of state at time t given observations from time steps 0...t
covariance_state : [n_dim_state, n_dim_state] array
covariance of state at time t given observations from time steps 0...t
covariance_transition : [n_dim_state, n_dim_state] array
covariance of zero-mean noise resulting from transitioning from time
step t to t+1
covariance_observation : [n_dim_obs, n_dim_obs] array
covariance of zero-mean noise resulting from observation state at time
t+1
Returns
-------
points_state : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
sigma points for state at time t
points_transition : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
sigma points for transition noise between time t and t+1
points_observation : [2 * n_dim_state + 1, n_dim_obs] SigmaPoints
sigma points for observation noise at time step t+1
"""
# get sizes of dimensions
n_dim_state = covariance_state.shape[0]
n_dim_obs = covariance_observation.shape[0]
# extract sigma points using augmented representation
state_moments = Moments(mean_state, covariance_state)
transition_noise_moments = (
Moments(np.zeros(n_dim_state), covariance_transition)
)
observation_noise_moments = (
Moments(np.zeros(n_dim_obs), covariance_observation)
)
(points_state, points_transition, points_observation) = (
augmented_points([
state_moments,
transition_noise_moments,
observation_noise_moments
])
)
return (points_state, points_transition, points_observation)
def unscented_filter_predict(transition_function, points_state,
points_transition=None,
sigma_transition=None):
"""Predict next state distribution
Using the sigma points representing the state at time t given observations
from time steps 0...t, calculate the predicted mean, covariance, and sigma
points for the state at time t+1.
Parameters
----------
transition_function : function
function describing how the state changes between times t and t+1
points_state : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the state at time step t given
observations from time steps 0...t
points_transition : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the noise in transitioning from time step
t to t+1, if available. If not, assumes that noise is additive
sigma_transition : [n_dim_state, n_dim_state] array
covariance corresponding to additive noise in transitioning from time
step t to t+1, if available. If not, assumes noise is not additive.
Returns
-------
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to state at time step t+1 given observations
from time steps 0...t. These points have not been "standardized" by the
unscented transform yet.
moments_pred : [n_dim_state] Moments
mean and covariance corresponding to time step t+1 given observations
from time steps 0...t
"""
assert points_transition is not None or sigma_transition is not None, \
"Your system is noiseless? really?"
(points_pred, moments_pred) = (
unscented_transform(
points_state, transition_function,
points_noise=points_transition, sigma_noise=sigma_transition
)
)
return (points_pred, moments_pred)
def unscented_filter_correct(observation_function, moments_pred,
points_pred, observation,
points_observation=None,
sigma_observation=None):
"""Integrate new observation to correct state estimates
Parameters
----------
observation_function : function
function characterizing how the observation at time t+1 is generated
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to moments_pred
observation : [n_dim_state] array
observation at time t+1. If masked, treated as missing.
points_observation : [2*n_dim_state, n_dim_obs] SigmaPoints
sigma points corresponding to predicted observation at time t+1 given
observations from times 0...t, if available. If not, noise is assumed
to be additive.
sigma_observation : [n_dim_obs, n_dim_obs] array
covariance matrix corresponding to additive noise in observation at
        time t+1, if available. If missing, noise is assumed to be non-additive.
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t+1
"""
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
(obs_points_pred, obs_moments_pred) = (
unscented_transform(
points_pred, observation_function,
points_noise=points_observation, sigma_noise=sigma_observation
)
)
# Calculate Cov(x_t, z_t | z_{0:t-1})
sigma_pair = (
((points_pred.points - moments_pred.mean).T)
.dot(np.diag(points_pred.weights_mean))
.dot(obs_points_pred.points - obs_moments_pred.mean)
)
# Calculate E[x_t | z_{0:t}], Var(x_t | z_{0:t})
moments_filt = unscented_correct(sigma_pair, moments_pred, obs_moments_pred, observation)
return moments_filt
def augmented_unscented_filter(mu_0, sigma_0, f, g, Q, R, Z):
'''Apply the Unscented Kalman Filter with arbitrary noise
Parameters
----------
mu_0 : [n_dim_state] array
mean of initial state distribution
sigma_0 : [n_dim_state, n_dim_state] array
covariance of initial state distribution
f : function or [T-1] array of functions
state transition function(s). Takes in an the current state and the
process noise and outputs the next state.
g : function or [T] array of functions
observation function(s). Takes in the current state and outputs the
current observation.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
    R : [n_dim_obs, n_dim_obs] array
        observation covariance matrix
    Z : [T, n_dim_obs] array
        Z[t] = observation at time t
Returns
-------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times [0,
t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
'''
# extract size of key components
T = Z.shape[0]
n_dim_state = Q.shape[-1]
n_dim_obs = R.shape[-1]
# construct container for results
mu_filt = np.zeros((T, n_dim_state))
sigma_filt = np.zeros((T, n_dim_state, n_dim_state))
# TODO use _augumented_unscented_filter_update here
for t in range(T):
# Calculate sigma points for augmented state:
# [actual state, transition noise, observation noise]
if t == 0:
mu, sigma = mu_0, sigma_0
else:
mu, sigma = mu_filt[t - 1], sigma_filt[t - 1]
# extract sigma points using augmented representation
(points_state, points_transition, points_observation) = (
augmented_unscented_filter_points(mu, sigma, Q, R)
)
# Calculate E[x_t | z_{0:t-1}], Var(x_t | z_{0:t-1}) and sigma points
# for P(x_t | z_{0:t-1})
if t == 0:
points_pred = points_state
moments_pred = points2moments(points_pred)
else:
transition_function = _last_dims(f, t - 1, ndims=1)[0]
(points_pred, moments_pred) = (
unscented_filter_predict(
transition_function, points_state,
points_transition=points_transition
)
)
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
observation_function = _last_dims(g, t, ndims=1)[0]
mu_filt[t], sigma_filt[t] = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
Z[t], points_observation=points_observation
)
)
return (mu_filt, sigma_filt)
def augmented_unscented_smoother(mu_filt, sigma_filt, f, Q):
'''Apply the Unscented Kalman Smoother with arbitrary noise
Parameters
----------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times
[0, t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
f : function or [T-1] array of functions
state transition function(s). Takes in an the current state and the
process noise and outputs the next state.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
Returns
-------
mu_smooth : [T, n_dim_state] array
mu_smooth[t] = mean of state at time t given observations from times
[0, T-1]
sigma_smooth : [T, n_dim_state, n_dim_state] array
sigma_smooth[t] = covariance of state at time t given observations from
times [0, T-1]
'''
# extract size of key parts of problem
T, n_dim_state = mu_filt.shape
# instantiate containers for results
mu_smooth = np.zeros(mu_filt.shape)
sigma_smooth = np.zeros(sigma_filt.shape)
mu_smooth[-1], sigma_smooth[-1] = mu_filt[-1], sigma_filt[-1]
for t in reversed(range(T - 1)):
# get sigma points for [state, transition noise]
mu = mu_filt[t]
sigma = sigma_filt[t]
moments_state = Moments(mu, sigma)
moments_transition_noise = Moments(np.zeros(n_dim_state), Q)
(points_state, points_transition) = (
augmented_points([moments_state, moments_transition_noise])
)
# compute E[x_{t+1} | z_{0:t}], Var(x_{t+1} | z_{0:t})
f_t = _last_dims(f, t, ndims=1)[0]
(points_pred, moments_pred) = unscented_transform(
points_state, f_t, points_noise=points_transition
)
# Calculate Cov(x_{t+1}, x_t | z_{0:t-1})
sigma_pair = (
(points_pred.points - moments_pred.mean).T
.dot(np.diag(points_pred.weights_covariance))
.dot(points_state.points - moments_state.mean).T
)
# compute smoothed mean, covariance
smoother_gain = sigma_pair.dot(linalg.pinv(moments_pred.covariance))
mu_smooth[t] = (
mu_filt[t]
+ smoother_gain
.dot(mu_smooth[t + 1] - moments_pred.mean)
)
sigma_smooth[t] = (
sigma_filt[t]
+ smoother_gain
.dot(sigma_smooth[t + 1] - moments_pred.covariance)
.dot(smoother_gain.T)
)
return (mu_smooth, sigma_smooth)
def additive_unscented_filter(mu_0, sigma_0, f, g, Q, R, Z):
'''Apply the Unscented Kalman Filter with additive noise
Parameters
----------
mu_0 : [n_dim_state] array
mean of initial state distribution
sigma_0 : [n_dim_state, n_dim_state] array
covariance of initial state distribution
f : function or [T-1] array of functions
state transition function(s). Takes in an the current state and outputs
the next.
g : function or [T] array of functions
observation function(s). Takes in the current state and outputs the
current observation.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
    R : [n_dim_obs, n_dim_obs] array
        observation covariance matrix
    Z : [T, n_dim_obs] array
        Z[t] = observation at time t
Returns
-------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times [0,
t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
'''
# extract size of key components
T = Z.shape[0]
n_dim_state = Q.shape[-1]
n_dim_obs = R.shape[-1]
# construct container for results
mu_filt = np.zeros((T, n_dim_state))
sigma_filt = np.zeros((T, n_dim_state, n_dim_state))
for t in range(T):
# Calculate sigma points for P(x_{t-1} | z_{0:t-1})
if t == 0:
mu, sigma = mu_0, sigma_0
else:
mu, sigma = mu_filt[t - 1], sigma_filt[t - 1]
points_state = moments2points(Moments(mu, sigma))
# Calculate E[x_t | z_{0:t-1}], Var(x_t | z_{0:t-1})
if t == 0:
points_pred = points_state
moments_pred = points2moments(points_pred)
else:
transition_function = _last_dims(f, t - 1, ndims=1)[0]
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state, sigma_transition=Q
)
)
points_pred = moments2points(moments_pred)
# Calculate E[x_t | z_{0:t}], Var(x_t | z_{0:t})
observation_function = _last_dims(g, t, ndims=1)[0]
mu_filt[t], sigma_filt[t] = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
Z[t], sigma_observation=R
)
)
return (mu_filt, sigma_filt)
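# Usage sketch (added for illustration, not part of the original module): run
# the additive-noise filter above on a scalar random walk observed through the
# identity map. The toy model, noise levels and seed are assumptions.
def _demo_additive_filter(T=20, seed=0):
    rng = np.random.RandomState(seed)
    states = np.cumsum(rng.normal(scale=0.1, size=T))
    Z = (states + rng.normal(scale=0.5, size=T))[:, np.newaxis]  # [T, 1] observations
    f = [lambda x: x]           # random-walk transition
    g = [lambda x: x]           # identity observation
    Q = np.array([[0.01]])          # transition noise covariance
    R = np.array([[0.25]])          # observation noise covariance
    return additive_unscented_filter(np.zeros(1), np.eye(1), f, g, Q, R, Z)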
def additive_unscented_smoother(mu_filt, sigma_filt, f, Q):
    '''Apply the Unscented Kalman Smoother assuming additive noise
Parameters
----------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times
[0, t]
sigma_filt : [T, n_dim_state, n_dim_state] array
sigma_filt[t] = covariance of state at time t given observations from
times [0, t]
f : function or [T-1] array of functions
state transition function(s). Takes in an the current state and outputs
the next.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
Returns
-------
mu_smooth : [T, n_dim_state] array
mu_smooth[t] = mean of state at time t given observations from times
[0, T-1]
sigma_smooth : [T, n_dim_state, n_dim_state] array
sigma_smooth[t] = covariance of state at time t given observations from
times [0, T-1]
'''
# extract size of key parts of problem
T, n_dim_state = mu_filt.shape
# instantiate containers for results
mu_smooth = np.zeros(mu_filt.shape)
sigma_smooth = np.zeros(sigma_filt.shape)
mu_smooth[-1], sigma_smooth[-1] = mu_filt[-1], sigma_filt[-1]
for t in reversed(range(T - 1)):
# get sigma points for state
mu = mu_filt[t]
sigma = sigma_filt[t]
moments_state = Moments(mu, sigma)
points_state = moments2points(moments_state)
# compute E[x_{t+1} | z_{0:t}], Var(x_{t+1} | z_{0:t})
f_t = _last_dims(f, t, ndims=1)[0]
(points_pred, moments_pred) = (
unscented_transform(points_state, f_t, sigma_noise=Q)
)
# Calculate Cov(x_{t+1}, x_t | z_{0:t-1})
sigma_pair = (
(points_pred.points - moments_pred.mean).T
.dot(np.diag(points_pred.weights_covariance))
.dot(points_state.points - moments_state.mean).T
)
# compute smoothed mean, covariance
smoother_gain = sigma_pair.dot(linalg.pinv(moments_pred.covariance))
mu_smooth[t] = (
mu_filt[t]
+ smoother_gain
.dot(mu_smooth[t + 1] - moments_pred.mean)
)
sigma_smooth[t] = (
sigma_filt[t]
+ smoother_gain
.dot(sigma_smooth[t + 1] - moments_pred.covariance)
.dot(smoother_gain.T)
)
return (mu_smooth, sigma_smooth)
class UnscentedMixin(object):
"""Methods shared by all Unscented Kalman Filter implementations."""
def __init__(self, transition_functions=None, observation_functions=None,
transition_covariance=None, observation_covariance=None,
initial_state_mean=None, initial_state_covariance=None,
n_dim_state=None, n_dim_obs=None, random_state=None):
# determine size of state and observation space
n_dim_state = _determine_dimensionality(
[(transition_covariance, array2d, -2),
(initial_state_covariance, array2d, -2),
(initial_state_mean, array1d, -1)],
n_dim_state
)
n_dim_obs = _determine_dimensionality(
[(observation_covariance, array2d, -2)],
n_dim_obs
)
# set parameters
self.transition_functions = transition_functions
self.observation_functions = observation_functions
self.transition_covariance = transition_covariance
self.observation_covariance = observation_covariance
self.initial_state_mean = initial_state_mean
self.initial_state_covariance = initial_state_covariance
self.n_dim_state = n_dim_state
self.n_dim_obs = n_dim_obs
self.random_state = random_state
def _initialize_parameters(self):
"""Retrieve parameters if they exist, else replace with defaults"""
arguments = get_params(self)
defaults = self._default_parameters()
converters = self._converters()
processed = preprocess_arguments([arguments, defaults], converters)
return (
processed['transition_functions'],
processed['observation_functions'],
processed['transition_covariance'],
processed['observation_covariance'],
processed['initial_state_mean'],
processed['initial_state_covariance']
)
def _parse_observations(self, obs):
"""Safely convert observations to their expected format"""
obs = ma.atleast_2d(obs)
if obs.shape[0] == 1 and obs.shape[1] > 1:
obs = obs.T
return obs
def _converters(self):
return {
'transition_functions': array1d,
'observation_functions': array1d,
'transition_covariance': array2d,
'observation_covariance': array2d,
'initial_state_mean': array1d,
'initial_state_covariance': array2d,
'n_dim_state': int,
'n_dim_obs': int,
'random_state': check_random_state,
}
class UnscentedKalmanFilter(UnscentedMixin):
r'''Implements the General (aka Augmented) Unscented Kalman Filter governed
by the following equations,
.. math::
x_0 &\sim \text{Normal}(\mu_0, \Sigma_0) \\
x_{t+1} &= f_t(x_t, \text{Normal}(0, Q)) \\
z_{t} &= g_t(x_t, \text{Normal}(0, R))
Notice that although the input noise to the state transition equation and
the observation equation are both normally distributed, any non-linear
transformation may be applied afterwards. This allows for greater
generality, but at the expense of computational complexity. The complexity
of :class:`UnscentedKalmanFilter.filter()` is :math:`O(T(2n+m)^3)`
where :math:`T` is the number of time steps, :math:`n` is the size of the
state space, and :math:`m` is the size of the observation space.
If your noise is simply additive, consider using the
:class:`AdditiveUnscentedKalmanFilter`
Parameters
----------
transition_functions : function or [n_timesteps-1] array of functions
transition_functions[t] is a function of the state and the transition
noise at time t and produces the state at time t+1. Also known as
:math:`f_t`.
observation_functions : function or [n_timesteps] array of functions
observation_functions[t] is a function of the state and the observation
noise at time t and produces the observation at time t. Also known as
:math:`g_t`.
transition_covariance : [n_dim_state, n_dim_state] array
transition noise covariance matrix. Also known as :math:`Q`.
observation_covariance : [n_dim_obs, n_dim_obs] array
observation noise covariance matrix. Also known as :math:`R`.
initial_state_mean : [n_dim_state] array
mean of initial state distribution. Also known as :math:`\mu_0`
initial_state_covariance : [n_dim_state, n_dim_state] array
covariance of initial state distribution. Also known as
:math:`\Sigma_0`
n_dim_state: optional, integer
the dimensionality of the state space. Only meaningful when you do not
specify initial values for `transition_covariance`, or
`initial_state_mean`, `initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_covariance`.
random_state : optional, int or RandomState
seed for random sample generation
'''
def sample(self, n_timesteps, initial_state=None, random_state=None):
'''Sample from model defined by the Unscented Kalman Filter
Parameters
----------
n_timesteps : int
number of time steps
initial_state : optional, [n_dim_state] array
initial state. If unspecified, will be sampled from initial state
distribution.
random_state : optional, int or Random
random number generator
'''
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_dim_state = transition_covariance.shape[-1]
n_dim_obs = observation_covariance.shape[-1]
# logic for instantiating rng
if random_state is None:
rng = check_random_state(self.random_state)
else:
rng = check_random_state(random_state)
# logic for selecting initial state
if initial_state is None:
initial_state = rng.multivariate_normal(
initial_state_mean, initial_state_covariance
)
# logic for generating samples
x = np.zeros((n_timesteps, n_dim_state))
z = np.zeros((n_timesteps, n_dim_obs))
for t in range(n_timesteps):
if t == 0:
x[0] = initial_state
else:
transition_function = (
_last_dims(transition_functions, t - 1, ndims=1)[0]
)
transition_noise = (
rng.multivariate_normal(
np.zeros(n_dim_state),
transition_covariance.newbyteorder('=')
)
)
x[t] = transition_function(x[t - 1], transition_noise)
observation_function = (
_last_dims(observation_functions, t, ndims=1)[0]
)
observation_noise = (
rng.multivariate_normal(
np.zeros(n_dim_obs),
observation_covariance.newbyteorder('=')
)
)
z[t] = observation_function(x[t], observation_noise)
return (x, ma.asarray(z))
def filter(self, Z):
'''Run Unscented Kalman Filter
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state] array
filtered_state_means[t] = mean of state distribution at time t given
observations from times [0, t]
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
filtered_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, t]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = (
augmented_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
return (filtered_state_means, filtered_state_covariances)
def filter_update(self,
filtered_state_mean, filtered_state_covariance,
observation=None,
transition_function=None, transition_covariance=None,
observation_function=None, observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
        given an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
[1...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
times [1...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_function : optional, function
state transition function from time t to t+1. If unspecified,
`self.transition_functions` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_function : optional, function
observation function at time t+1. If unspecified,
`self.observation_functions` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
[1...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
from times [1...t+1]
"""
# initialize parameters
(transition_functions, observation_functions,
transition_cov, observation_cov,
_, _) = (
self._initialize_parameters()
)
def default_function(f, arr):
if f is None:
assert len(arr) == 1
f = arr[0]
return f
transition_function = default_function(
transition_function, transition_functions
)
observation_function = default_function(
observation_function, observation_functions
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# make sigma points
(points_state, points_transition, points_observation) = (
augmented_unscented_filter_points(
filtered_state_mean, filtered_state_covariance,
transition_covariance, observation_covariance
)
)
# predict
(points_pred, moments_pred) = (
unscented_filter_predict(
transition_function, points_state, points_transition
)
)
# correct
next_filtered_state_mean, next_filtered_state_covariance = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
observation, points_observation=points_observation
)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
def smooth(self, Z):
'''Run Unscented Kalman Smoother
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
smoothed_state_means : [n_timesteps, n_dim_state] array
            smoothed_state_means[t] = mean of state distribution at time t given
observations from times [0, n_timesteps-1]
smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
            smoothed_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, n_timesteps-1]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = self.filter(Z)
(smoothed_state_means, smoothed_state_covariances) = (
augmented_unscented_smoother(
filtered_state_means, filtered_state_covariances,
transition_functions, transition_covariance
)
)
return (smoothed_state_means, smoothed_state_covariances)
def _default_parameters(self):
return {
'transition_functions': lambda state, noise: state + noise,
'observation_functions': lambda state, noise: state + noise,
'transition_covariance': np.eye(self.n_dim_state),
'observation_covariance': np.eye(self.n_dim_obs),
'initial_state_mean': np.zeros(self.n_dim_state),
'initial_state_covariance': np.eye(self.n_dim_state),
'random_state': 0,
}
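# --- Hedged usage sketch (added for illustration; not part of the original
# module). A toy 1-dimensional run of the general-noise filter defined above;
# the lambdas, covariances and observations below are invented purely as an
# example of the constructor/filter/smooth/filter_update calls.
def _example_general_unscented_usage():
    ukf = UnscentedKalmanFilter(
        transition_functions=lambda state, noise: state + np.sin(noise),
        observation_functions=lambda state, noise: state + noise,
        transition_covariance=0.1,
        observation_covariance=0.1,
        initial_state_mean=0.0,
        initial_state_covariance=1.0,
        random_state=0,
    )
    filt_means, filt_covs = ukf.filter([0.0, 1.0, 2.0])
    smooth_means, smooth_covs = ukf.smooth([0.0, 1.0, 2.0])
    # one-step streaming update from the last filtered estimate
    next_mean, next_cov = ukf.filter_update(
        filt_means[-1], filt_covs[-1], observation=[2.5]
    )
    return next_mean, next_cov, smooth_means, smooth_covs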
class AdditiveUnscentedKalmanFilter(UnscentedMixin):
r'''Implements the Unscented Kalman Filter with additive noise.
Observations are assumed to be generated from the following process,
.. math::
x_0 &\sim \text{Normal}(\mu_0, \Sigma_0) \\
x_{t+1} &= f_t(x_t) + \text{Normal}(0, Q) \\
z_{t} &= g_t(x_t) + \text{Normal}(0, R)
    While less general than the general-noise Unscented Kalman Filter, the Additive
version is more computationally efficient with complexity :math:`O(Tn^3)`
where :math:`T` is the number of time steps and :math:`n` is the size of
the state space.
Parameters
----------
transition_functions : function or [n_timesteps-1] array of functions
transition_functions[t] is a function of the state at time t and
produces the state at time t+1. Also known as :math:`f_t`.
observation_functions : function or [n_timesteps] array of functions
observation_functions[t] is a function of the state at time t and
produces the observation at time t. Also known as :math:`g_t`.
transition_covariance : [n_dim_state, n_dim_state] array
transition noise covariance matrix. Also known as :math:`Q`.
observation_covariance : [n_dim_obs, n_dim_obs] array
observation noise covariance matrix. Also known as :math:`R`.
initial_state_mean : [n_dim_state] array
mean of initial state distribution. Also known as :math:`\mu_0`.
initial_state_covariance : [n_dim_state, n_dim_state] array
covariance of initial state distribution. Also known as
:math:`\Sigma_0`.
n_dim_state: optional, integer
the dimensionality of the state space. Only meaningful when you do not
specify initial values for `transition_covariance`, or
`initial_state_mean`, `initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_covariance`.
random_state : optional, int or RandomState
seed for random sample generation
'''
def sample(self, n_timesteps, initial_state=None, random_state=None):
'''Sample from model defined by the Unscented Kalman Filter
Parameters
----------
n_timesteps : int
number of time steps
initial_state : optional, [n_dim_state] array
initial state. If unspecified, will be sampled from initial state
distribution.
'''
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_dim_state = transition_covariance.shape[-1]
n_dim_obs = observation_covariance.shape[-1]
# logic for instantiating rng
if random_state is None:
rng = check_random_state(self.random_state)
else:
rng = check_random_state(random_state)
# logic for selecting initial state
if initial_state is None:
initial_state = (
rng.multivariate_normal(
initial_state_mean,
initial_state_covariance
)
)
# logic for generating samples
x = np.zeros((n_timesteps, n_dim_state))
z = np.zeros((n_timesteps, n_dim_obs))
for t in range(n_timesteps):
if t == 0:
x[0] = initial_state
else:
transition_function = (
_last_dims(transition_functions, t - 1, ndims=1)[0]
)
transition_noise = (
rng.multivariate_normal(
np.zeros(n_dim_state),
transition_covariance.newbyteorder('=')
)
)
x[t] = transition_function(x[t - 1]) + transition_noise
observation_function = (
_last_dims(observation_functions, t, ndims=1)[0]
)
observation_noise = (
rng.multivariate_normal(
np.zeros(n_dim_obs),
observation_covariance.newbyteorder('=')
)
)
z[t] = observation_function(x[t]) + observation_noise
return (x, ma.asarray(z))
def filter(self, Z):
'''Run Unscented Kalman Filter
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state] array
filtered_state_means[t] = mean of state distribution at time t given
observations from times [0, t]
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
filtered_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, t]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = (
additive_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
return (filtered_state_means, filtered_state_covariances)
def filter_update(self,
filtered_state_mean, filtered_state_covariance,
observation=None,
transition_function=None, transition_covariance=None,
observation_function=None, observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
        given an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
[1...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
times [1...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_function : optional, function
state transition function from time t to t+1. If unspecified,
`self.transition_functions` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_function : optional, function
observation function at time t+1. If unspecified,
`self.observation_functions` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
[1...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
from times [1...t+1]
"""
# initialize parameters
(transition_functions, observation_functions,
transition_cov, observation_cov,
_, _) = (
self._initialize_parameters()
)
def default_function(f, arr):
if f is None:
assert len(arr) == 1
f = arr[0]
return f
transition_function = default_function(
transition_function, transition_functions
)
observation_function = default_function(
observation_function, observation_functions
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# make sigma points
moments_state = Moments(filtered_state_mean, filtered_state_covariance)
points_state = moments2points(moments_state)
# predict
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state,
sigma_transition=transition_covariance
)
)
points_pred = moments2points(moments_pred)
# correct
(next_filtered_state_mean, next_filtered_state_covariance) = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
observation, sigma_observation=observation_covariance
)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
def smooth(self, Z):
'''Run Unscented Kalman Smoother
Parameters
----------
        Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
smoothed_state_means : [n_timesteps, n_dim_state] array
            smoothed_state_means[t] = mean of state distribution at time t given
observations from times [0, n_timesteps-1]
smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
            smoothed_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, n_timesteps-1]
'''
Z = ma.asarray(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
(filtered_state_means, filtered_state_covariances) = self.filter(Z)
(smoothed_state_means, smoothed_state_covariances) = (
additive_unscented_smoother(
filtered_state_means, filtered_state_covariances,
transition_functions, transition_covariance
)
)
return (smoothed_state_means, smoothed_state_covariances)
def _default_parameters(self):
return {
'transition_functions': lambda state: state,
'observation_functions': lambda state: state,
'transition_covariance': np.eye(self.n_dim_state),
'observation_covariance': np.eye(self.n_dim_obs),
'initial_state_mean': np.zeros(self.n_dim_state),
'initial_state_covariance': np.eye(self.n_dim_state),
'random_state': 0,
}
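# --- Hedged usage sketch (added for illustration; not part of the original
# module). The additive-noise variant with a toy nonlinear transition; all
# numbers and functions below are invented purely as an example.
if __name__ == "__main__":
    akf = AdditiveUnscentedKalmanFilter(
        transition_functions=lambda state: np.sin(state),
        observation_functions=lambda state: state,
        transition_covariance=0.05,
        observation_covariance=0.5,
        initial_state_mean=0.0,
        initial_state_covariance=1.0,
    )
    observations = [0.0, 0.5, 1.0, 1.5]
    filtered_means, filtered_covs = akf.filter(observations)
    smoothed_means, smoothed_covs = akf.smooth(observations)
    print(filtered_means.ravel(), smoothed_means.ravel())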
|
[
"numpy.diag",
"numpy.concatenate",
"numpy.copy",
"numpy.eye",
"scipy.linalg.block_diag",
"scipy.linalg.cholesky",
"numpy.zeros",
"numpy.ones",
"numpy.ma.atleast_2d",
"numpy.tile",
"collections.namedtuple",
"numpy.ma.getmask",
"numpy.ma.asarray",
"scipy.linalg.pinv",
"numpy.vstack",
"numpy.sqrt"
] |
[((681, 756), 'collections.namedtuple', 'namedtuple', (['"""SigmaPoints"""', "['points', 'weights_mean', 'weights_covariance']"], {}), "('SigmaPoints', ['points', 'weights_mean', 'weights_covariance'])\n", (691, 756), False, 'from collections import namedtuple\n'), ((850, 895), 'collections.namedtuple', 'namedtuple', (['"""Moments"""', "['mean', 'covariance']"], {}), "('Moments', ['mean', 'covariance'])\n", (860, 895), False, 'from collections import namedtuple\n'), ((2959, 2992), 'numpy.tile', 'np.tile', (['mu.T', '(1, 2 * n_dim + 1)'], {}), '(mu.T, (1, 2 * n_dim + 1))\n', (2966, 2992), True, 'import numpy as np\n'), ((3140, 3162), 'numpy.ones', 'np.ones', (['(2 * n_dim + 1)'], {}), '(2 * n_dim + 1)\n', (3147, 3162), True, 'import numpy as np\n'), ((3244, 3265), 'numpy.copy', 'np.copy', (['weights_mean'], {}), '(weights_mean)\n', (3251, 3265), True, 'import numpy as np\n'), ((5065, 5087), 'numpy.vstack', 'np.vstack', (['points_pred'], {}), '(points_pred)\n', (5074, 5087), True, 'import numpy as np\n'), ((7140, 7161), 'numpy.concatenate', 'np.concatenate', (['means'], {}), '(means)\n', (7154, 7161), True, 'import numpy as np\n'), ((7178, 7209), 'scipy.linalg.block_diag', 'linalg.block_diag', (['*covariances'], {}), '(*covariances)\n', (7195, 7209), False, 'from scipy import linalg\n'), ((15128, 15154), 'numpy.zeros', 'np.zeros', (['(T, n_dim_state)'], {}), '((T, n_dim_state))\n', (15136, 15154), True, 'import numpy as np\n'), ((15172, 15211), 'numpy.zeros', 'np.zeros', (['(T, n_dim_state, n_dim_state)'], {}), '((T, n_dim_state, n_dim_state))\n', (15180, 15211), True, 'import numpy as np\n'), ((17803, 17826), 'numpy.zeros', 'np.zeros', (['mu_filt.shape'], {}), '(mu_filt.shape)\n', (17811, 17826), True, 'import numpy as np\n'), ((17846, 17872), 'numpy.zeros', 'np.zeros', (['sigma_filt.shape'], {}), '(sigma_filt.shape)\n', (17854, 17872), True, 'import numpy as np\n'), ((20574, 20600), 'numpy.zeros', 'np.zeros', (['(T, n_dim_state)'], {}), '((T, n_dim_state))\n', (20582, 20600), True, 'import numpy as np\n'), ((20618, 20657), 'numpy.zeros', 'np.zeros', (['(T, n_dim_state, n_dim_state)'], {}), '((T, n_dim_state, n_dim_state))\n', (20626, 20657), True, 'import numpy as np\n'), ((22901, 22924), 'numpy.zeros', 'np.zeros', (['mu_filt.shape'], {}), '(mu_filt.shape)\n', (22909, 22924), True, 'import numpy as np\n'), ((22944, 22970), 'numpy.zeros', 'np.zeros', (['sigma_filt.shape'], {}), '(sigma_filt.shape)\n', (22952, 22970), True, 'import numpy as np\n'), ((2597, 2619), 'scipy.linalg.cholesky', 'linalg.cholesky', (['sigma'], {}), '(sigma)\n', (2612, 2619), False, 'from scipy import linalg\n'), ((3034, 3044), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (3041, 3044), True, 'import numpy as np\n'), ((3085, 3095), 'numpy.sqrt', 'np.sqrt', (['c'], {}), '(c)\n', (3092, 3095), True, 'import numpy as np\n'), ((9389, 9410), 'numpy.zeros', 'np.zeros', (['n_dim_state'], {}), '(n_dim_state)\n', (9397, 9410), True, 'import numpy as np\n'), ((9491, 9510), 'numpy.zeros', 'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (9499, 9510), True, 'import numpy as np\n'), ((26300, 26318), 'numpy.ma.atleast_2d', 'ma.atleast_2d', (['obs'], {}), '(obs)\n', (26313, 26318), False, 'from numpy import ma\n'), ((30672, 30708), 'numpy.zeros', 'np.zeros', (['(n_timesteps, n_dim_state)'], {}), '((n_timesteps, n_dim_state))\n', (30680, 30708), True, 'import numpy as np\n'), ((30721, 30755), 'numpy.zeros', 'np.zeros', (['(n_timesteps, n_dim_obs)'], {}), '((n_timesteps, n_dim_obs))\n', (30729, 30755), True, 'import numpy as np\n'), 
((42987, 43023), 'numpy.zeros', 'np.zeros', (['(n_timesteps, n_dim_state)'], {}), '((n_timesteps, n_dim_state))\n', (42995, 43023), True, 'import numpy as np\n'), ((43036, 43070), 'numpy.zeros', 'np.zeros', (['(n_timesteps, n_dim_obs)'], {}), '((n_timesteps, n_dim_obs))\n', (43044, 43070), True, 'import numpy as np\n'), ((50716, 50729), 'numpy.ma.asarray', 'ma.asarray', (['Z'], {}), '(Z)\n', (50726, 50729), False, 'from numpy import ma\n'), ((6346, 6359), 'numpy.ma.getmask', 'ma.getmask', (['z'], {}), '(z)\n', (6356, 6359), False, 'from numpy import ma\n'), ((6422, 6449), 'scipy.linalg.pinv', 'linalg.pinv', (['obs_sigma_pred'], {}), '(obs_sigma_pred)\n', (6433, 6449), False, 'from scipy import linalg\n'), ((18175, 18196), 'numpy.zeros', 'np.zeros', (['n_dim_state'], {}), '(n_dim_state)\n', (18183, 18196), True, 'import numpy as np\n'), ((18909, 18945), 'scipy.linalg.pinv', 'linalg.pinv', (['moments_pred.covariance'], {}), '(moments_pred.covariance)\n', (18920, 18945), False, 'from scipy import linalg\n'), ((23828, 23864), 'scipy.linalg.pinv', 'linalg.pinv', (['moments_pred.covariance'], {}), '(moments_pred.covariance)\n', (23839, 23864), False, 'from scipy import linalg\n'), ((31716, 31729), 'numpy.ma.asarray', 'ma.asarray', (['z'], {}), '(z)\n', (31726, 31729), False, 'from numpy import ma\n'), ((36765, 36791), 'numpy.ma.asarray', 'np.ma.asarray', (['observation'], {}), '(observation)\n', (36778, 36791), True, 'import numpy as np\n'), ((39331, 39355), 'numpy.eye', 'np.eye', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (39337, 39355), True, 'import numpy as np\n'), ((39395, 39417), 'numpy.eye', 'np.eye', (['self.n_dim_obs'], {}), '(self.n_dim_obs)\n', (39401, 39417), True, 'import numpy as np\n'), ((39453, 39479), 'numpy.zeros', 'np.zeros', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (39461, 39479), True, 'import numpy as np\n'), ((39521, 39545), 'numpy.eye', 'np.eye', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (39527, 39545), True, 'import numpy as np\n'), ((44033, 44046), 'numpy.ma.asarray', 'ma.asarray', (['z'], {}), '(z)\n', (44043, 44046), False, 'from numpy import ma\n'), ((49081, 49107), 'numpy.ma.asarray', 'np.ma.asarray', (['observation'], {}), '(observation)\n', (49094, 49107), True, 'import numpy as np\n'), ((51555, 51579), 'numpy.eye', 'np.eye', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (51561, 51579), True, 'import numpy as np\n'), ((51619, 51641), 'numpy.eye', 'np.eye', (['self.n_dim_obs'], {}), '(self.n_dim_obs)\n', (51625, 51641), True, 'import numpy as np\n'), ((51677, 51703), 'numpy.zeros', 'np.zeros', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (51685, 51703), True, 'import numpy as np\n'), ((51745, 51769), 'numpy.eye', 'np.eye', (['self.n_dim_state'], {}), '(self.n_dim_state)\n', (51751, 51769), True, 'import numpy as np\n'), ((1548, 1570), 'numpy.diag', 'np.diag', (['weights_sigma'], {}), '(weights_sigma)\n', (1555, 1570), True, 'import numpy as np\n'), ((13572, 13605), 'numpy.diag', 'np.diag', (['points_pred.weights_mean'], {}), '(points_pred.weights_mean)\n', (13579, 13605), True, 'import numpy as np\n'), ((31517, 31536), 'numpy.zeros', 'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (31525, 31536), True, 'import numpy as np\n'), ((36668, 36687), 'numpy.zeros', 'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (36676, 36687), True, 'import numpy as np\n'), ((43833, 43852), 'numpy.zeros', 'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (43841, 43852), True, 'import numpy as np\n'), ((48984, 49003), 'numpy.zeros', 
'np.zeros', (['n_dim_obs'], {}), '(n_dim_obs)\n', (48992, 49003), True, 'import numpy as np\n'), ((31107, 31128), 'numpy.zeros', 'np.zeros', (['n_dim_state'], {}), '(n_dim_state)\n', (31115, 31128), True, 'import numpy as np\n'), ((43422, 43443), 'numpy.zeros', 'np.zeros', (['n_dim_state'], {}), '(n_dim_state)\n', (43430, 43443), True, 'import numpy as np\n'), ((18713, 18752), 'numpy.diag', 'np.diag', (['points_pred.weights_covariance'], {}), '(points_pred.weights_covariance)\n', (18720, 18752), True, 'import numpy as np\n'), ((23632, 23671), 'numpy.diag', 'np.diag', (['points_pred.weights_covariance'], {}), '(points_pred.weights_covariance)\n', (23639, 23671), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""1-Step Advantage Actor-Critic agent for episodic tasks in OpenAI Gym.
- Author: <NAME>
- Contact: <EMAIL>
"""
import argparse
from typing import Tuple
import gym
import numpy as np
import torch
import wandb
from rl_algorithms.common.abstract.agent import Agent
from rl_algorithms.common.helper_functions import numpy2floattensor
from rl_algorithms.registry import AGENTS, build_learner
from rl_algorithms.utils.config import ConfigDict
@AGENTS.register_module
class A2CAgent(Agent):
"""1-Step Advantage Actor-Critic interacting with environment.
Attributes:
env (gym.Env): openAI Gym environment
args (argparse.Namespace): arguments including hyperparameters and training settings
hyper_params (ConfigDict): hyper-parameters
network_cfg (ConfigDict): config of network for training agent
optim_cfg (ConfigDict): config of optimizer
state_dim (int): state size of env
action_dim (int): action size of env
actor (nn.Module): policy model to select actions
critic (nn.Module): critic model to evaluate states
actor_optim (Optimizer): optimizer for actor
critic_optim (Optimizer): optimizer for critic
episode_step (int): step number of the current episode
i_episode (int): current episode number
transition (list): recent transition information
"""
def __init__(
self,
env: gym.Env,
env_info: ConfigDict,
args: argparse.Namespace,
hyper_params: ConfigDict,
learner_cfg: ConfigDict,
log_cfg: ConfigDict,
):
"""Initialize."""
Agent.__init__(self, env, env_info, args, log_cfg)
self.transition: list = list()
self.episode_step = 0
self.i_episode = 0
self.hyper_params = hyper_params
self.learner_cfg = learner_cfg
self.learner_cfg.args = self.args
self.learner_cfg.env_info = self.env_info
self.learner_cfg.hyper_params = self.hyper_params
self.learner_cfg.log_cfg = self.log_cfg
self.learner = build_learner(self.learner_cfg)
def select_action(self, state: np.ndarray) -> torch.Tensor:
"""Select an action from the input space."""
state = numpy2floattensor(state, self.learner.device)
selected_action, dist = self.learner.actor(state)
if self.args.test:
selected_action = dist.mean
else:
predicted_value = self.learner.critic(state)
log_prob = dist.log_prob(selected_action).sum(dim=-1)
self.transition = []
self.transition.extend([log_prob, predicted_value])
return selected_action
def step(self, action: torch.Tensor) -> Tuple[np.ndarray, np.float64, bool, dict]:
"""Take an action and return the response of the env."""
action = action.detach().cpu().numpy()
next_state, reward, done, info = self.env.step(action)
if not self.args.test:
done_bool = done
if self.episode_step == self.args.max_episode_steps:
done_bool = False
self.transition.extend([next_state, reward, done_bool])
return next_state, reward, done, info
def write_log(self, log_value: tuple):
i, score, policy_loss, value_loss = log_value
total_loss = policy_loss + value_loss
print(
"[INFO] episode %d\tepisode step: %d\ttotal score: %d\n"
"total loss: %.4f\tpolicy loss: %.4f\tvalue loss: %.4f\n"
% (i, self.episode_step, score, total_loss, policy_loss, value_loss)
)
if self.args.log:
wandb.log(
{
"total loss": total_loss,
"policy loss": policy_loss,
"value loss": value_loss,
"score": score,
}
)
def train(self):
"""Train the agent."""
# logger
if self.args.log:
self.set_wandb()
# wandb.watch([self.actor, self.critic], log="parameters")
for self.i_episode in range(1, self.args.episode_num + 1):
state = self.env.reset()
done = False
score = 0
policy_loss_episode = list()
value_loss_episode = list()
self.episode_step = 0
while not done:
if self.args.render and self.i_episode >= self.args.render_after:
self.env.render()
action = self.select_action(state)
next_state, reward, done, _ = self.step(action)
self.episode_step += 1
policy_loss, value_loss = self.learner.update_model(self.transition)
policy_loss_episode.append(policy_loss)
value_loss_episode.append(value_loss)
state = next_state
score += reward
# logging
policy_loss = np.array(policy_loss_episode).mean()
value_loss = np.array(value_loss_episode).mean()
log_value = (self.i_episode, score, policy_loss, value_loss)
self.write_log(log_value)
if self.i_episode % self.args.save_period == 0:
self.learner.save_params(self.i_episode)
self.interim_test()
# termination
self.env.close()
self.learner.save_params(self.i_episode)
self.interim_test()
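# --- Hedged illustration (added; not part of the original file). The actual
# update is implemented by the learner returned from build_learner (not shown
# here). As a generic sketch, a 1-step advantage actor-critic update derived
# from a single transition typically looks like the following; the argument
# names are placeholders, not the library's API.
def _one_step_a2c_losses(log_prob, value, next_value, reward, done, gamma=0.99):
    """Generic sketch: advantage = r + gamma * V(s') * (1 - done) - V(s)."""
    q_value = reward + gamma * next_value * (1.0 - float(done))
    advantage = q_value - value
    # treat the advantage as a constant when forming the policy gradient
    policy_loss = -log_prob * advantage.detach()
    value_loss = advantage.pow(2).mean()  # squared TD error for the critic
    return policy_loss, value_loss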
|
[
"wandb.log",
"rl_algorithms.common.abstract.agent.Agent.__init__",
"rl_algorithms.registry.build_learner",
"rl_algorithms.common.helper_functions.numpy2floattensor",
"numpy.array"
] |
[((1661, 1711), 'rl_algorithms.common.abstract.agent.Agent.__init__', 'Agent.__init__', (['self', 'env', 'env_info', 'args', 'log_cfg'], {}), '(self, env, env_info, args, log_cfg)\n', (1675, 1711), False, 'from rl_algorithms.common.abstract.agent import Agent\n'), ((2112, 2143), 'rl_algorithms.registry.build_learner', 'build_learner', (['self.learner_cfg'], {}), '(self.learner_cfg)\n', (2125, 2143), False, 'from rl_algorithms.registry import AGENTS, build_learner\n'), ((2278, 2323), 'rl_algorithms.common.helper_functions.numpy2floattensor', 'numpy2floattensor', (['state', 'self.learner.device'], {}), '(state, self.learner.device)\n', (2295, 2323), False, 'from rl_algorithms.common.helper_functions import numpy2floattensor\n'), ((3685, 3796), 'wandb.log', 'wandb.log', (["{'total loss': total_loss, 'policy loss': policy_loss, 'value loss':\n value_loss, 'score': score}"], {}), "({'total loss': total_loss, 'policy loss': policy_loss,\n 'value loss': value_loss, 'score': score})\n", (3694, 3796), False, 'import wandb\n'), ((5003, 5032), 'numpy.array', 'np.array', (['policy_loss_episode'], {}), '(policy_loss_episode)\n', (5011, 5032), True, 'import numpy as np\n'), ((5065, 5093), 'numpy.array', 'np.array', (['value_loss_episode'], {}), '(value_loss_episode)\n', (5073, 5093), True, 'import numpy as np\n')]
|
import typing
import torch
import torchvision
import numpy as np
from PIL import Image
from torch.autograd import Variable
from src.final_work.transformer import Transformer
from enum import Enum
class ModelType(Enum):
HOSODA = "hosoda_mamoru"
KON = "kon_satoshi"
MIYAZAKI = "miyazaki_hayao"
SHINKAI = "shinkai_makoto"
class Device(Enum):
CPU = "cpu"
GPU = torch.device("cuda")
class ImageConverter:
MODELS_DIRECTORY = "models"
def __init__(self):
self.device = self._define_device()
self._init_models()
def _get_model(self, model_type: ModelType) -> Transformer:
return self.models[model_type.value]
@staticmethod
def _define_device() -> Device:
_is_gpu_enable = torch.cuda.is_available()
if _is_gpu_enable:
return Device.GPU
else:
return Device.CPU
def _init_models(self):
self.models = dict()
for model_type in ModelType:
self.models[model_type.value] = self._create_model(model_type)
def _load_model_parameters(self, model: ModelType):
return torch.load(f"{self.MODELS_DIRECTORY}/{model.value}.pth", self.device.value)
def _create_model(self, model_type: ModelType) -> Transformer:
new_model = Transformer()
new_model_parameters = self._load_model_parameters(model_type)
new_model.load_state_dict(new_model_parameters)
if self.device == Device.GPU:
new_model.to(self.device.value)
new_model.eval()
return new_model
def convert_image(self, image: Image, model_type: ModelType) -> Image:
image = image.convert("RGB")
image = np.asarray(image)
image = image[:, :, [2, 1, 0]]
image = torchvision.transforms.ToTensor()(image).unsqueeze(0)
image = -1 + 2 * image
if self.device == Device.GPU:
image = Variable(image).to(self.device.value)
else:
image = Variable(image).float()
model = self._get_model(model_type)
converted_image = model(image)
converted_image = converted_image[0]
converted_image = converted_image[[2, 1, 0], :, :]
converted_image = converted_image.data.cpu().float() * 0.5 + 0.5
return torchvision.transforms.ToPILImage()(converted_image)
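# --- Hedged usage sketch (added; not part of the original file). Assumes the
# pre-trained weight files (models/<style>.pth) and the input image exist on
# disk; the file names below are invented for illustration.
if __name__ == "__main__":
    converter = ImageConverter()
    stylized = converter.convert_image(Image.open("input.jpg"), ModelType.SHINKAI)
    stylized.save("stylized_output.jpg")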
|
[
"src.final_work.transformer.Transformer",
"torch.autograd.Variable",
"torch.load",
"numpy.asarray",
"torchvision.transforms.ToPILImage",
"torch.cuda.is_available",
"torch.device",
"torchvision.transforms.ToTensor"
] |
[((385, 405), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (397, 405), False, 'import torch\n'), ((749, 774), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (772, 774), False, 'import torch\n'), ((1118, 1193), 'torch.load', 'torch.load', (['f"""{self.MODELS_DIRECTORY}/{model.value}.pth"""', 'self.device.value'], {}), "(f'{self.MODELS_DIRECTORY}/{model.value}.pth', self.device.value)\n", (1128, 1193), False, 'import torch\n'), ((1282, 1295), 'src.final_work.transformer.Transformer', 'Transformer', ([], {}), '()\n', (1293, 1295), False, 'from src.final_work.transformer import Transformer\n'), ((1686, 1703), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1696, 1703), True, 'import numpy as np\n'), ((2275, 2310), 'torchvision.transforms.ToPILImage', 'torchvision.transforms.ToPILImage', ([], {}), '()\n', (2308, 2310), False, 'import torchvision\n'), ((1759, 1792), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (1790, 1792), False, 'import torchvision\n'), ((1902, 1917), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (1910, 1917), False, 'from torch.autograd import Variable\n'), ((1974, 1989), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (1982, 1989), False, 'from torch.autograd import Variable\n')]
|
# -*- coding: utf-8 -*-
"""
Created on 2017-6-27
@author: cheng.li
"""
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
import numpy as np
from alphamind.portfolio.optimizers import (
QuadraticOptimizer,
TargetVolOptimizer
)
from alphamind.exceptions.exceptions import PortfolioBuilderException
def _create_bounds(lbound,
ubound,
bm,
risk_exposure,
risk_target):
if lbound is not None:
lbound = lbound - bm
if ubound is not None:
ubound = ubound - bm
if risk_exposure is not None:
cons_mat = risk_exposure.T
bm_risk = cons_mat @ bm
clbound = (risk_target[0] - bm_risk).reshape((-1, 1))
cubound = (risk_target[1] - bm_risk).reshape((-1, 1))
else:
cons_mat = None
clbound = None
cubound = None
return lbound, ubound, cons_mat, clbound, cubound
def _create_result(optimizer, bm):
if optimizer.status() == "optimal" or optimizer.status() == "optimal_inaccurate":
return optimizer.status(), optimizer.feval(), optimizer.x_value() + bm
else:
raise PortfolioBuilderException(optimizer.status())
def mean_variance_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float, None],
ubound: Union[np.ndarray, float, None],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
lam: float = 1.,
linear_solver: str = 'deprecated') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = QuadraticOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
penalty=lam,
cov=cov,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
def target_vol_builder(er: np.ndarray,
risk_model: Dict[str, Union[None, np.ndarray]],
bm: np.ndarray,
lbound: Union[np.ndarray, float],
ubound: Union[np.ndarray, float],
risk_exposure: Optional[np.ndarray],
risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
vol_target: float = 1.,
linear_solver: str = 'ma27') -> Tuple[str, float, np.ndarray]:
lbound, ubound, cons_mat, clbound, cubound = _create_bounds(lbound, ubound, bm, risk_exposure,
risk_target)
if cons_mat is not None:
cons_matrix = np.concatenate([cons_mat, clbound, cubound], axis=1)
else:
cons_matrix = None
cov = risk_model['cov']
special_risk = risk_model['idsync']
risk_cov = risk_model['factor_cov']
risk_exposure = risk_model['factor_loading']
prob = TargetVolOptimizer(objective=-er,
cons_matrix=cons_matrix,
lbound=lbound,
ubound=ubound,
target_vol=vol_target,
factor_cov=risk_cov,
factor_load=risk_exposure,
factor_special=special_risk,
cov=cov)
if prob.status() == "optimal" or prob.status() == 'optimal_inaccurate':
return prob.status(), prob.feval(), prob.x_value() + bm
else:
raise PortfolioBuilderException(prob.status())
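# --- Hedged usage sketch (added; not part of the original file). A toy call
# of mean_variance_builder with a dense covariance and no linear risk
# constraints. It assumes the underlying QuadraticOptimizer accepts a full
# `cov` with the factor-model entries left as None; all numbers are invented.
if __name__ == "__main__":
    n = 5
    er = np.random.randn(n)
    bm = np.ones(n) / n
    risk_model = {'cov': np.eye(n) * 0.05,
                  'factor_cov': None,
                  'factor_loading': None,
                  'idsync': None}
    status, feval, weights = mean_variance_builder(er, risk_model, bm,
                                                  lbound=np.zeros(n),
                                                  ubound=np.ones(n),
                                                  risk_exposure=None,
                                                  risk_target=None,
                                                  lam=1.0)
    print(status, weights)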
|
[
"alphamind.portfolio.optimizers.TargetVolOptimizer",
"numpy.concatenate",
"alphamind.portfolio.optimizers.QuadraticOptimizer"
] |
[((2311, 2507), 'alphamind.portfolio.optimizers.QuadraticOptimizer', 'QuadraticOptimizer', ([], {'objective': '(-er)', 'cons_matrix': 'cons_matrix', 'lbound': 'lbound', 'ubound': 'ubound', 'penalty': 'lam', 'cov': 'cov', 'factor_cov': 'risk_cov', 'factor_load': 'risk_exposure', 'factor_special': 'special_risk'}), '(objective=-er, cons_matrix=cons_matrix, lbound=lbound,\n ubound=ubound, penalty=lam, cov=cov, factor_cov=risk_cov, factor_load=\n risk_exposure, factor_special=special_risk)\n', (2329, 2507), False, 'from alphamind.portfolio.optimizers import QuadraticOptimizer, TargetVolOptimizer\n'), ((3968, 4174), 'alphamind.portfolio.optimizers.TargetVolOptimizer', 'TargetVolOptimizer', ([], {'objective': '(-er)', 'cons_matrix': 'cons_matrix', 'lbound': 'lbound', 'ubound': 'ubound', 'target_vol': 'vol_target', 'factor_cov': 'risk_cov', 'factor_load': 'risk_exposure', 'factor_special': 'special_risk', 'cov': 'cov'}), '(objective=-er, cons_matrix=cons_matrix, lbound=lbound,\n ubound=ubound, target_vol=vol_target, factor_cov=risk_cov, factor_load=\n risk_exposure, factor_special=special_risk, cov=cov)\n', (3986, 4174), False, 'from alphamind.portfolio.optimizers import QuadraticOptimizer, TargetVolOptimizer\n'), ((2051, 2103), 'numpy.concatenate', 'np.concatenate', (['[cons_mat, clbound, cubound]'], {'axis': '(1)'}), '([cons_mat, clbound, cubound], axis=1)\n', (2065, 2103), True, 'import numpy as np\n'), ((3708, 3760), 'numpy.concatenate', 'np.concatenate', (['[cons_mat, clbound, cubound]'], {'axis': '(1)'}), '([cons_mat, clbound, cubound], axis=1)\n', (3722, 3760), True, 'import numpy as np\n')]
|
import pytest
import numpy as np
from unpackqa import (unpack_to_array,
unpack_to_dict,
list_products,
list_qa_flags,
list_sensors,
)
from unpackqa.tools.validation import (product_info_has_required_entries,
flag_info_is_non_empty_dict,
flag_info_bit_list_non_empty,
flag_info_bits_non_neg_ints,
flag_info_flag_is_str,
bits_are_only_used_once,
bits_are_reasonable,
bits_do_not_exceed_bit_size,
max_value_matches_num_bits,
bits_are_ordered,
)
from unpackqa.tools.validation import InvalidProductSpec
from unpackqa.product_loader import all_products
"""
Validating product definitions.
"""
qa_array = np.array([[8,8,8],
[16,16,16],
[255,255,255]])
all_product_identifiers = list_products(sensor='all')
def test_product_ids_are_unique():
"""No duplicate product identifers"""
assert len(all_product_identifiers) == len(set(all_product_identifiers))
def test_list_sensors():
"""Should have some output"""
assert len(list_sensors()) > 0
def test_list_products():
"""Each sensor should have some products"""
sensors = list_sensors()
sensor_has_products = [len(list_products(sensor=s))>0 for s in sensors]
assert all(sensor_has_products)
def test_list_products_invalid_sensor():
"""Should raise error with unknown sensor"""
with pytest.raises(ValueError):
list_products(sensor='asdf')
def test_list_flags_invalid_product():
"""Should raise error with unknown product ID"""
with pytest.raises(ValueError):
list_qa_flags(product = 'asdf')
@pytest.mark.parametrize('product', all_product_identifiers)
def test_qa_flag_list(product):
"""Lists of flags should be available for each product"""
flags = list_qa_flags(product = product)
assert len(flags) > 0
@pytest.mark.parametrize('product', all_product_identifiers)
def test_product_info_is_dict(product):
"""product_info entry should be dictonary"""
product_info = all_products[product]
assert isinstance(product_info, dict)
"""
Several tests for all products configured within the package.
Within product_info the 'flag_info' entry
should be a dictionary with key-value pairs:
    'flag name': [bit0, bit1, ...]
where 'flag name' is a str and the value is a list of
non-negative ints.
These same tests are also used to validate user-passed custom specifications,
so instead of writing a new test function for each check, we just iterate
over them and build an informative message if one or more fail.
"""
test_list = [('product info does not have required entries',product_info_has_required_entries),
('flag_info is not dictionary, or is empty', flag_info_is_non_empty_dict),
('flag_info has empty lists',flag_info_bit_list_non_empty),
('flag_info has negative and/or non-int values',flag_info_bits_non_neg_ints),
('flag_info keys are not strings',flag_info_flag_is_str),
('duplicate bits detected',bits_are_only_used_once),
('bits are larger than needed for even a 32 bit int', bits_are_reasonable),
('largest bit is greater than num_bits',bits_do_not_exceed_bit_size),
('max_value is >= 2**num_bits',max_value_matches_num_bits),
('bits are out of order',bits_are_ordered),
]
@pytest.mark.parametrize('product', all_product_identifiers)
def test_product_info(product):
product_info = all_products[product]
failed_tests = []
tests_failed = False
for test_message, test_function in test_list:
try:
test_function(product_info)
except InvalidProductSpec:
tests_failed = True
failed_tests.append(test_message)
if tests_failed:
error_message = '{} failed tests for {}\n'.format(len(failed_tests), product)
error_message = error_message + '\n'.join(['{}. {}'.format(i+1,m) for i,m in enumerate(failed_tests)])
assert False, error_message
|
[
"unpackqa.list_sensors",
"unpackqa.list_qa_flags",
"pytest.raises",
"unpackqa.list_products",
"numpy.array",
"pytest.mark.parametrize"
] |
[((1116, 1168), 'numpy.array', 'np.array', (['[[8, 8, 8], [16, 16, 16], [255, 255, 255]]'], {}), '([[8, 8, 8], [16, 16, 16], [255, 255, 255]])\n', (1124, 1168), True, 'import numpy as np\n'), ((1232, 1259), 'unpackqa.list_products', 'list_products', ([], {'sensor': '"""all"""'}), "(sensor='all')\n", (1245, 1259), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((2061, 2120), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""product"""', 'all_product_identifiers'], {}), "('product', all_product_identifiers)\n", (2084, 2120), False, 'import pytest\n'), ((2292, 2351), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""product"""', 'all_product_identifiers'], {}), "('product', all_product_identifiers)\n", (2315, 2351), False, 'import pytest\n'), ((3825, 3884), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""product"""', 'all_product_identifiers'], {}), "('product', all_product_identifiers)\n", (3848, 3884), False, 'import pytest\n'), ((1599, 1613), 'unpackqa.list_sensors', 'list_sensors', ([], {}), '()\n', (1611, 1613), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((2227, 2257), 'unpackqa.list_qa_flags', 'list_qa_flags', ([], {'product': 'product'}), '(product=product)\n', (2240, 2257), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((1826, 1851), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1839, 1851), False, 'import pytest\n'), ((1861, 1889), 'unpackqa.list_products', 'list_products', ([], {'sensor': '"""asdf"""'}), "(sensor='asdf')\n", (1874, 1889), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((1992, 2017), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2005, 2017), False, 'import pytest\n'), ((2027, 2056), 'unpackqa.list_qa_flags', 'list_qa_flags', ([], {'product': '"""asdf"""'}), "(product='asdf')\n", (2040, 2056), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((1490, 1504), 'unpackqa.list_sensors', 'list_sensors', ([], {}), '()\n', (1502, 1504), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n'), ((1645, 1668), 'unpackqa.list_products', 'list_products', ([], {'sensor': 's'}), '(sensor=s)\n', (1658, 1668), False, 'from unpackqa import unpack_to_array, unpack_to_dict, list_products, list_qa_flags, list_sensors\n')]
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0602_recurrent_neural_network.py
@Version : v0.1
@Time : 2019-11-24 16:00
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "Deep Learning with Python", Francois Chollet, Sec0602, P162
@Desc    : Deep learning for text and sequences: understanding recurrent neural networks (not well suited to this sentiment-analysis task; see 0603 for a further look at RNNs)
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install numpy<1.17; versions below 1.17 do not raise the error
import winsound
from keras.activations import relu, sigmoid
from keras.datasets import imdb
from keras.layers import Dense
from keras.layers import Embedding, LSTM, SimpleRNN
from keras.losses import binary_crossentropy
from keras.metrics import binary_accuracy
from keras.models import Sequential
from keras.optimizers import rmsprop
from keras.preprocessing.sequence import pad_sequences
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
from tools import plot_classes_results
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Print numeric output with 3 decimal places
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# ----------------------------------------------------------------------
# Listing 6-21: Numpy implementation of a simple RNN
def simple_rnn_use_numpy():
    timesteps = 100  # number of timesteps in the input sequence
    input_features = 32  # dimensionality of the input feature space
    output_features = 64  # dimensionality of the output feature space
    # input data: random noise, purely as an example
    inputs = np.random.random((timesteps, input_features))
    state_t = np.zeros((output_features,))  # initial state: an all-zero vector
    # create random weight matrices
    W = np.random.random((output_features, input_features)) / 10
    U = np.random.random((output_features, output_features)) / 10
    b = np.random.random((output_features,)) / 10
    successive_outputs = []
    for input_t in inputs:
        # current output combines the current input and the previous state
        output_t = np.tanh(np.dot(W, input_t) + np.dot(U, state_t) + b)
        successive_outputs.append(output_t)  # store the output in a list
        # update the network state for the next timestep
        state_t = output_t
        pass
    # the final output is a 2D tensor of shape (timesteps, output_features)
    # np.stack() turns the list of arrays into a single 2D array
    final_output_sequence = np.stack(successive_outputs, axis = 0)
return final_output_sequence
# ----------------------------------------------------------------------
# Keras implementation of a simple RNN
def keras_simplernn():
    model = Sequential(name = "full_state_sequence")
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences = True))
model.summary()
    model = Sequential(name = "last_timestep_output")
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32))
model.summary()
    model = Sequential(name = "stacked_SimpleRNN_layers")
model.add(Embedding(10000, 32))
model.add(SimpleRNN(32, return_sequences = True))
model.add(SimpleRNN(32, return_sequences = True))
model.add(SimpleRNN(32, return_sequences = True))
model.add(SimpleRNN(32))
model.summary()
pass
# Apply RNN and LSTM models to the IMDB movie-review classification problem
max_features = 10000
max_len = 500
batch_size = 128
epochs = 10
# See ch0304 for a detailed description of the dataset
print("Listing 6.22: loading the dataset...")
(train_data, y_train), (test_data, y_test) = imdb.load_data(num_words = max_features)
x_train = pad_sequences(train_data, maxlen = max_len)
x_test = pad_sequences(test_data, maxlen = max_len)
def train_model(model, data, labels):
return model.fit(data, labels, epochs = epochs, batch_size = batch_size,
validation_split = 0.2, verbose = 2, use_multiprocessing = True)
# ----------------------------------------------------------------------
def definite_rnn():
title = "将 SimpleRNN 应用于 IMDB "
model = Sequential(name = title)
model.add(Embedding(max_features, 64))
model.add(SimpleRNN(64))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, x_train, y_train)
plot_classes_results(history, title, epochs)
print(title + "评估测试集", model.evaluate(x_test, y_test, verbose = 2, use_multiprocessing = True))
pass
# ----------------------------------------------------------------------
def definite_lstm():
title = "将 LSTM 应用于 IMDB"
model = Sequential(name = title)
model.add(Embedding(max_features, 64))
model.add(LSTM(64))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, x_train, y_train)
plot_classes_results(history, title, epochs)
print(title + "评估测试集", model.evaluate(x_test, y_test, verbose = 2, use_multiprocessing = True))
pass
# ----------------------------------------------------------------------
# Revisit the binary classification problem from ch0304
def vectorize_sequences(sequences, dimension = 10000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1.
pass
return results
vector_train_data = vectorize_sequences(train_data, max_features)
vector_test_data = vectorize_sequences(test_data, max_features)
vector_train_labels = np.asarray(y_train)
vector_test_labels = np.asarray(y_test)
# With one-hot encoded inputs, accuracy is even better than the RNN and LSTM (ch0304 confirmed that dense layers work very well here)
def definite_dense_for_one_hot():
title = "将 Dense+One-Hot 应用于 IMDB"
model = Sequential(name = title)
model.add(Dense(16, activation = relu, input_shape = (10000,)))
model.add(Dense(16, activation = relu))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, vector_train_data, vector_train_labels)
plot_classes_results(history, title, epochs)
print(title + " test set evaluation:",
model.evaluate(vector_test_data, vector_test_labels, verbose = 2, use_multiprocessing = True))
pass
# Without One-Hot encoding the data, the accuracy drops sharply
def definite_dense():
title = "Applying Dense to IMDB"
model = Sequential(name = title)
model.add(Dense(16, activation = relu, input_shape = (500,)))
model.add(Dense(16, activation = relu))
model.add(Dense(1, activation = sigmoid))
model.summary()
model.compile(optimizer = rmsprop(lr = 0.001), loss = binary_crossentropy, metrics = [binary_accuracy])
history = train_model(model, x_train, y_train)
plot_classes_results(history, title, epochs)
print(title + " test set evaluation:", model.evaluate(x_test, y_test, verbose = 2, use_multiprocessing = True))
pass
# ----------------------------------------------------------------------
definite_rnn()
definite_lstm()
definite_dense_for_one_hot()
definite_dense()
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
|
[
"keras.optimizers.rmsprop",
"numpy.random.seed",
"keras.preprocessing.sequence.pad_sequences",
"numpy.set_printoptions",
"tools.plot_classes_results",
"matplotlib.pyplot.get_fignums",
"keras.datasets.imdb.load_data",
"numpy.stack",
"keras.layers.SimpleRNN",
"matplotlib.pyplot.show",
"numpy.asarray",
"winsound.Beep",
"numpy.dot",
"keras.layers.LSTM",
"numpy.zeros",
"numpy.random.random",
"keras.layers.Dense",
"keras.layers.Embedding",
"keras.models.Sequential"
] |
[((1218, 1303), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)', 'threshold': 'np.inf', 'linewidth': '(200)'}), '(precision=3, suppress=True, threshold=np.inf, linewidth=200\n )\n', (1237, 1303), True, 'import numpy as np\n'), ((1369, 1389), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1383, 1389), True, 'import numpy as np\n'), ((3470, 3508), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': 'max_features'}), '(num_words=max_features)\n', (3484, 3508), False, 'from keras.datasets import imdb\n'), ((3521, 3562), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_data'], {'maxlen': 'max_len'}), '(train_data, maxlen=max_len)\n', (3534, 3562), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3574, 3614), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['test_data'], {'maxlen': 'max_len'}), '(test_data, maxlen=max_len)\n', (3587, 3614), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((5554, 5573), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (5564, 5573), True, 'import numpy as np\n'), ((5595, 5613), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (5605, 5613), True, 'import numpy as np\n'), ((7117, 7140), 'winsound.Beep', 'winsound.Beep', (['(600)', '(500)'], {}), '(600, 500)\n', (7130, 7140), False, 'import winsound\n'), ((1803, 1848), 'numpy.random.random', 'np.random.random', (['(timesteps, input_features)'], {}), '((timesteps, input_features))\n', (1819, 1848), True, 'import numpy as np\n'), ((1863, 1891), 'numpy.zeros', 'np.zeros', (['(output_features,)'], {}), '((output_features,))\n', (1871, 1891), True, 'import numpy as np\n'), ((2500, 2536), 'numpy.stack', 'np.stack', (['successive_outputs'], {'axis': '(0)'}), '(successive_outputs, axis=0)\n', (2508, 2536), True, 'import numpy as np\n'), ((2702, 2728), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""完整的状态序列"""'}), "(name='完整的状态序列')\n", (2712, 2728), False, 'from keras.models import Sequential\n'), ((2854, 2883), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""最后一个时间步的输出"""'}), "(name='最后一个时间步的输出')\n", (2864, 2883), False, 'from keras.models import Sequential\n'), ((2984, 3013), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""多个循环层的逐个堆叠"""'}), "(name='多个循环层的逐个堆叠')\n", (2994, 3013), False, 'from keras.models import Sequential\n'), ((3963, 3985), 'keras.models.Sequential', 'Sequential', ([], {'name': 'title'}), '(name=title)\n', (3973, 3985), False, 'from keras.models import Sequential\n'), ((4289, 4333), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (4309, 4333), False, 'from tools import plot_classes_results\n'), ((4581, 4603), 'keras.models.Sequential', 'Sequential', ([], {'name': 'title'}), '(name=title)\n', (4591, 4603), False, 'from keras.models import Sequential\n'), ((4934, 4978), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (4954, 4978), False, 'from tools import plot_classes_results\n'), ((5764, 5786), 'keras.models.Sequential', 'Sequential', ([], {'name': 'title'}), '(name=title)\n', (5774, 5786), False, 'from keras.models import Sequential\n'), ((6152, 6196), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (6172, 6196), False, 'from tools import 
plot_classes_results\n'), ((6437, 6459), 'keras.models.Sequential', 'Sequential', ([], {'name': 'title'}), '(name=title)\n', (6447, 6459), False, 'from keras.models import Sequential\n'), ((6801, 6845), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (6821, 6845), False, 'from tools import plot_classes_results\n'), ((7177, 7187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7185, 7187), True, 'import matplotlib.pyplot as plt\n'), ((1930, 1981), 'numpy.random.random', 'np.random.random', (['(output_features, input_features)'], {}), '((output_features, input_features))\n', (1946, 1981), True, 'import numpy as np\n'), ((1995, 2047), 'numpy.random.random', 'np.random.random', (['(output_features, output_features)'], {}), '((output_features, output_features))\n', (2011, 2047), True, 'import numpy as np\n'), ((2061, 2097), 'numpy.random.random', 'np.random.random', (['(output_features,)'], {}), '((output_features,))\n', (2077, 2097), True, 'import numpy as np\n'), ((2745, 2765), 'keras.layers.Embedding', 'Embedding', (['(10000)', '(32)'], {}), '(10000, 32)\n', (2754, 2765), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((2781, 2817), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {'return_sequences': '(True)'}), '(32, return_sequences=True)\n', (2790, 2817), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((2900, 2920), 'keras.layers.Embedding', 'Embedding', (['(10000)', '(32)'], {}), '(10000, 32)\n', (2909, 2920), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((2936, 2949), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {}), '(32)\n', (2945, 2949), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3030, 3050), 'keras.layers.Embedding', 'Embedding', (['(10000)', '(32)'], {}), '(10000, 32)\n', (3039, 3050), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3066, 3102), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {'return_sequences': '(True)'}), '(32, return_sequences=True)\n', (3075, 3102), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3120, 3156), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {'return_sequences': '(True)'}), '(32, return_sequences=True)\n', (3129, 3156), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3174, 3210), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {'return_sequences': '(True)'}), '(32, return_sequences=True)\n', (3183, 3210), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((3228, 3241), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(32)'], {}), '(32)\n', (3237, 3241), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4002, 4029), 'keras.layers.Embedding', 'Embedding', (['max_features', '(64)'], {}), '(max_features, 64)\n', (4011, 4029), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4045, 4058), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(64)'], {}), '(64)\n', (4054, 4058), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4074, 4102), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'sigmoid'}), '(1, activation=sigmoid)\n', (4079, 4102), False, 'from keras.layers import Dense\n'), ((4620, 4647), 'keras.layers.Embedding', 'Embedding', (['max_features', '(64)'], {}), '(max_features, 64)\n', (4629, 4647), False, 'from keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4663, 4671), 'keras.layers.LSTM', 'LSTM', (['(64)'], {}), '(64)\n', (4667, 4671), False, 'from 
keras.layers import Embedding, LSTM, SimpleRNN\n'), ((4687, 4715), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'sigmoid'}), '(1, activation=sigmoid)\n', (4692, 4715), False, 'from keras.layers import Dense\n'), ((5803, 5851), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': 'relu', 'input_shape': '(10000,)'}), '(16, activation=relu, input_shape=(10000,))\n', (5808, 5851), False, 'from keras.layers import Dense\n'), ((5871, 5897), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': 'relu'}), '(16, activation=relu)\n', (5876, 5897), False, 'from keras.layers import Dense\n'), ((5915, 5943), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'sigmoid'}), '(1, activation=sigmoid)\n', (5920, 5943), False, 'from keras.layers import Dense\n'), ((6476, 6522), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': 'relu', 'input_shape': '(500,)'}), '(16, activation=relu, input_shape=(500,))\n', (6481, 6522), False, 'from keras.layers import Dense\n'), ((6542, 6568), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': 'relu'}), '(16, activation=relu)\n', (6547, 6568), False, 'from keras.layers import Dense\n'), ((6586, 6614), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'sigmoid'}), '(1, activation=sigmoid)\n', (6591, 6614), False, 'from keras.layers import Dense\n'), ((7148, 7165), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (7163, 7165), True, 'import matplotlib.pyplot as plt\n'), ((4156, 4173), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (4163, 4173), False, 'from keras.optimizers import rmsprop\n'), ((4769, 4786), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (4776, 4786), False, 'from keras.optimizers import rmsprop\n'), ((5997, 6014), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (6004, 6014), False, 'from keras.optimizers import rmsprop\n'), ((6668, 6685), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (6675, 6685), False, 'from keras.optimizers import rmsprop\n'), ((2216, 2234), 'numpy.dot', 'np.dot', (['W', 'input_t'], {}), '(W, input_t)\n', (2222, 2234), True, 'import numpy as np\n'), ((2237, 2255), 'numpy.dot', 'np.dot', (['U', 'state_t'], {}), '(U, state_t)\n', (2243, 2255), True, 'import numpy as np\n')]
|
import numpy as np
from Classes.Uncertainty import Uncertainty
from Classes.QComp import QComp
class QAData(object):
"""Evaluates and stores quality assurance characteristics and messages.
Attributes
----------
q_run_threshold_caution: int
Caution threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_run_threshold_warning: int
Warning threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_total_threshold_caution: int
Caution threshold for total interpolated discharge for invalid ensembles, in percent.
q_total_threshold_warning: int
Warning threshold for total interpolated discharge for invalid ensembles, in percent.
transects: dict
Dictionary of quality assurance checks for transects
system_tst: dict
Dictionary of quality assurance checks on the system test(s)
compass: dict
Dictionary of quality assurance checks on compass calibration and evaluations
temperature: dict
Dictionary of quality assurance checks on temperature comparisons and variation
moving_bed: dict
Dictionary of quality assurance checks on moving-bed tests
user: dict
Dictionary of quality assurance checks on user input data
bt_vel: dict
Dictionary of quality assurance checks on bottom track velocities
gga_vel: dict
Dictionary of quality assurance checks on gga boat velocities
vtg_vel: dict
Dictionary of quality assurance checks on vtg boat velocities
w_vel: dict
Dictionary of quality assurance checks on water track velocities
extrapolation: dict
Dictionary of quality assurance checks on extrapolations
edges: dict
Dictionary of quality assurance checks on edges
"""
def __init__(self, meas):
"""Checks the measurement for all quality assurance issues.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Set default thresholds
self.q_run_threshold_caution = 3
self.q_run_threshold_warning = 5
self.q_total_threshold_caution = 10
self.q_total_threshold_warning = 25
# Initialize instance variables
self.transects = dict()
self.system_tst = dict()
self.compass = dict()
self.temperature = dict()
self.moving_bed = dict()
self.user = dict()
self.depths = dict()
self.bt_vel = dict()
self.gga_vel = dict()
self.vtg_vel = dict()
self.w_vel = dict()
self.extrapolation = dict()
self.edges = dict()
# Apply QA checks
self.transects_qa(meas)
self.system_tst_qa(meas)
self.compass_qa(meas)
self.temperature_qa(meas)
self.moving_bed_qa(meas)
self.user_qa(meas)
self.depths_qa(meas)
self.boat_qa(meas)
self.water_qa(meas)
self.extrapolation_qa(meas)
self.edges_qa(meas)
def transects_qa(self, meas):
"""Apply quality checks to transects
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Assume good results
self.transects['status'] = 'good'
# Initialize keys
self.transects['messages'] = []
self.transects['recip'] = 0
self.transects['sign'] = 0
self.transects['duration'] = 0
self.transects['number'] = 0
self.transects['uncertainty'] = 0
checked = []
discharges = []
start_edge = []
for n in range(len(meas.transects)):
checked.append(meas.transects[n].checked)
if meas.transects[n].checked:
discharges.append(meas.discharge[n])
start_edge.append(meas.transects[n].start_edge)
num_checked = np.nansum(np.asarray(checked))
# Check duration
total_duration = 0
if num_checked >= 1:
for transect in meas.transects:
if transect.checked:
total_duration += transect.date_time.transect_duration_sec
# Check duration against USGS policy
if total_duration < 720:
self.transects['status'] = 'caution'
self.transects['messages'].append(
['Transects: Duration of selected transects is less than 720 seconds;', 2, 0])
self.transects['duration'] = 1
# Check transects for missing ensembles
for transect in meas.transects:
if transect.checked:
# Determine number of missing ensembles
if transect.adcp.manufacturer == 'SonTek':
# Determine number of missing ensembles for SonTek data
idx_missing = np.where(transect.date_time.ens_duration_sec > 1.5)[0]
if len(idx_missing) > 0:
average_ensemble_duration = (np.nansum(transect.date_time.ens_duration_sec)
- np.nansum(transect.date_time.ens_duration_sec[idx_missing])
/ (len(transect.date_time.ens_duration_sec) - len(idx_missing)))
num_missing = np.round(np.nansum(transect.date_time.ens_duration_sec[idx_missing])
/ average_ensemble_duration) - len(idx_missing)
else:
num_missing = 0
else:
# Determine number of lost ensembles for TRDI data
idx_missing = np.where(np.isnan(transect.date_time.ens_duration_sec))[0]
num_missing = len(idx_missing) - 1
# Save caution message
if num_missing > 0:
self.transects['messages'].append(['Transects: ' + str(transect.file_name) + ' is missing '
+ str(int(num_missing)) + ' ensembles;', 2, 0])
self.transects['status'] = 'caution'
# Check number of transects checked
if num_checked == 0:
# No transects selected
self.transects['status'] = 'warning'
self.transects['messages'].append(['TRANSECTS: No transects selected;', 1, 0])
self.transects['number'] = 2
elif num_checked == 1:
# Only one transect selected
self.transects['status'] = 'caution'
self.transects['messages'].append(['Transects: Only one transect selected;', 2, 0])
self.transects['number'] = 2
else:
self.transects['number'] = num_checked
if num_checked == 2:
# Only 2 transects selected
cov, _ = Uncertainty.uncertainty_q_random(discharges, 'total')
# Check uncertainty
if cov > 2:
self.transects['status'] = 'caution'
self.transects['messages'].append(
['Transects: Uncertainty would be reduced by additional transects;', 2, 0])
# Check for consistent sign
q_positive = []
for q in discharges:
if q.total >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1:
self.transects['status'] = 'warning'
self.transects['messages'].append(
['Transects: Sign of total Q is not consistent. One or more start banks may be incorrect;', 1, 0])
# Check for reciprocal transects
num_left = start_edge.count('Left')
num_right = start_edge.count('Right')
if not num_left == num_right:
self.transects['status'] = 'warning'
self.transects['messages'].append(['Transects: Transects selected are not reciprocal transects;', 1, 0])
# Check for zero discharge transects
q_zero = False
for q in discharges:
if q.total == 0:
q_zero = True
if q_zero:
self.transects['status'] = 'warning'
self.transects['messages'].append(['TRANSECTS: One or more transects have zero Q;', 1, 0])
def system_tst_qa(self, meas):
"""Apply QA checks to system test.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.system_tst['messages'] = []
self.system_tst['status'] = 'good'
# Determine if a system test was recorded
if not meas.system_test:
# No system test data recorded
self.system_tst['status'] = 'warning'
self.system_tst['messages'].append(['SYSTEM TEST: No system test;', 1, 3])
else:
pt3_fail = False
num_tests_with_failure = 0
for test in meas.system_test:
if hasattr(test, 'result'):
if 'pt3' in test.result and test.result['pt3'] is not None:
# Check hard_limit, high gain, wide bandwidth
if 'hard_limit' in test.result['pt3']:
if 'high_wide' in test.result['pt3']['hard_limit']:
corr_table = test.result['pt3']['hard_limit']['high_wide']['corr_table']
if len(corr_table) > 0:
# All lags past lag 2 should be less than 50% of lag 0
qa_threshold = corr_table[0, :] * 0.5
all_lag_check = np.greater(corr_table[3::, :], qa_threshold)
# Lag 7 should be less than 25% of lag 0
lag_7_check = np.greater(corr_table[7, :], corr_table[0, :] * 0.25)
# If either condition is met for any beam the test fails
if np.sum(np.sum(all_lag_check)) + np.sum(lag_7_check) > 1:
pt3_fail = True
if test.result['n_failed'] is not None and test.result['n_failed'] > 0:
num_tests_with_failure += 1
if pt3_fail:
self.system_tst['status'] = 'caution'
self.system_tst['messages'].append(
['System Test: One or more PT3 tests in the system test indicate potential EMI;', 2, 3])
# Check for failed tests
if num_tests_with_failure == len(meas.system_test):
# All tests had a failure
self.system_tst['status'] = 'warning'
self.system_tst['messages'].append(
['SYSTEM TEST: All system test sets have at least one test that failed;', 1, 3])
elif num_tests_with_failure > 0:
self.system_tst['status'] = 'caution'
self.system_tst['messages'].append(
['System Test: One or more system test sets have at least one test that failed;', 2, 3])
def compass_qa(self, meas):
"""Apply QA checks to compass calibration and evaluation.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.compass['messages'] = []
checked = []
for transect in meas.transects:
checked.append(transect.checked)
if np.any(checked):
heading = np.unique(meas.transects[checked.index(1)].sensors.heading_deg.internal.data)
else:
heading = np.array([0])
# Initialize variable as if ADCP has no compass
self.compass['status'] = 'inactive'
self.compass['status1'] = 'good'
self.compass['status2'] = 'good'
self.compass['magvar'] = 0
self.compass['magvar_idx'] = 0
if len(heading) > 1 and np.any(np.not_equal(heading, 0)):
# ADCP has a compass
# A compass calibration is required if a loop test or GPS data are used
# Check for loop test
loop = False
for test in meas.mb_tests:
if test.type == 'Loop':
loop = True
# Check for GPS data
gps = False
if meas.transects[checked.index(True)].boat_vel.gga_vel is not None or \
meas.transects[checked.index(True)].boat_vel.vtg_vel is not None:
gps = True
if gps or loop:
# Compass calibration is required
# Determine the ADCP manufacturer
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
# SonTek ADCP
if not meas.compass_cal:
# No compass calibration
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration;', 1, 4])
elif meas.compass_cal[-1].result['compass']['error'] == 'N/A':
# If the error cannot be decoded from the calibration assume the calibration is good
self.compass['status1'] = 'good'
else:
if meas.compass_cal[-1].result['compass']['error'] <= 0.2:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['COMPASS: Calibration result > 0.2 deg;', 2, 4])
elif meas.transects[checked.index(True)].adcp.manufacturer == 'TRDI':
# TRDI ADCP
if not meas.compass_cal:
# No compass calibration
if not meas.compass_eval:
# No calibration or evaluation
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration or evaluation;', 1, 4])
else:
# No calibration but an evaluation was completed
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass calibration;', 2, 4])
else:
# Compass was calibrated
if not meas.compass_eval:
# No compass evaluation
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass evaluation;', 2, 4])
else:
# Check results of evaluation
try:
if float(meas.compass_eval[-1].result['compass']['error']) <= 1:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: Evaluation result > 1 deg;', 2, 4])
except ValueError:
self.compass['status1'] = 'good'
else:
# Compass not required
if (not meas.compass_cal) and (not meas.compass_eval):
# No compass calibration or evaluation
self.compass['status1'] = 'default'
else:
# Compass was calibrated and evaluated
self.compass['status1'] = 'good'
# Check for consistent magvar
magvar = []
mag_error_exceeded = []
pitch_mean = []
pitch_std = []
pitch_exceeded = []
roll_mean = []
roll_std = []
roll_exceeded = []
for transect in meas.transects:
if transect.checked:
heading_source_selected = getattr(
transect.sensors.heading_deg, transect.sensors.heading_deg.selected)
pitch_source_selected = getattr(transect.sensors.pitch_deg, transect.sensors.pitch_deg.selected)
roll_source_selected = getattr(transect.sensors.roll_deg, transect.sensors.roll_deg.selected)
magvar.append(heading_source_selected.mag_var_deg)
pitch_mean.append(np.nanmean(pitch_source_selected.data))
pitch_std.append(np.nanstd(pitch_source_selected.data))
roll_mean.append(np.nanmean(roll_source_selected.data))
roll_std.append(np.nanstd(roll_source_selected.data))
# SonTek G3 compass provides pitch, roll, and magnetic error parameters that can be checked
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
if heading_source_selected.pitch_limit is not None:
# Work around a bug in SonTek data where pitch and roll may be n x 3; use only the first column (n x 1)
if len(pitch_source_selected.data.shape) == 1:
pitch_data = pitch_source_selected.data
else:
pitch_data = pitch_source_selected.data[:, 0]
idx_max = np.where(pitch_data > heading_source_selected.pitch_limit[0])[0]
idx_min = np.where(pitch_data < heading_source_selected.pitch_limit[1])[0]
if len(idx_max) > 0 or len(idx_min) > 0:
pitch_exceeded.append(True)
else:
pitch_exceeded.append(False)
if heading_source_selected.roll_limit is not None:
if len(roll_source_selected.data.shape) == 1:
roll_data = roll_source_selected.data
else:
roll_data = roll_source_selected.data[:, 0]
idx_max = np.where(roll_data > heading_source_selected.roll_limit[0])[0]
idx_min = np.where(roll_data < heading_source_selected.roll_limit[1])[0]
if len(idx_max) > 0 or len(idx_min) > 0:
roll_exceeded.append(True)
else:
roll_exceeded.append(False)
if heading_source_selected.mag_error is not None:
idx_max = np.where(heading_source_selected.mag_error > 2)[0]
if len(idx_max) > 0:
mag_error_exceeded.append(True)
else:
mag_error_exceeded.append(False)
if len(np.unique(magvar)) > 1:
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: Magnetic variation is not consistent among transects;', 2, 4])
self.compass['magvar'] = 1
# Check that magvar was set if GPS data are available
if gps:
if 0 in magvar:
self.compass['status2'] = 'warning'
self.compass['messages'].append(
['COMPASS: Magnetic variation is 0 and GPS data are present;', 1, 4])
self.compass['magvar'] = 2
self.compass['magvar_idx'] = magvar.index(0)
# Check pitch mean
if np.any(np.asarray(pitch_mean) > 8):
self.compass['status2'] = 'warning'
self.compass['messages'].append(['PITCH: One or more transects have a mean pitch > 8 deg;', 1, 4])
elif np.any(np.asarray(pitch_mean) > 4):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Pitch: One or more transects have a mean pitch > 4 deg;', 2, 4])
# Check roll mean
if np.any(np.asarray(roll_mean) > 8):
self.compass['status2'] = 'warning'
self.compass['messages'].append(['ROLL: One or more transects have a mean roll > 8 deg;', 1, 4])
elif np.any(np.asarray(roll_mean) > 4):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Roll: One or more transects have a mean roll > 4 deg;', 2, 4])
# Check pitch standard deviation
if np.any(np.asarray(pitch_std) > 5):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Pitch: One or more transects have a pitch std dev > 5 deg;', 2, 4])
# Check roll standard deviation
if np.any(np.asarray(roll_std) > 5):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(['Roll: One or more transects have a roll std dev > 5 deg;', 2, 4])
# Additional checks for SonTek G3 compass
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
# Check if pitch limits were exceeded
if any(pitch_exceeded):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: One or more transects have pitch exceeding calibration limits;', 2, 4])
# Check if roll limits were exceeded
if any(roll_exceeded):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: One or more transects have roll exceeding calibration limits;', 2, 4])
# Check if magnetic error was exceeded
if any(mag_error_exceeded):
if self.compass['status2'] == 'good':
self.compass['status2'] = 'caution'
self.compass['messages'].append(
['Compass: One or more transects have a change in mag field exceeding 2%;', 2, 4])
if self.compass['status1'] == 'warning' or self.compass['status2'] == 'warning':
self.compass['status'] = 'warning'
elif self.compass['status1'] == 'caution' or self.compass['status2'] == 'caution':
self.compass['status'] = 'caution'
else:
self.compass['status'] = 'good'
def temperature_qa(self, meas):
"""Apply QA checks to temperature.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.temperature['messages'] = []
check = [0, 0]
# Create array of all temperatures
temp = np.array([])
checked = []
for transect in meas.transects:
if transect.checked:
checked.append(transect.checked)
temp_selected = getattr(transect.sensors.temperature_deg_c, transect.sensors.temperature_deg_c.selected)
if len(temp) == 0:
temp = temp_selected.data
else:
temp = np.hstack((temp, temp_selected.data))
# Check temperature range
if np.any(checked):
temp_range = np.nanmax(temp) - np.nanmin(temp)
else:
temp_range = 0
if temp_range > 2:
check[0] = 3
self.temperature['messages'].append(['TEMPERATURE: Temperature range is '
+ '%3.1f' % temp_range
+ ' degrees C, which is greater than 2 degrees;', 1, 5])
elif temp_range > 1:
check[0] = 2
self.temperature['messages'].append(['TEMPERATURE: Temperature range is '
+ '%3.1f' % temp_range
+ ' degrees C, which is greater than 1 degree;', 2, 5])
else:
check[0] = 1
# Check for independent temperature reading
if 'user' in meas.ext_temp_chk:
try:
user = float(meas.ext_temp_chk['user'])
except (ValueError, TypeError) as e:
user = None
if user is None:
# No independent temperature reading
check[1] = 2
self.temperature['messages'].append(['Temperature: No independent temperature reading;', 2, 5])
elif meas.ext_temp_chk['adcp']:
# Compare user to manually entered ADCP temperature
diff = np.abs(user - meas.ext_temp_chk['adcp'])
if diff < 2:
check[1] = 1
else:
check[1] = 3
self.temperature['messages'].append(
['TEMP.: The difference between ADCP and reference is > 2: ' + '%3.1f' % diff + ' C;', 1, 5])
else:
# Compare user to mean of all temperature data
diff = np.abs(user - np.nanmean(temp))
if diff < 2:
check[1] = 1
else:
check[1] = 3
self.temperature['messages'].append(
['TEMP.: The difference between ADCP and reference is > 2: ' + '%3.1f' % diff + ' C;', 1, 5])
# Assign temperature status
max_check = max(check)
if max_check == 1:
self.temperature['status'] = 'good'
elif max_check == 2:
self.temperature['status'] = 'caution'
elif max_check == 3:
self.temperature['status'] = 'warning'
def moving_bed_qa(self, meas):
"""Applies quality checks to moving-bed tests.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.moving_bed['messages'] = []
self.moving_bed['code'] = 0
# Are there moving-bed tests?
if len(meas.mb_tests) < 1:
# No moving-bed test
self.moving_bed['messages'].append(['MOVING-BED TEST: No moving bed test;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
else:
# Moving-bed tests available
mb_data = meas.mb_tests
# Are tests valid according to the user
user_valid_test = []
file_names = []
idx_selected = []
test_quality = []
mb_tests = []
mb = []
mb_test_type = []
loop = []
for n, test in enumerate(mb_data):
if test.user_valid:
user_valid_test.append(True)
file_names.append(test.transect.file_name)
if test.type == 'Loop' and not test.test_quality == 'Errors':
loop.append(test.moving_bed)
# Selected test
if test.selected:
idx_selected.append(n)
test_quality.append(test.test_quality)
mb_tests.append(test)
mb.append(test.moving_bed)
mb_test_type.append(test.type)
else:
user_valid_test.append(False)
if not any(user_valid_test):
# No valid test according to user
self.moving_bed['messages'].append(['MOVING-BED TEST: No valid moving-bed test based on user input;',
1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
else:
# Check for duplicate valid moving-bed tests
if len(np.unique(file_names)) < len(file_names):
self.moving_bed['messages'].append([
'MOVING-BED TEST: Duplicate moving-bed test files marked valid;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
if self.moving_bed['code'] == 0:
# Check test quality
if len(test_quality) > 0 and sum(np.array(test_quality) == 'Good') > 0:
self.moving_bed['status'] = 'good'
self.moving_bed['code'] = 1
# Check if there is a moving-bed
if any(mb):
# Moving-bed present
self.moving_bed['messages'].append(
['Moving-Bed Test: A moving-bed is present, use GPS or moving-bed correction;', 2, 6])
self.moving_bed['code'] = 2
self.moving_bed['status'] = 'caution'
# Check for test type
if sum(np.array(mb_test_type) == 'Stationary'):
# Check for GPS or 3 stationary tests
if len(mb_tests) < 3:
gps = []
for transect in meas.transects:
if transect.checked:
if transect.gps is None:
gps.append(False)
else:
gps.append(True)
if not all(gps):
# GPS not available for all selected transects
self.moving_bed['messages'].append([
'Moving-Bed Test: '
+ 'Less than 3 stationary tests available for moving-bed correction;',
2, 6])
elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Warnings') > 0:
# Quality check has warnings
self.moving_bed['messages'].append(['Moving-Bed Test: The moving-bed test(s) has warnings, '
+ 'please review tests to determine validity;', 2, 6])
self.moving_bed['status'] = 'caution'
self.moving_bed['code'] = 2
elif len(test_quality) > 0 and sum(np.array(test_quality) == 'Manual') > 0:
# Manual override used
self.moving_bed['messages'].append(['MOVING-BED TEST: '
+ 'The user has manually forced the use of some tests;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
else:
# Test has critical errors
self.moving_bed['messages'].append(['MOVING-BED TEST: The moving-bed test(s) have critical errors '
+ 'and will not be used;', 1, 6])
self.moving_bed['status'] = 'warning'
self.moving_bed['code'] = 3
# Check multiple loops for consistency
if len(np.unique(loop)) > 1:
self.moving_bed['messages'].append(['Moving-Bed Test: Results of valid loops are not consistent, '
+ 'review moving-bed tests;', 2, 6])
if self.moving_bed['code'] < 3:
self.moving_bed['code'] = 2
self.moving_bed['status'] = 'caution'
def user_qa(self, meas):
"""Apply quality checks to user input data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.user['messages'] = []
self.user['status'] = 'good'
# Check for Station Name
self.user['sta_name'] = False
if meas.station_name is None:
self.user['messages'].append(['Site Info: Station name not entered;', 2, 2])
self.user['status'] = 'caution'
self.user['sta_name'] = True
# Check for Station Number
self.user['sta_number'] = False
if meas.station_number is None:
self.user['messages'].append(['Site Info: Station number not entered;', 2, 2])
self.user['status'] = 'caution'
self.user['sta_number'] = True
def depths_qa(self, meas):
"""Apply quality checks to depth data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize variables
n_transects = len(meas.transects)
self.depths['q_total'] = np.tile(np.nan, n_transects)
self.depths['q_max_run'] = np.tile(np.nan, n_transects)
self.depths['q_total_caution'] = np.tile(False, n_transects)
self.depths['q_run_caution'] = np.tile(False, n_transects)
self.depths['q_total_warning'] = np.tile(False, n_transects)
self.depths['q_run_warning'] = np.tile(False, n_transects)
self.depths['all_invalid'] = np.tile(False, n_transects)
self.depths['messages'] = []
self.depths['status'] = 'good'
self.depths['draft'] = 0
checked = []
drafts = []
for n, transect in enumerate(meas.transects):
checked.append(transect.checked)
if transect.checked:
in_transect_idx = transect.in_transect_idx
depths_selected = getattr(transect.depths, transect.depths.selected)
drafts.append(depths_selected.draft_use_m)
# Determine valid measured depths
if transect.depths.composite:
depth_na = depths_selected.depth_source_ens[in_transect_idx] != 'NA'
depth_in = depths_selected.depth_source_ens[in_transect_idx] != 'IN'
depth_valid = np.all(np.vstack((depth_na, depth_in)), 0)
else:
depth_valid_temp = depths_selected.valid_data[in_transect_idx]
depth_nan = depths_selected.depth_processed_m[in_transect_idx] != np.nan
depth_valid = np.all(np.vstack((depth_nan, depth_valid_temp)), 0)
if not np.any(depth_valid):
self.depths['all_invalid'][n] = True
# Compute QA characteristics
q_total, q_max_run, number_invalid_ensembles = QAData.invalid_qa(depth_valid, meas.discharge[n])
self.depths['q_total'][n] = q_total
self.depths['q_max_run'][n] = q_max_run
# Compute percentage compared to total
q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
# Apply total interpolated discharge threshold
if q_total_percent > self.q_total_threshold_warning:
self.depths['q_total_warning'][n] = True
elif q_total_percent > self.q_total_threshold_caution:
self.depths['q_total_caution'][n] = True
# Apply interpolated discharge run thresholds
if q_max_run_percent > self.q_run_threshold_warning:
self.depths['q_run_warning'][n] = True
elif q_max_run_percent > self.q_run_threshold_caution:
self.depths['q_run_caution'][n] = True
if checked:
# Create array of all unique draft values
draft_check = np.unique(np.round(drafts, 3))
# Check draft consistency
if len(draft_check) > 1:
self.depths['status'] = 'caution'
self.depths['draft'] = 1
self.depths['messages'].append(['Depth: Transducer depth is not consistent among transects;', 2, 10])
# Check for zero draft
if np.any(np.less(draft_check, 0.01)):
self.depths['status'] = 'warning'
self.depths['draft'] = 2
self.depths['messages'].append(['DEPTH: Transducer depth is too shallow, likely 0;', 1, 10])
# Check consecutive interpolated discharge criteria
if np.any(self.depths['q_run_warning']):
self.depths['messages'].append(['DEPTH: Int. Q for consecutive invalid ensembles exceeds '
+ '%2.0f' % self.q_run_threshold_warning + '%;', 1, 10])
self.depths['status'] = 'warning'
elif np.any(self.depths['q_run_caution']):
self.depths['messages'].append(['Depth: Int. Q for consecutive invalid ensembles exceeds '
+ '%2.0f' % self.q_run_threshold_caution + '%;', 2, 10])
self.depths['status'] = 'caution'
# Check total interpolated discharge criteria
if np.any(self.depths['q_total_warning']):
self.depths['messages'].append(['DEPTH: Int. Q for invalid ensembles in a transect exceeds '
+ '%2.0f' % self.q_total_threshold_warning + '%;', 1, 10])
self.depths['status'] = 'warning'
elif np.any(self.depths['q_total_caution']):
self.depths['messages'].append(['Depth: Int. Q for invalid ensembles in a transect exceeds '
+ '%2.0f' % self.q_total_threshold_caution + '%;', 2, 10])
self.depths['status'] = 'caution'
# Check if all depths are invalid
if np.any(self.depths['all_invalid']):
self.depths['messages'].append(['DEPTH: There are no valid depths for one or more transects.', 2, 10])
self.depths['status'] = 'warning'
else:
self.depths['status'] = 'inactive'
def boat_qa(self, meas):
"""Apply quality checks to boat data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize variables
n_transects = len(meas.transects)
data_type = {'BT': {'class': 'bt_vel', 'warning': 'BT-', 'caution': 'bt-',
'filter': [('All: ', 0), ('Original: ', 1), ('ErrorVel: ', 2),
('VertVel: ', 3), ('Other: ', 4), ('3Beams: ', 5)]},
'GGA': {'class': 'gga_vel', 'warning': 'GGA-', 'caution': 'gga-',
'filter': [('All: ', 0), ('Original: ', 1), ('DGPS: ', 2),
('Altitude: ', 3), ('Other: ', 4), ('HDOP: ', 5)]},
'VTG': {'class': 'vtg_vel', 'warning': 'VTG-', 'caution': 'vtg-',
'filter': [('All: ', 0), ('Original: ', 1), ('HDOP: ', 5)]}}
for dt_key, dt_value in data_type.items():
boat = getattr(self, dt_value['class'])
# Initialize dictionaries for each data type
boat['q_total_caution'] = np.tile(False, (n_transects, 6))
boat['q_max_run_caution'] = np.tile(False, (n_transects, 6))
boat['q_total_warning'] = np.tile(False, (n_transects, 6))
boat['q_max_run_warning'] = np.tile(False, (n_transects, 6))
boat['all_invalid'] = np.tile(False, n_transects)
boat['q_total'] = np.tile(np.nan, (n_transects, 6))
boat['q_max_run'] = np.tile(np.nan, (n_transects, 6))
boat['messages'] = []
status_switch = 0
avg_speed_check = 0
# Check the results of each filter
for dt_filter in dt_value['filter']:
boat['status'] = 'inactive'
# Quality check each transect
for n, transect in enumerate(meas.transects):
# Evaluate on transects used in the discharge computation
if transect.checked:
in_transect_idx = transect.in_transect_idx
# Check to see if data are available for the data_type
if getattr(transect.boat_vel, dt_value['class']) is not None:
boat['status'] = 'good'
# Compute quality characteristics
valid = getattr(transect.boat_vel, dt_value['class']).valid_data[dt_filter[1],
in_transect_idx]
q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
boat['q_total'][n, dt_filter[1]] = q_total
boat['q_max_run'][n, dt_filter[1]] = q_max_run
# Compute percentage compared to total
q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
# Check if all invalid
if dt_filter[1] == 0 and not np.any(valid):
boat['all_invalid'][n] = True
# Apply total interpolated discharge threshold
if q_total_percent > self.q_total_threshold_warning:
boat['q_total_warning'][n, dt_filter[1]] = True
elif q_total_percent > self.q_total_threshold_caution:
boat['q_total_caution'][n, dt_filter[1]] = True
# Apply interpolated discharge run thresholds
if q_max_run_percent > self.q_run_threshold_warning:
boat['q_max_run_warning'][n, dt_filter[1]] = True
elif q_max_run_percent > self.q_run_threshold_caution:
boat['q_max_run_caution'][n, dt_filter[1]] = True
# Check boat velocity for vtg data
if dt_key == 'VTG' and transect.boat_vel.selected == 'vtg_vel' and avg_speed_check == 0:
avg_speed = np.nanmean((transect.boat_vel.vtg_vel.u_mps**2
+ transect.boat_vel.vtg_vel.v_mps**2)**0.5)
if avg_speed < 0.24:
boat['q_total_caution'][n, dt_filter[1]] = True
boat['messages'].append(
['vtg-AvgSpeed: VTG data may not be accurate for average boat speed less than'
+ '0.24 m/s (0.8 ft/s);', 2, 8])
avg_speed_check = 1
# Create message for consecutive invalid discharge
if boat['q_max_run_warning'][:, dt_filter[1]].any():
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['warning'] + dt_filter[0] +
'Int. Q for consecutive invalid ensembles exceeds ' +
'%3.1f' % self.q_run_threshold_warning + '%;', 1, module_code])
status_switch = 2
elif boat['q_max_run_caution'][:, dt_filter[1]].any():
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['caution'] + dt_filter[0] +
'Int. Q for consecutive invalid ensembles exceeds ' +
'%3.1f' % self.q_run_threshold_caution + '%;', 2, module_code])
if status_switch < 1:
status_switch = 1
# Create message for total invalid discharge
if boat['q_total_warning'][:, dt_filter[1]].any():
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['warning'] + dt_filter[0] +
'Int. Q for invalid ensembles in a transect exceeds ' +
'%3.1f' % self.q_total_threshold_warning + '%;', 1, module_code])
status_switch = 2
elif boat['q_total_caution'][:, dt_filter[1]].any():
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['caution'] + dt_filter[0] +
'Int. Q for invalid ensembles in a transect exceeds ' +
'%3.1f' % self.q_total_threshold_caution + '%;', 2, module_code])
if status_switch < 1:
status_switch = 1
# Create message for all data invalid
if boat['all_invalid'].any():
boat['status'] = 'warning'
if dt_key == 'BT':
module_code = 7
else:
module_code = 8
boat['messages'].append(
[dt_value['warning'] + dt_value['filter'][0][0] +
'There are no valid data for one or more transects.;', 1, module_code])
# Set status
if status_switch == 2:
boat['status'] = 'warning'
elif status_switch == 1:
boat['status'] = 'caution'
setattr(self, dt_value['class'], boat)
def water_qa(self, meas):
"""Apply quality checks to water data.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize filter labels and indices
prefix = ['All: ', 'Original: ', 'ErrorVel: ', 'VertVel: ', 'Other: ', '3Beams: ', 'SNR:']
if meas.transects[0].adcp.manufacturer == 'TRDI':
filter_index = [0, 1, 2, 3, 4, 5]
else:
filter_index = [0, 1, 2, 3, 4, 5, 7]
n_transects = len(meas.transects)
n_filters = len(filter_index) + 1
# Initialize dictionaries for each data type
self.w_vel['q_total_caution'] = np.tile(False, (n_transects, n_filters))
self.w_vel['q_max_run_caution'] = np.tile(False, (n_transects, n_filters))
self.w_vel['q_total_warning'] = np.tile(False, (n_transects, n_filters))
self.w_vel['q_max_run_warning'] = np.tile(False, (n_transects, n_filters))
self.w_vel['all_invalid'] = np.tile(False, n_transects)
self.w_vel['q_total'] = np.tile(np.nan, (n_transects, n_filters))
self.w_vel['q_max_run'] = np.tile(np.nan, (n_transects, n_filters))
self.w_vel['messages'] = []
status_switch = 0
# TODO if meas had a property checked as list it would save creating that list multiple times
checked = []
for transect in meas.transects:
checked.append(transect.checked)
# At least one transect is being used to compute discharge
if any(checked):
# Loop through filters
for prefix_idx, filter_idx in enumerate(filter_index):
# Loop through transects
for n, transect in enumerate(meas.transects):
if transect.checked:
valid_original = np.any(transect.w_vel.valid_data[1, :, transect.in_transect_idx].T, 0)
# Determine what data each filter have marked invalid. Original invalid data are excluded
valid = np.any(transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T, 0)
if filter_idx > 1:
valid_int = valid.astype(int) - valid_original.astype(int)
valid = valid_int != -1
# Check if all data are invalid
if filter_idx == 0:
if np.nansum(valid.astype(int)) < 1:
self.w_vel['all_invalid'][n] = True
# TODO seems like the rest of this should be under else of all invalid or multiple messages
# generated.
# Compute characteristics
q_total, q_max_run, number_invalid_ens = QAData.invalid_qa(valid, meas.discharge[n])
self.w_vel['q_total'][n, filter_idx] = q_total
self.w_vel['q_max_run'][n, filter_idx] = q_max_run
# Compute percentage compared to total
q_total_percent = np.abs((q_total / meas.discharge[n].total) * 100)
q_max_run_percent = np.abs((q_max_run / meas.discharge[n].total) * 100)
# Check total invalid discharge in ensembles for warning
if q_total_percent > self.q_total_threshold_warning:
self.w_vel['q_total_warning'][n, filter_idx] = True
# Apply run or cluster thresholds
if q_max_run_percent > self.q_run_threshold_warning:
self.w_vel['q_max_run_warning'][n, filter_idx] = True
elif q_max_run_percent > self.q_run_threshold_caution:
self.w_vel['q_max_run_caution'][n, filter_idx] = True
# Compute percent discharge interpolated for both cells and ensembles
# This approach doesn't exclude original data
valid_cells = transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T
q_invalid_total = np.nansum(meas.discharge[n].middle_cells[np.logical_not(valid_cells)]) \
+ np.nansum(meas.discharge[n].top_ens[np.logical_not(valid)]) \
+ np.nansum(meas.discharge[n].bottom_ens[np.logical_not(valid)])
q_invalid_total_percent = (q_invalid_total / meas.discharge[n].total) * 100
if q_invalid_total_percent > self.q_total_threshold_caution:
self.w_vel['q_total_caution'][n, filter_idx] = True
# Generate messages for ensemble run or clusters
if np.any(self.w_vel['q_max_run_warning'][:, filter_idx]):
self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+ 'Int. Q for consecutive invalid ensembles exceeds '
+ '%3.0f' % self.q_run_threshold_warning
+ '%;', 1, 11])
status_switch = 2
elif np.any(self.w_vel['q_max_run_caution'][:, filter_idx]):
self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+ 'Int. Q for consecutive invalid ensembles exceeds '
+ '%3.0f' % self.q_run_threshold_caution
+ '%;', 2, 11])
if status_switch < 1:
status_switch = 1
# Generate message for total_invalid Q
if np.any(self.w_vel['q_total_warning'][:, filter_idx]):
self.w_vel['messages'].append(['WT-' + prefix[prefix_idx]
+ 'Int. Q for invalid ensembles in a transect exceeds '
+ '%3.0f' % self.q_total_threshold_warning
+ '%;', 1, 11])
status_switch = 2
elif np.any(self.w_vel['q_total_caution'][:, filter_idx]):
self.w_vel['messages'].append(['wt-' + prefix[prefix_idx]
+ 'Int. Q for invalid cells and ensembles in a transect exceeds '
+ '%3.0f' % self.q_total_threshold_caution
+ '%;', 2, 11])
if status_switch < 1:
status_switch = 1
# Generate message for all invalid
if np.any(self.w_vel['all_invalid']):
self.w_vel['messages'].append(['WT-' + prefix[0] + 'There are no valid data for one or more transects.',
1, 11])
status_switch = 2
# Set status
self.w_vel['status'] = 'good'
if status_switch == 2:
self.w_vel['status'] = 'warning'
elif status_switch == 1:
self.w_vel['status'] = 'caution'
else:
self.w_vel['status'] = 'inactive'
def extrapolation_qa(self, meas):
"""Apply quality checks to extrapolation methods
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.extrapolation['messages'] = []
checked = []
discharges = []
for n, transect in enumerate(meas.transects):
checked.append(transect.checked)
if transect.checked:
discharges.append(meas.discharge[n])
if any(checked):
self.extrapolation['status'] = 'good'
extrap_uncertainty = Uncertainty.uncertainty_extrapolation(meas, discharges)
if np.abs(extrap_uncertainty) > 2:
self.extrapolation['messages'].append(['Extrapolation: The extrapolation uncertainty is more than '
+ '2 percent;', 2, 12])
self.extrapolation['messages'].append([' Carefully review the extrapolation;', 2, 12])
self.extrapolation['status'] = 'caution'
else:
self.extrapolation['status'] = 'inactive'
def edges_qa(self, meas):
"""Apply quality checks to edge estimates
Parameters
----------
meas: Measurement
Object of class Measurement
"""
# Initialize variables
self.edges['messages'] = []
checked = []
left_q = []
right_q = []
total_q = []
edge_dist_left = []
edge_dist_right = []
dist_moved_left = []
dist_moved_right = []
dist_made_good = []
left_type = []
right_type = []
for n, transect in enumerate(meas.transects):
checked.append(transect.checked)
if transect.checked:
left_q.append(meas.discharge[n].left)
right_q.append(meas.discharge[n].right)
total_q.append(meas.discharge[n].total)
dmr, dml, dmg = QAData.edge_distance_moved(transect)
dist_moved_right.append(dmr)
dist_moved_left.append(dml)
dist_made_good.append(dmg)
edge_dist_left.append(transect.edges.left.distance_m)
edge_dist_right.append(transect.edges.right.distance_m)
left_type.append(transect.edges.left.type)
right_type.append(transect.edges.right.type)
if any(checked):
# Set default status to good
self.edges['status'] = 'good'
# Check left edge q > 5%
self.edges['left_q'] = 0
left_q_percent = (np.nanmean(left_q) / np.nanmean(total_q)) * 100
if np.abs(left_q_percent) > 5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Left edge Q is greater than 5%;', 2, 13])
self.edges['left_q'] = 1
# Check right edge q > 5%
self.edges['right_q'] = 0
right_q_percent = (np.nanmean(right_q) / np.nanmean(total_q)) * 100
if np.abs(right_q_percent) > 5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Right edge Q is greater than 5%;', 2, 13])
self.edges['right_q'] = 1
# Check for consistent sign
q_positive = []
self.edges['left_sign'] = 0
for q in left_q:
if q >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1 and left_q_percent > 0.5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Sign of left edge Q is not consistent;', 2, 13])
self.edges['left_sign'] = 1
q_positive = []
self.edges['right_sign'] = 0
for q in right_q:
if q >= 0:
q_positive.append(True)
else:
q_positive.append(False)
if len(np.unique(q_positive)) > 1 and right_q_percent > 0.5:
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Sign of right edge Q is not consistent;', 2, 13])
self.edges['right_sign'] = 1
# Check distance moved
dmg_5_percent = 0.05 * np.nanmean(dist_made_good)
avg_right_edge_dist = np.nanmean(edge_dist_right)
right_threshold = np.nanmin([dmg_5_percent, avg_right_edge_dist])
self.edges['right_dist_moved_idx'] = np.where(np.asarray(dist_moved_right) > right_threshold)[0]
if np.any(self.edges['right_dist_moved_idx']):
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Excessive boat movement in right edge ensembles;', 2, 13])
avg_left_edge_dist = np.nanmean(edge_dist_left)
left_threshold = np.nanmin([dmg_5_percent, avg_left_edge_dist])
self.edges['left_dist_moved_idx'] = np.where(np.asarray(dist_moved_left) > left_threshold)[0]
if np.any(self.edges['left_dist_moved_idx']):
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: Excessive boat movement in left edge ensembles;', 2, 13])
# Check for edge ensembles marked invalid due to excluded distance
for transect in meas.transects:
if transect.checked:
ens_sum_excluded_data = np.nansum(transect.w_vel.valid_data[6, :, :], 0)
cells_above_sl = np.nansum(transect.w_vel.cells_above_sl, 0)
ens_excluded_data = np.not_equal(ens_sum_excluded_data, cells_above_sl)
if any(ens_excluded_data):
self.edges['status'] = 'caution'
self.edges['messages'].append(['Edges: The excluded distance caused invalid ensembles '
+ 'in an edge, check edge distance;', 2, 13])
break
# Check edges for zero discharge
self.edges['left_zero'] = 0
left_zero_idx = np.where(np.asarray(left_q) == 0)[0]
if left_zero_idx.size > 0:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Left edge has zero Q;', 1, 13])
self.edges['left_zero'] = 2
self.edges['right_zero'] = 0
right_zero_idx = np.where(np.asarray(right_q) == 0)[0]
if right_zero_idx.size > 0:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Right edge has zero Q;', 1, 13])
self.edges['right_zero'] = 2
# Check consistent edge type
self.edges['left_type'] = 0
if len(np.unique(left_type)) > 1:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Left edge type is not consistent;', 1, 13])
self.edges['left_type'] = 2
self.edges['right_type'] = 0
if len(np.unique(right_type)) > 1:
self.edges['status'] = 'warning'
self.edges['messages'].append(['EDGES: Right edge type is not consistent;', 1, 13])
self.edges['right_type'] = 2
else:
self.edges['status'] = 'inactive'
@staticmethod
def invalid_qa(valid, discharge):
"""Computes the total invalid discharge in ensembles that have invalid data. The function also computes
the maximum interpolated discharge in any single run or cluster of invalid ensembles.
Parameters
----------
valid: np.array(bool)
Array identifying valid and invalid ensembles.
discharge: QComp
Object of class QComp
Returns
-------
q_invalid_total: float
Total interpolated discharge in invalid ensembles
q_invalid_max_run: float
Maximum interpolated discharge in a run or cluster of invalid ensembles
ens_invalid: int
Total number of invalid ensembles
"""
# Create bool for invalid data
invalid = np.logical_not(valid)
q_invalid_total = np.nansum(discharge.middle_ens[invalid]) + np.nansum(discharge.top_ens[invalid]) \
+ np.nansum(discharge.bottom_ens[invalid])
# Compute total number of invalid ensembles
ens_invalid = np.sum(invalid)
# Compute the indices of where changes occur
valid_int = np.insert(valid.astype(int), 0, -1)
valid_int = np.append(valid_int, -1)
valid_run = np.where(np.diff(valid_int) != 0)[0]
run_length = np.diff(valid_run)
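# Added note: padding valid_int with -1 at both ends forces a "change" at the first and
# last ensemble, so valid_run marks every boundary between runs of valid and invalid data
# and run_length holds the length of each run; the slice below keeps only the invalid
# runs (every other run, with the offset depending on whether the record starts valid).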
run_length0 = run_length[(valid[0] == 1)::2]
n_runs = len(run_length0)
if valid[0]:
n_start = 1
else:
n_start = 0
n_end = len(valid_run)-1
if n_runs > 1:
m = 0
q_invalid_run = []
for n in range(n_start, n_end, 2):
m += 1
idx_start = valid_run[n]
idx_end = valid_run[n+1]
q_invalid_run.append(np.nansum(discharge.middle_ens[idx_start:idx_end])
+ np.nansum(discharge.top_ens[idx_start:idx_end])
+ np.nansum(discharge.bottom_ens[idx_start:idx_end]))
# Determine the maximum discharge in a single run
q_invalid_max_run = np.nanmax(np.abs(q_invalid_run))
else:
q_invalid_max_run = 0
return q_invalid_total, q_invalid_max_run, ens_invalid
@staticmethod
def edge_distance_moved(transect):
"""Computes the boat movement during edge ensemble collection.
Parameters
----------
transect: Transect
Object of class Transect
Returns
-------
right_dist_moved: float
Distance in m moved during collection of right edge samples
left_dist_moved: float
Distance in m moved during collection of left edge samples
dmg: float
Distance made good for the entire transect
"""
boat_selected = getattr(transect.boat_vel, transect.boat_vel.selected)
ens_duration = transect.date_time.ens_duration_sec
# Get boat velocities
if boat_selected is not None:
u_processed = boat_selected.u_processed_mps
v_processed = boat_selected.v_processed_mps
else:
u_processed = np.tile(np.nan, transect.boat_vel.bt_vel.u_processed_mps.shape)
v_processed = np.tile(np.nan, transect.boat_vel.bt_vel.v_processed_mps.shape)
# Compute boat coordinates
x_processed = np.nancumsum(u_processed * ens_duration)
y_processed = np.nancumsum(v_processed * ens_duration)
dmg = (x_processed[-1]**2 + y_processed[-1]**2)**0.5
# Compute left distance moved
# TODO should be a dist moved function
left_edge_idx = QComp.edge_ensembles('left', transect)
if len(left_edge_idx) > 0:
boat_x = x_processed[left_edge_idx[-1]] - x_processed[left_edge_idx[0]]
boat_y = y_processed[left_edge_idx[-1]] - y_processed[left_edge_idx[0]]
left_dist_moved = (boat_x**2 + boat_y**2)**0.5
else:
left_dist_moved = np.nan
# Compute right distance moved
right_edge_idx = QComp.edge_ensembles('right', transect)
if len(right_edge_idx) > 0:
boat_x = x_processed[right_edge_idx[-1]] - x_processed[right_edge_idx[0]]
boat_y = y_processed[right_edge_idx[-1]] - y_processed[right_edge_idx[0]]
right_dist_moved = (boat_x ** 2 + boat_y ** 2) ** 0.5
else:
right_dist_moved = np.nan
return right_dist_moved, left_dist_moved, dmg
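# Example usage (illustrative sketch, not part of the original module; assumes a fully
# populated Measurement object named `meas`, as described in the docstrings above):
#
#     from Classes.QAData import QAData
#     qa = QAData(meas)                      # all checks run in __init__
#     print(qa.transects['status'])          # 'good', 'caution', or 'warning'
#     for text, importance, tab_code in qa.temperature['messages']:
#         print(importance, text)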
|
[
"numpy.sum",
"numpy.abs",
"numpy.greater",
"numpy.isnan",
"numpy.tile",
"numpy.round",
"numpy.unique",
"numpy.nanmean",
"Classes.Uncertainty.Uncertainty.uncertainty_extrapolation",
"numpy.logical_not",
"numpy.append",
"Classes.QComp.QComp.edge_ensembles",
"numpy.less",
"numpy.nansum",
"Classes.Uncertainty.Uncertainty.uncertainty_q_random",
"numpy.asarray",
"numpy.not_equal",
"numpy.hstack",
"numpy.nanmax",
"numpy.vstack",
"numpy.nanstd",
"numpy.nancumsum",
"numpy.nanmin",
"numpy.any",
"numpy.diff",
"numpy.array",
"numpy.where"
] |
[((11642, 11657), 'numpy.any', 'np.any', (['checked'], {}), '(checked)\n', (11648, 11657), True, 'import numpy as np\n'), ((23573, 23585), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (23581, 23585), True, 'import numpy as np\n'), ((24064, 24079), 'numpy.any', 'np.any', (['checked'], {}), '(checked)\n', (24070, 24079), True, 'import numpy as np\n'), ((33643, 33671), 'numpy.tile', 'np.tile', (['np.nan', 'n_transects'], {}), '(np.nan, n_transects)\n', (33650, 33671), True, 'import numpy as np\n'), ((33707, 33735), 'numpy.tile', 'np.tile', (['np.nan', 'n_transects'], {}), '(np.nan, n_transects)\n', (33714, 33735), True, 'import numpy as np\n'), ((33777, 33804), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (33784, 33804), True, 'import numpy as np\n'), ((33844, 33871), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (33851, 33871), True, 'import numpy as np\n'), ((33913, 33940), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (33920, 33940), True, 'import numpy as np\n'), ((33980, 34007), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (33987, 34007), True, 'import numpy as np\n'), ((34045, 34072), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (34052, 34072), True, 'import numpy as np\n'), ((47610, 47650), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, n_filters)'], {}), '(False, (n_transects, n_filters))\n', (47617, 47650), True, 'import numpy as np\n'), ((47693, 47733), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, n_filters)'], {}), '(False, (n_transects, n_filters))\n', (47700, 47733), True, 'import numpy as np\n'), ((47774, 47814), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, n_filters)'], {}), '(False, (n_transects, n_filters))\n', (47781, 47814), True, 'import numpy as np\n'), ((47857, 47897), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, n_filters)'], {}), '(False, (n_transects, n_filters))\n', (47864, 47897), True, 'import numpy as np\n'), ((47934, 47961), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (47941, 47961), True, 'import numpy as np\n'), ((47994, 48035), 'numpy.tile', 'np.tile', (['np.nan', '(n_transects, n_filters)'], {}), '(np.nan, (n_transects, n_filters))\n', (48001, 48035), True, 'import numpy as np\n'), ((48070, 48111), 'numpy.tile', 'np.tile', (['np.nan', '(n_transects, n_filters)'], {}), '(np.nan, (n_transects, n_filters))\n', (48077, 48111), True, 'import numpy as np\n'), ((62914, 62935), 'numpy.logical_not', 'np.logical_not', (['valid'], {}), '(valid)\n', (62928, 62935), True, 'import numpy as np\n'), ((63175, 63190), 'numpy.sum', 'np.sum', (['invalid'], {}), '(invalid)\n', (63181, 63190), True, 'import numpy as np\n'), ((63322, 63346), 'numpy.append', 'np.append', (['valid_int', '(-1)'], {}), '(valid_int, -1)\n', (63331, 63346), True, 'import numpy as np\n'), ((63425, 63443), 'numpy.diff', 'np.diff', (['valid_run'], {}), '(valid_run)\n', (63432, 63443), True, 'import numpy as np\n'), ((65519, 65559), 'numpy.nancumsum', 'np.nancumsum', (['(u_processed * ens_duration)'], {}), '(u_processed * ens_duration)\n', (65531, 65559), True, 'import numpy as np\n'), ((65582, 65622), 'numpy.nancumsum', 'np.nancumsum', (['(v_processed * ens_duration)'], {}), '(v_processed * ens_duration)\n', (65594, 65622), True, 'import numpy as np\n'), ((65794, 65832), 'Classes.QComp.QComp.edge_ensembles', 'QComp.edge_ensembles', 
(['"""left"""', 'transect'], {}), "('left', transect)\n", (65814, 65832), False, 'from Classes.QComp import QComp\n'), ((66211, 66250), 'Classes.QComp.QComp.edge_ensembles', 'QComp.edge_ensembles', (['"""right"""', 'transect'], {}), "('right', transect)\n", (66231, 66250), False, 'from Classes.QComp import QComp\n'), ((3937, 3956), 'numpy.asarray', 'np.asarray', (['checked'], {}), '(checked)\n', (3947, 3956), True, 'import numpy as np\n'), ((11795, 11808), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (11803, 11808), True, 'import numpy as np\n'), ((37224, 37260), 'numpy.any', 'np.any', (["self.depths['q_run_warning']"], {}), "(self.depths['q_run_warning'])\n", (37230, 37260), True, 'import numpy as np\n'), ((37911, 37949), 'numpy.any', 'np.any', (["self.depths['q_total_warning']"], {}), "(self.depths['q_total_warning'])\n", (37917, 37949), True, 'import numpy as np\n'), ((38602, 38636), 'numpy.any', 'np.any', (["self.depths['all_invalid']"], {}), "(self.depths['all_invalid'])\n", (38608, 38636), True, 'import numpy as np\n'), ((40045, 40077), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, 6)'], {}), '(False, (n_transects, 6))\n', (40052, 40077), True, 'import numpy as np\n'), ((40118, 40150), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, 6)'], {}), '(False, (n_transects, 6))\n', (40125, 40150), True, 'import numpy as np\n'), ((40189, 40221), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, 6)'], {}), '(False, (n_transects, 6))\n', (40196, 40221), True, 'import numpy as np\n'), ((40262, 40294), 'numpy.tile', 'np.tile', (['(False)', '(n_transects, 6)'], {}), '(False, (n_transects, 6))\n', (40269, 40294), True, 'import numpy as np\n'), ((40329, 40356), 'numpy.tile', 'np.tile', (['(False)', 'n_transects'], {}), '(False, n_transects)\n', (40336, 40356), True, 'import numpy as np\n'), ((40387, 40420), 'numpy.tile', 'np.tile', (['np.nan', '(n_transects, 6)'], {}), '(np.nan, (n_transects, 6))\n', (40394, 40420), True, 'import numpy as np\n'), ((40453, 40486), 'numpy.tile', 'np.tile', (['np.nan', '(n_transects, 6)'], {}), '(np.nan, (n_transects, 6))\n', (40460, 40486), True, 'import numpy as np\n'), ((53759, 53792), 'numpy.any', 'np.any', (["self.w_vel['all_invalid']"], {}), "(self.w_vel['all_invalid'])\n", (53765, 53792), True, 'import numpy as np\n'), ((54898, 54953), 'Classes.Uncertainty.Uncertainty.uncertainty_extrapolation', 'Uncertainty.uncertainty_extrapolation', (['meas', 'discharges'], {}), '(meas, discharges)\n', (54935, 54953), False, 'from Classes.Uncertainty import Uncertainty\n'), ((63059, 63099), 'numpy.nansum', 'np.nansum', (['discharge.bottom_ens[invalid]'], {}), '(discharge.bottom_ens[invalid])\n', (63068, 63099), True, 'import numpy as np\n'), ((65307, 65370), 'numpy.tile', 'np.tile', (['np.nan', 'transect.boat_vel.bt_vel.u_processed_mps.shape'], {}), '(np.nan, transect.boat_vel.bt_vel.u_processed_mps.shape)\n', (65314, 65370), True, 'import numpy as np\n'), ((65397, 65460), 'numpy.tile', 'np.tile', (['np.nan', 'transect.boat_vel.bt_vel.v_processed_mps.shape'], {}), '(np.nan, transect.boat_vel.bt_vel.v_processed_mps.shape)\n', (65404, 65460), True, 'import numpy as np\n'), ((12105, 12129), 'numpy.not_equal', 'np.not_equal', (['heading', '(0)'], {}), '(heading, 0)\n', (12117, 12129), True, 'import numpy as np\n'), ((24106, 24121), 'numpy.nanmax', 'np.nanmax', (['temp'], {}), '(temp)\n', (24115, 24121), True, 'import numpy as np\n'), ((24124, 24139), 'numpy.nanmin', 'np.nanmin', (['temp'], {}), '(temp)\n', (24133, 24139), True, 'import numpy as np\n'), 
((35654, 35701), 'numpy.abs', 'np.abs', (['(q_total / meas.discharge[n].total * 100)'], {}), '(q_total / meas.discharge[n].total * 100)\n', (35660, 35701), True, 'import numpy as np\n'), ((35740, 35789), 'numpy.abs', 'np.abs', (['(q_max_run / meas.discharge[n].total * 100)'], {}), '(q_max_run / meas.discharge[n].total * 100)\n', (35746, 35789), True, 'import numpy as np\n'), ((36551, 36570), 'numpy.round', 'np.round', (['drafts', '(3)'], {}), '(drafts, 3)\n', (36559, 36570), True, 'import numpy as np\n'), ((36915, 36941), 'numpy.less', 'np.less', (['draft_check', '(0.01)'], {}), '(draft_check, 0.01)\n', (36922, 36941), True, 'import numpy as np\n'), ((37541, 37577), 'numpy.any', 'np.any', (["self.depths['q_run_caution']"], {}), "(self.depths['q_run_caution'])\n", (37547, 37577), True, 'import numpy as np\n'), ((38234, 38272), 'numpy.any', 'np.any', (["self.depths['q_total_caution']"], {}), "(self.depths['q_total_caution'])\n", (38240, 38272), True, 'import numpy as np\n'), ((51729, 51783), 'numpy.any', 'np.any', (["self.w_vel['q_max_run_warning'][:, filter_idx]"], {}), "(self.w_vel['q_max_run_warning'][:, filter_idx])\n", (51735, 51783), True, 'import numpy as np\n'), ((52743, 52795), 'numpy.any', 'np.any', (["self.w_vel['q_total_warning'][:, filter_idx]"], {}), "(self.w_vel['q_total_warning'][:, filter_idx])\n", (52749, 52795), True, 'import numpy as np\n'), ((54970, 54996), 'numpy.abs', 'np.abs', (['extrap_uncertainty'], {}), '(extrap_uncertainty)\n', (54976, 54996), True, 'import numpy as np\n'), ((57005, 57027), 'numpy.abs', 'np.abs', (['left_q_percent'], {}), '(left_q_percent)\n', (57011, 57027), True, 'import numpy as np\n'), ((58929, 58956), 'numpy.nanmean', 'np.nanmean', (['edge_dist_right'], {}), '(edge_dist_right)\n', (58939, 58956), True, 'import numpy as np\n'), ((58991, 59038), 'numpy.nanmin', 'np.nanmin', (['[dmg_5_percent, avg_right_edge_dist]'], {}), '([dmg_5_percent, avg_right_edge_dist])\n', (59000, 59038), True, 'import numpy as np\n'), ((59159, 59201), 'numpy.any', 'np.any', (["self.edges['right_dist_moved_idx']"], {}), "(self.edges['right_dist_moved_idx'])\n", (59165, 59201), True, 'import numpy as np\n'), ((59412, 59438), 'numpy.nanmean', 'np.nanmean', (['edge_dist_left'], {}), '(edge_dist_left)\n', (59422, 59438), True, 'import numpy as np\n'), ((59472, 59518), 'numpy.nanmin', 'np.nanmin', (['[dmg_5_percent, avg_left_edge_dist]'], {}), '([dmg_5_percent, avg_left_edge_dist])\n', (59481, 59518), True, 'import numpy as np\n'), ((59636, 59677), 'numpy.any', 'np.any', (["self.edges['left_dist_moved_idx']"], {}), "(self.edges['left_dist_moved_idx'])\n", (59642, 59677), True, 'import numpy as np\n'), ((62962, 63002), 'numpy.nansum', 'np.nansum', (['discharge.middle_ens[invalid]'], {}), '(discharge.middle_ens[invalid])\n', (62971, 63002), True, 'import numpy as np\n'), ((63005, 63042), 'numpy.nansum', 'np.nansum', (['discharge.top_ens[invalid]'], {}), '(discharge.top_ens[invalid])\n', (63014, 63042), True, 'import numpy as np\n'), ((64254, 64275), 'numpy.abs', 'np.abs', (['q_invalid_run'], {}), '(q_invalid_run)\n', (64260, 64275), True, 'import numpy as np\n'), ((6876, 6929), 'Classes.Uncertainty.Uncertainty.uncertainty_q_random', 'Uncertainty.uncertainty_q_random', (['discharges', '"""total"""'], {}), "(discharges, 'total')\n", (6908, 6929), False, 'from Classes.Uncertainty import Uncertainty\n'), ((19248, 19265), 'numpy.unique', 'np.unique', (['magvar'], {}), '(magvar)\n', (19257, 19265), True, 'import numpy as np\n'), ((19998, 20020), 'numpy.asarray', 'np.asarray', 
(['pitch_mean'], {}), '(pitch_mean)\n', (20008, 20020), True, 'import numpy as np\n'), ((20525, 20546), 'numpy.asarray', 'np.asarray', (['roll_mean'], {}), '(roll_mean)\n', (20535, 20546), True, 'import numpy as np\n'), ((21061, 21082), 'numpy.asarray', 'np.asarray', (['pitch_std'], {}), '(pitch_std)\n', (21071, 21082), True, 'import numpy as np\n'), ((21384, 21404), 'numpy.asarray', 'np.asarray', (['roll_std'], {}), '(roll_std)\n', (21394, 21404), True, 'import numpy as np\n'), ((23980, 24017), 'numpy.hstack', 'np.hstack', (['(temp, temp_selected.data)'], {}), '((temp, temp_selected.data))\n', (23989, 24017), True, 'import numpy as np\n'), ((25450, 25490), 'numpy.abs', 'np.abs', (["(user - meas.ext_temp_chk['adcp'])"], {}), "(user - meas.ext_temp_chk['adcp'])\n", (25456, 25490), True, 'import numpy as np\n'), ((35219, 35238), 'numpy.any', 'np.any', (['depth_valid'], {}), '(depth_valid)\n', (35225, 35238), True, 'import numpy as np\n'), ((52186, 52240), 'numpy.any', 'np.any', (["self.w_vel['q_max_run_caution'][:, filter_idx]"], {}), "(self.w_vel['q_max_run_caution'][:, filter_idx])\n", (52192, 52240), True, 'import numpy as np\n'), ((53202, 53254), 'numpy.any', 'np.any', (["self.w_vel['q_total_caution'][:, filter_idx]"], {}), "(self.w_vel['q_total_caution'][:, filter_idx])\n", (53208, 53254), True, 'import numpy as np\n'), ((56942, 56960), 'numpy.nanmean', 'np.nanmean', (['left_q'], {}), '(left_q)\n', (56952, 56960), True, 'import numpy as np\n'), ((56963, 56982), 'numpy.nanmean', 'np.nanmean', (['total_q'], {}), '(total_q)\n', (56973, 56982), True, 'import numpy as np\n'), ((57408, 57431), 'numpy.abs', 'np.abs', (['right_q_percent'], {}), '(right_q_percent)\n', (57414, 57431), True, 'import numpy as np\n'), ((58864, 58890), 'numpy.nanmean', 'np.nanmean', (['dist_made_good'], {}), '(dist_made_good)\n', (58874, 58890), True, 'import numpy as np\n'), ((59092, 59136), 'numpy.where', 'np.where', (['(dist_moved_right > right_threshold)'], {}), '(dist_moved_right > right_threshold)\n', (59100, 59136), True, 'import numpy as np\n'), ((59571, 59613), 'numpy.where', 'np.where', (['(dist_moved_left > left_threshold)'], {}), '(dist_moved_left > left_threshold)\n', (59579, 59613), True, 'import numpy as np\n'), ((60793, 60814), 'numpy.where', 'np.where', (['(left_q == 0)'], {}), '(left_q == 0)\n', (60801, 60814), True, 'import numpy as np\n'), ((61123, 61145), 'numpy.where', 'np.where', (['(right_q == 0)'], {}), '(right_q == 0)\n', (61131, 61145), True, 'import numpy as np\n'), ((63376, 63394), 'numpy.diff', 'np.diff', (['valid_int'], {}), '(valid_int)\n', (63383, 63394), True, 'import numpy as np\n'), ((4861, 4912), 'numpy.where', 'np.where', (['(transect.date_time.ens_duration_sec > 1.5)'], {}), '(transect.date_time.ens_duration_sec > 1.5)\n', (4869, 4912), True, 'import numpy as np\n'), ((7471, 7492), 'numpy.unique', 'np.unique', (['q_positive'], {}), '(q_positive)\n', (7480, 7492), True, 'import numpy as np\n'), ((16762, 16800), 'numpy.nanmean', 'np.nanmean', (['pitch_source_selected.data'], {}), '(pitch_source_selected.data)\n', (16772, 16800), True, 'import numpy as np\n'), ((16839, 16876), 'numpy.nanstd', 'np.nanstd', (['pitch_source_selected.data'], {}), '(pitch_source_selected.data)\n', (16848, 16876), True, 'import numpy as np\n'), ((16915, 16952), 'numpy.nanmean', 'np.nanmean', (['roll_source_selected.data'], {}), '(roll_source_selected.data)\n', (16925, 16952), True, 'import numpy as np\n'), ((16990, 17026), 'numpy.nanstd', 'np.nanstd', (['roll_source_selected.data'], {}), 
'(roll_source_selected.data)\n', (16999, 17026), True, 'import numpy as np\n'), ((20218, 20240), 'numpy.asarray', 'np.asarray', (['pitch_mean'], {}), '(pitch_mean)\n', (20228, 20240), True, 'import numpy as np\n'), ((20742, 20763), 'numpy.asarray', 'np.asarray', (['roll_mean'], {}), '(roll_mean)\n', (20752, 20763), True, 'import numpy as np\n'), ((28665, 28686), 'numpy.unique', 'np.unique', (['file_names'], {}), '(file_names)\n', (28674, 28686), True, 'import numpy as np\n'), ((32092, 32107), 'numpy.unique', 'np.unique', (['loop'], {}), '(loop)\n', (32101, 32107), True, 'import numpy as np\n'), ((34875, 34906), 'numpy.vstack', 'np.vstack', (['(depth_na, depth_in)'], {}), '((depth_na, depth_in))\n', (34884, 34906), True, 'import numpy as np\n'), ((35150, 35190), 'numpy.vstack', 'np.vstack', (['(depth_nan, depth_valid_temp)'], {}), '((depth_nan, depth_valid_temp))\n', (35159, 35190), True, 'import numpy as np\n'), ((48763, 48833), 'numpy.any', 'np.any', (['transect.w_vel.valid_data[1, :, transect.in_transect_idx].T', '(0)'], {}), '(transect.w_vel.valid_data[1, :, transect.in_transect_idx].T, 0)\n', (48769, 48833), True, 'import numpy as np\n'), ((48981, 49060), 'numpy.any', 'np.any', (['transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T', '(0)'], {}), '(transect.w_vel.valid_data[filter_idx, :, transect.in_transect_idx].T, 0)\n', (48987, 49060), True, 'import numpy as np\n'), ((50042, 50089), 'numpy.abs', 'np.abs', (['(q_total / meas.discharge[n].total * 100)'], {}), '(q_total / meas.discharge[n].total * 100)\n', (50048, 50089), True, 'import numpy as np\n'), ((50136, 50185), 'numpy.abs', 'np.abs', (['(q_max_run / meas.discharge[n].total * 100)'], {}), '(q_max_run / meas.discharge[n].total * 100)\n', (50142, 50185), True, 'import numpy as np\n'), ((57340, 57359), 'numpy.nanmean', 'np.nanmean', (['right_q'], {}), '(right_q)\n', (57350, 57359), True, 'import numpy as np\n'), ((57362, 57381), 'numpy.nanmean', 'np.nanmean', (['total_q'], {}), '(total_q)\n', (57372, 57381), True, 'import numpy as np\n'), ((60070, 60118), 'numpy.nansum', 'np.nansum', (['transect.w_vel.valid_data[6, :, :]', '(0)'], {}), '(transect.w_vel.valid_data[6, :, :], 0)\n', (60079, 60118), True, 'import numpy as np\n'), ((60160, 60203), 'numpy.nansum', 'np.nansum', (['transect.w_vel.cells_above_sl', '(0)'], {}), '(transect.w_vel.cells_above_sl, 0)\n', (60169, 60203), True, 'import numpy as np\n'), ((60248, 60299), 'numpy.not_equal', 'np.not_equal', (['ens_sum_excluded_data', 'cells_above_sl'], {}), '(ens_sum_excluded_data, cells_above_sl)\n', (60260, 60299), True, 'import numpy as np\n'), ((61491, 61511), 'numpy.unique', 'np.unique', (['left_type'], {}), '(left_type)\n', (61500, 61511), True, 'import numpy as np\n'), ((61791, 61812), 'numpy.unique', 'np.unique', (['right_type'], {}), '(right_type)\n', (61800, 61812), True, 'import numpy as np\n'), ((64097, 64147), 'numpy.nansum', 'np.nansum', (['discharge.bottom_ens[idx_start:idx_end]'], {}), '(discharge.bottom_ens[idx_start:idx_end])\n', (64106, 64147), True, 'import numpy as np\n'), ((5014, 5060), 'numpy.nansum', 'np.nansum', (['transect.date_time.ens_duration_sec'], {}), '(transect.date_time.ens_duration_sec)\n', (5023, 5060), True, 'import numpy as np\n'), ((25902, 25918), 'numpy.nanmean', 'np.nanmean', (['temp'], {}), '(temp)\n', (25912, 25918), True, 'import numpy as np\n'), ((41893, 41940), 'numpy.abs', 'np.abs', (['(q_total / meas.discharge[n].total * 100)'], {}), '(q_total / meas.discharge[n].total * 100)\n', (41899, 41940), True, 'import numpy as 
np\n'), ((41991, 42040), 'numpy.abs', 'np.abs', (['(q_max_run / meas.discharge[n].total * 100)'], {}), '(q_max_run / meas.discharge[n].total * 100)\n', (41997, 42040), True, 'import numpy as np\n'), ((57969, 57990), 'numpy.unique', 'np.unique', (['q_positive'], {}), '(q_positive)\n', (57978, 57990), True, 'import numpy as np\n'), ((58520, 58541), 'numpy.unique', 'np.unique', (['q_positive'], {}), '(q_positive)\n', (58529, 58541), True, 'import numpy as np\n'), ((63920, 63970), 'numpy.nansum', 'np.nansum', (['discharge.middle_ens[idx_start:idx_end]'], {}), '(discharge.middle_ens[idx_start:idx_end])\n', (63929, 63970), True, 'import numpy as np\n'), ((64010, 64057), 'numpy.nansum', 'np.nansum', (['discharge.top_ens[idx_start:idx_end]'], {}), '(discharge.top_ens[idx_start:idx_end])\n', (64019, 64057), True, 'import numpy as np\n'), ((5116, 5175), 'numpy.nansum', 'np.nansum', (['transect.date_time.ens_duration_sec[idx_missing]'], {}), '(transect.date_time.ens_duration_sec[idx_missing])\n', (5125, 5175), True, 'import numpy as np\n'), ((5698, 5743), 'numpy.isnan', 'np.isnan', (['transect.date_time.ens_duration_sec'], {}), '(transect.date_time.ens_duration_sec)\n', (5706, 5743), True, 'import numpy as np\n'), ((17704, 17765), 'numpy.where', 'np.where', (['(pitch_data > heading_source_selected.pitch_limit[0])'], {}), '(pitch_data > heading_source_selected.pitch_limit[0])\n', (17712, 17765), True, 'import numpy as np\n'), ((17807, 17868), 'numpy.where', 'np.where', (['(pitch_data < heading_source_selected.pitch_limit[1])'], {}), '(pitch_data < heading_source_selected.pitch_limit[1])\n', (17815, 17868), True, 'import numpy as np\n'), ((18464, 18524), 'numpy.where', 'np.where', (['(roll_data > heading_source_selected.pitch_limit[0])'], {}), '(roll_data > heading_source_selected.pitch_limit[0])\n', (18472, 18524), True, 'import numpy as np\n'), ((18566, 18626), 'numpy.where', 'np.where', (['(roll_data < heading_source_selected.pitch_limit[1])'], {}), '(roll_data < heading_source_selected.pitch_limit[1])\n', (18574, 18626), True, 'import numpy as np\n'), ((18965, 19012), 'numpy.where', 'np.where', (['(heading_source_selected.mag_error > 2)'], {}), '(heading_source_selected.mag_error > 2)\n', (18973, 19012), True, 'import numpy as np\n'), ((29099, 29121), 'numpy.array', 'np.array', (['test_quality'], {}), '(test_quality)\n', (29107, 29121), True, 'import numpy as np\n'), ((29739, 29761), 'numpy.array', 'np.array', (['mb_test_type'], {}), '(mb_test_type)\n', (29747, 29761), True, 'import numpy as np\n'), ((43257, 43358), 'numpy.nanmean', 'np.nanmean', (['((transect.boat_vel.vtg_vel.u_mps ** 2 + transect.boat_vel.vtg_vel.v_mps **\n 2) ** 0.5)'], {}), '((transect.boat_vel.vtg_vel.u_mps ** 2 + transect.boat_vel.\n vtg_vel.v_mps ** 2) ** 0.5)\n', (43267, 43358), True, 'import numpy as np\n'), ((5341, 5400), 'numpy.nansum', 'np.nansum', (['transect.date_time.ens_duration_sec[idx_missing]'], {}), '(transect.date_time.ens_duration_sec[idx_missing])\n', (5350, 5400), True, 'import numpy as np\n'), ((9798, 9841), 'numpy.greater', 'np.greater', (['corr_table[3:, :]', 'qa_threshold'], {}), '(corr_table[3:, :], qa_threshold)\n', (9808, 9841), True, 'import numpy as np\n'), ((9971, 10024), 'numpy.greater', 'np.greater', (['corr_table[7, :]', '(corr_table[0, :] * 0.25)'], {}), '(corr_table[7, :], corr_table[0, :] * 0.25)\n', (9981, 10024), True, 'import numpy as np\n'), ((30767, 30789), 'numpy.array', 'np.array', (['test_quality'], {}), '(test_quality)\n', (30775, 30789), True, 'import numpy as np\n'), ((42152, 
42165), 'numpy.any', 'np.any', (['valid'], {}), '(valid)\n', (42158, 42165), True, 'import numpy as np\n'), ((51354, 51375), 'numpy.logical_not', 'np.logical_not', (['valid'], {}), '(valid)\n', (51368, 51375), True, 'import numpy as np\n'), ((31241, 31263), 'numpy.array', 'np.array', (['test_quality'], {}), '(test_quality)\n', (31249, 31263), True, 'import numpy as np\n'), ((51161, 51188), 'numpy.logical_not', 'np.logical_not', (['valid_cells'], {}), '(valid_cells)\n', (51175, 51188), True, 'import numpy as np\n'), ((51259, 51280), 'numpy.logical_not', 'np.logical_not', (['valid'], {}), '(valid)\n', (51273, 51280), True, 'import numpy as np\n'), ((10190, 10209), 'numpy.sum', 'np.sum', (['lag_7_check'], {}), '(lag_7_check)\n', (10196, 10209), True, 'import numpy as np\n'), ((10165, 10186), 'numpy.sum', 'np.sum', (['all_lag_check'], {}), '(all_lag_check)\n', (10171, 10186), True, 'import numpy as np\n')]
|
from utils import *
from chinese_checkers.TinyChineseCheckersGame import ChineseCheckersGame
from chinese_checkers.tensorflow.ResNet import NNetWrapper as nn
from chinese_checkers.Evaluator import Evaluator
from MCTS import MCTS
from chinese_checkers.InitializeAgent import InitializeAgent
from chinese_checkers.GreedyAgent import GreedyAgent
from chinese_checkers.TinyGUI import GUI
import numpy as np
args = dotdict({
'numMCTSSims': 2,
'cpuct': 15,
'max_steps': 600,
'load_folder_file': ('checkpoint', 41),
})
args2 = dotdict({
'numMCTSSims': 200,
'cpuct': 15,
'max_steps': 600,
'load_folder_file': ('checkpoint', 12),
})
game = ChineseCheckersGame()
gui = GUI(1)
nn1 = nn(game)
nn1.load_first_checkpoint(args.load_folder_file[0], args.load_folder_file[1])
mcts1 = MCTS(game, nn1, args)
# nn2 = nn(game)
# nn2.load_first_checkpoint(args2.load_folder_file[0], args2.load_folder_file[1])
# mcts2 = MCTS(game, nn2, args2)
actor = InitializeAgent(game)
forward = GreedyAgent(game)
evaluator = Evaluator(None, mcts1, mcts1, game, gui, True)
scores_all = np.zeros((3, 3))
steps_all = 0
wrong_win_all = 0
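# Play 20 evaluation games and tally outcomes per player: column 0 counts games
# where that player's score is 3, column 1 games with score 1, column 2 everything else.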
for _ in range(20):
scores, steps, wrong_win = evaluator.play_game(1, 1)
for p in range(3):
if scores[p] == 3:
scores_all[p,0] += 1
elif scores[p] == 1:
scores_all[p,1] += 1
else:
scores_all[p,2] += 1
steps_all += steps
wrong_win_all += wrong_win
print(scores_all)
|
[
"chinese_checkers.Evaluator.Evaluator",
"chinese_checkers.GreedyAgent.GreedyAgent",
"chinese_checkers.InitializeAgent.InitializeAgent",
"chinese_checkers.TinyChineseCheckersGame.ChineseCheckersGame",
"numpy.zeros",
"MCTS.MCTS",
"chinese_checkers.tensorflow.ResNet.NNetWrapper",
"chinese_checkers.TinyGUI.GUI"
] |
[((667, 688), 'chinese_checkers.TinyChineseCheckersGame.ChineseCheckersGame', 'ChineseCheckersGame', ([], {}), '()\n', (686, 688), False, 'from chinese_checkers.TinyChineseCheckersGame import ChineseCheckersGame\n'), ((695, 701), 'chinese_checkers.TinyGUI.GUI', 'GUI', (['(1)'], {}), '(1)\n', (698, 701), False, 'from chinese_checkers.TinyGUI import GUI\n'), ((708, 716), 'chinese_checkers.tensorflow.ResNet.NNetWrapper', 'nn', (['game'], {}), '(game)\n', (710, 716), True, 'from chinese_checkers.tensorflow.ResNet import NNetWrapper as nn\n'), ((803, 824), 'MCTS.MCTS', 'MCTS', (['game', 'nn1', 'args'], {}), '(game, nn1, args)\n', (807, 824), False, 'from MCTS import MCTS\n'), ((967, 988), 'chinese_checkers.InitializeAgent.InitializeAgent', 'InitializeAgent', (['game'], {}), '(game)\n', (982, 988), False, 'from chinese_checkers.InitializeAgent import InitializeAgent\n'), ((999, 1016), 'chinese_checkers.GreedyAgent.GreedyAgent', 'GreedyAgent', (['game'], {}), '(game)\n', (1010, 1016), False, 'from chinese_checkers.GreedyAgent import GreedyAgent\n'), ((1030, 1076), 'chinese_checkers.Evaluator.Evaluator', 'Evaluator', (['None', 'mcts1', 'mcts1', 'game', 'gui', '(True)'], {}), '(None, mcts1, mcts1, game, gui, True)\n', (1039, 1076), False, 'from chinese_checkers.Evaluator import Evaluator\n'), ((1090, 1106), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1098, 1106), True, 'import numpy as np\n')]
|
from tensorflow.keras.utils import Sequence
import os
import pandas as pd
import random
import numpy as np
class DataGenerator(Sequence):
def __init__(self,
path_args,
batch_size: int,
shuffle: bool,
mode: str):
self.x_img_path = './train/'
self.x_label_path = './label/'
self.mode = mode
# train
self.x_img = os.listdir(self.x_img_path)
self.x_label = os.listdir(self.x_label_path)
# TODO validation and test dataset
self.x_list = []
self.y_list = []
self.load_dataset()
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
def load_dataset(self):
        # Original body referenced undefined `input_data`/`result_data`; assumed fix:
        # load each sample and its label from disk (np.load assumes .npy files on disk).
        for img_name, label_name in zip(sorted(self.x_img), sorted(self.x_label)):
            input_data = np.load(os.path.join(self.x_img_path, img_name))
            result_data = np.load(os.path.join(self.x_label_path, label_name))
            self.x_list.append(input_data)
            self.y_list.append(result_data.astype(np.float32))
def get_data_len(self):
return len(self.x_list), len(self.y_list)
def __len__(self):
return int(np.floor(len(self.x_list) / self.batch_size))
def on_epoch_end(self):
self.indexes = np.arange(len(self.x_list))
        if self.shuffle:
np.random.shuffle(self.indexes)
def get_input(self, index):
return self.x_list[index * self.batch_size:(index + 1) * self.batch_size]
def get_target(self, index):
return self.y_list[index * self.batch_size:(index + 1) * self.batch_size]
def __getitem__(self, i):
# collect batch data
start = i * self.batch_size
stop = (i + 1) * self.batch_size
data = []
y_data = []
for j in range(start, stop):
data.append(self.x_list[j])
y_data.append(self.y_list[j])
# transpose list of lists
batch = [np.stack(samples, axis=0) for samples in zip(*data)]
y_batch = [np.stack(samples, axis=0) for samples in zip(*y_data)]
# newer version of tf/keras want batch to be in tuple rather than list
return tuple(batch), tuple(y_batch)
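# Minimal usage sketch (illustrative only; `path_args` and the Keras `model` are
# assumed to exist and are not defined in this snippet):
#   train_gen = DataGenerator(path_args=None, batch_size=8, shuffle=True, mode='train')
#   model.fit(train_gen, epochs=10)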
|
[
"numpy.stack",
"os.listdir",
"numpy.random.shuffle"
] |
[((425, 452), 'os.listdir', 'os.listdir', (['self.x_img_path'], {}), '(self.x_img_path)\n', (435, 452), False, 'import os\n'), ((476, 505), 'os.listdir', 'os.listdir', (['self.x_label_path'], {}), '(self.x_label_path)\n', (486, 505), False, 'import os\n'), ((1198, 1229), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (1215, 1229), True, 'import numpy as np\n'), ((1808, 1833), 'numpy.stack', 'np.stack', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (1816, 1833), True, 'import numpy as np\n'), ((1880, 1905), 'numpy.stack', 'np.stack', (['samples'], {'axis': '(0)'}), '(samples, axis=0)\n', (1888, 1905), True, 'import numpy as np\n')]
|
root = 'data/'
import numpy as np
from ffjord.datasets.power import POWER
from ffjord.datasets.gas import GAS
from ffjord.datasets.hepmass import HEPMASS
from ffjord.datasets.miniboone import MINIBOONE
from ffjord.datasets.bsds300 import BSDS300
from .synthetic import EightGaussians
from .synthetic import Checkerboard
from .synthetic import TwoSpirals
from .mnist import MNIST_4x4, MNIST_7x7, MNIST_8x8, MNIST_16x16, MNIST_28x28
from utils import order_variables_partial_correlation
all_datasets = [
'power', 'gas', 'hepmass', 'miniboone', 'bsds300', '8gaussians',
'checkerboard', '2spirals', 'mnist_4x4', 'mnist_7x7', 'mnist_8x8',
'mnist_16x16', 'mnist_28x28']
def subsample_train_data(data, subsample_size):
rng = np.random.RandomState(seed=42)
rng.shuffle(data.trn.x)
data.trn.x = data.trn.x[:subsample_size]
def do_optimal_ordering(data, tr=False):
ordering = order_variables_partial_correlation(data.trn.x, tr=tr)
data.trn.x = data.trn.x[:, ordering]
data.val.x = data.val.x[:, ordering]
data.tst.x = data.tst.x[:, ordering]
def load_data(name, optimal_order=False, subsample_size=None, tr=False):
if name == 'power':
data = POWER()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'gas':
data = GAS()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'hepmass':
data = HEPMASS()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'miniboone':
data = MINIBOONE()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == 'bsds300':
data = BSDS300()
if subsample_size is not None:
subsample_train_data(data, subsample_size)
if optimal_order:
do_optimal_ordering(data, tr=tr)
return data
elif name == '8gaussians':
return EightGaussians()
elif name == 'checkerboard':
return Checkerboard()
elif name == '2spirals':
return TwoSpirals()
elif name == 'mnist_4x4':
return MNIST_4x4(optimal_order)
elif name == 'mnist_7x7':
return MNIST_7x7(optimal_order)
elif name == 'mnist_8x8':
return MNIST_8x8(optimal_order)
elif name == 'mnist_16x16':
return MNIST_16x16(optimal_order)
elif name == 'mnist_28x28':
return MNIST_28x28(optimal_order)
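# Example usage (illustrative; the underlying dataset files must be available under `root`):
#   data = load_data('miniboone', optimal_order=True)
#   print(data.trn.x.shape, data.val.x.shape, data.tst.x.shape)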
|
[
"ffjord.datasets.gas.GAS",
"ffjord.datasets.miniboone.MINIBOONE",
"numpy.random.RandomState",
"ffjord.datasets.hepmass.HEPMASS",
"ffjord.datasets.power.POWER",
"ffjord.datasets.bsds300.BSDS300",
"utils.order_variables_partial_correlation"
] |
[((738, 768), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (759, 768), True, 'import numpy as np\n'), ((899, 953), 'utils.order_variables_partial_correlation', 'order_variables_partial_correlation', (['data.trn.x'], {'tr': 'tr'}), '(data.trn.x, tr=tr)\n', (934, 953), False, 'from utils import order_variables_partial_correlation\n'), ((1191, 1198), 'ffjord.datasets.power.POWER', 'POWER', ([], {}), '()\n', (1196, 1198), False, 'from ffjord.datasets.power import POWER\n'), ((1443, 1448), 'ffjord.datasets.gas.GAS', 'GAS', ([], {}), '()\n', (1446, 1448), False, 'from ffjord.datasets.gas import GAS\n'), ((1697, 1706), 'ffjord.datasets.hepmass.HEPMASS', 'HEPMASS', ([], {}), '()\n', (1704, 1706), False, 'from ffjord.datasets.hepmass import HEPMASS\n'), ((1957, 1968), 'ffjord.datasets.miniboone.MINIBOONE', 'MINIBOONE', ([], {}), '()\n', (1966, 1968), False, 'from ffjord.datasets.miniboone import MINIBOONE\n'), ((2217, 2226), 'ffjord.datasets.bsds300.BSDS300', 'BSDS300', ([], {}), '()\n', (2224, 2226), False, 'from ffjord.datasets.bsds300 import BSDS300\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
df = pd.DataFrame({'Group': ['A', 'A', 'A', 'B', 'C', 'B', 'B', 'C', 'A', 'C'],
'Apple': np.random.rand(10),'Orange': np.random.rand(10)})
# df = df[['Group','Apple','Orange']]
dd = pd.melt(df, id_vars=['Group'], value_vars=['Apple', 'Orange'], var_name='Fruits')
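# dd is the long-format frame with columns Group, Fruits and value (one row per
# original row/fruit pair), which is what lets hue='Fruits' split the boxes by fruit.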
sns.boxplot(x='Group', y='value', data=dd, hue='Fruits')
plt.show()
|
[
"numpy.random.rand",
"pandas.melt",
"seaborn.boxplot",
"matplotlib.pyplot.show"
] |
[((296, 382), 'pandas.melt', 'pd.melt', (['df'], {'id_vars': "['Group']", 'value_vars': "['Apple', 'Orange']", 'var_name': '"""Fruits"""'}), "(df, id_vars=['Group'], value_vars=['Apple', 'Orange'], var_name=\n 'Fruits')\n", (303, 382), True, 'import pandas as pd\n'), ((378, 434), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Group"""', 'y': '"""value"""', 'data': 'dd', 'hue': '"""Fruits"""'}), "(x='Group', y='value', data=dd, hue='Fruits')\n", (389, 434), True, 'import seaborn as sns\n'), ((435, 445), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (443, 445), True, 'import matplotlib.pyplot as plt\n'), ((203, 221), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (217, 221), True, 'import numpy as np\n'), ((232, 250), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (246, 250), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import keras
import keras.backend as K
import re
import cv2
import numpy as np
np.set_printoptions(threshold=np.inf)  # threshold must be numeric; the string 'nan' is rejected by current NumPy
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if re.match(r'([\w]+\.(?:' + ext + '))', f)]
def get_train_test_dataset():
if os.path.exists('./data/train.npz'):
dataset = np.load('./data/train.npz')
print('{} already exits.'.format('./data/train.npz'))
return (dataset['x'], dataset['y'])
x = list_pictures('./test_dataset', ext='png')
y = [item[:-4] + '_posmap.jpg' for item in x]
filted_x = []
filted_y = []
for ix, iy in zip(x, y):
if os.path.exists(ix) and os.path.exists(iy):
filted_x.append(ix)
filted_y.append(iy)
else:
print('{} or {} not exits.'.format(ix, iy))
x = [cv2.imread(item) for item in filted_x]
y = [cv2.imread(item) for item in filted_y]
x = np.array(x)
y = np.array(y)
if not os.path.exists('./data'):
os.makedirs('./data')
np.savez('./data/train.npz', x=x, y=y)
return (x, y)
def res_block(x, filters):
# stage1
shortcut = x
shortcut = keras.layers.Conv2D(
filters, (1, 1), strides=(2, 2), padding='same')(shortcut)
    # use integer division so the Conv2D filter count stays an integer
    x = keras.layers.Conv2D(
        filters // 2, (1, 1), strides=(1, 1), padding='same', activation='relu')(x)
    x = keras.layers.Conv2D(
        filters // 2, (4, 4), strides=(2, 2), padding='same', activation='relu')(x)
x = keras.layers.Conv2D(
filters, (1, 1), strides=(1, 1), padding='same')(x)
x = keras.layers.Add()([x, shortcut])
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation('relu')(x)
# stage2
shortcut = x
    x = keras.layers.Conv2D(
        filters // 2, (1, 1), strides=(1, 1), padding='same', activation='relu')(x)
    x = keras.layers.Conv2D(
        filters // 2, (4, 4), strides=(1, 1), padding='same', activation='relu')(x)
x = keras.layers.Conv2D(
filters, (1, 1), strides=(1, 1), padding='same')(x)
x = keras.layers.Add()([x, shortcut])
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation('relu')(x)
return x
def get_regress_model():
input = keras.layers.Input(shape=(256, 256, 3))
x = keras.layers.Conv2D(
16, (4, 4), strides=(1, 1), padding='same', activation='relu')(input)
x = res_block(x, 32)
x = res_block(x, 64)
x = res_block(x, 128)
x = res_block(x, 256)
x = res_block(x, 512)
x = keras.layers.Conv2DTranspose(512, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(
256, (4, 4), strides=(2, 2), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(256, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(256, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(
128, (4, 4), strides=(2, 2), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(128, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(128, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(
64, (4, 4), strides=(2, 2), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(64, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(64, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(
32, (4, 4), strides=(2, 2), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(32, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(
16, (4, 4), strides=(2, 2), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(16, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(3, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(3, (4, 4), padding='same', activation='relu')(x)
x = keras.layers.Conv2DTranspose(3, (4, 4), padding='same')(x)
model = keras.Model(input, x)
return model
def preprocess_input(x, y=None):
x = x.astype(np.float32)
x = keras.applications.xception.preprocess_input(x)
if y is not None:
y = y.astype(np.float32)
y /= 256.0
return (x, y)
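# Per-pixel loss weighting: keep weight-mask values only inside the UV face region
# (zero elsewhere) and rescale them before they multiply the squared error below.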
loss_mask = cv2.imread('./data/uv-data/uv_weight_mask.png')
face_mask = cv2.imread('./data/uv-data/uv_face_mask.png')
loss_mask = np.where(face_mask > 0, loss_mask, face_mask)
loss_mask = loss_mask.astype(np.float32)
loss_mask /= 16.0
def mean_squared_error_with_mask(y_true, y_pred):
mask = K.constant(loss_mask)
return K.mean(K.mean(K.square(y_pred - y_true) * mask, axis=-1), axis=-1)
def lr_adjustor(epoch):
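    # Step schedule: 1e-3 for epochs < 100, 1e-4 for epochs < 150, 1e-5 afterwards.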
base_lr = 0.001
if epoch < 100:
return base_lr
base_lr *= .1
if epoch < 150:
return base_lr
base_lr *= .1
return base_lr
def train():
(x, y) = get_train_test_dataset()
# x = np.concatenate([x for i in range(20)])
# y = np.concatenate([y for i in range(20)])
print('x shape -> {}, y shape -> {}.'.format(x.shape, y.shape))
(x, y) = preprocess_input(x, y)
model = get_regress_model()
model.summary()
model.load_weights('./weights.100-0.0137.hdf5')
# keras.utils.plot_model(model, show_shapes=True)
opti = keras.optimizers.Adam(lr=0.001)
if not os.path.exists('./weights'):
os.makedirs('./weights')
callbacks = [
keras.callbacks.LearningRateScheduler(lr_adjustor),
keras.callbacks.CSVLogger('train.log'),
keras.callbacks.ModelCheckpoint(
'./weights/weights.{epoch:02d}-{loss:.4f}.hdf5',
monitor='loss',
save_best_only=True,
period=10)]
model.compile(opti, loss=mean_squared_error_with_mask)
model.fit(x, y, batch_size=16, epochs=200, callbacks=callbacks)
def test():
(x, y) = get_train_test_dataset()
# x = np.concatenate([x for i in range(20)])
# y = np.concatenate([y for i in range(20)])
print('x shape -> {}, y shape -> {}.'.format(x.shape, y.shape))
(x, y) = preprocess_input(x, y)
model = get_regress_model()
model.summary()
# model.load_weights('./weights.100-0.0137.hdf5')
model.load_weights('./Data/net-data/weights.190-0.0010.hdf5')
if not os.path.exists('./result'):
os.makedirs('./result')
y = model.predict(x)
for index, i in enumerate(y):
i *= 255
i = i.astype(np.uint8)
savename = os.path.join('./result', str(index) + '.png')
cv2.imwrite(savename, i)
if __name__ == "__main__":
# train()
test()
|
[
"numpy.load",
"os.walk",
"keras.layers.Input",
"keras.callbacks.LearningRateScheduler",
"os.path.join",
"numpy.set_printoptions",
"keras.backend.constant",
"cv2.imwrite",
"os.path.exists",
"keras.Model",
"keras.callbacks.ModelCheckpoint",
"keras.applications.xception.preprocess_input",
"keras.optimizers.Adam",
"keras.layers.Conv2DTranspose",
"re.match",
"keras.layers.Conv2D",
"numpy.savez",
"keras.layers.BatchNormalization",
"os.makedirs",
"keras.layers.Activation",
"keras.layers.Add",
"cv2.imread",
"numpy.where",
"numpy.array",
"keras.callbacks.CSVLogger",
"keras.backend.square"
] |
[((176, 212), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '"""nan"""'}), "(threshold='nan')\n", (195, 212), True, 'import numpy as np\n'), ((4558, 4605), 'cv2.imread', 'cv2.imread', (['"""./data/uv-data/uv_weight_mask.png"""'], {}), "('./data/uv-data/uv_weight_mask.png')\n", (4568, 4605), False, 'import cv2\n'), ((4618, 4663), 'cv2.imread', 'cv2.imread', (['"""./data/uv-data/uv_face_mask.png"""'], {}), "('./data/uv-data/uv_face_mask.png')\n", (4628, 4663), False, 'import cv2\n'), ((4676, 4721), 'numpy.where', 'np.where', (['(face_mask > 0)', 'loss_mask', 'face_mask'], {}), '(face_mask > 0, loss_mask, face_mask)\n', (4684, 4721), True, 'import numpy as np\n'), ((471, 505), 'os.path.exists', 'os.path.exists', (['"""./data/train.npz"""'], {}), "('./data/train.npz')\n", (485, 505), False, 'import os\n'), ((1118, 1129), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1126, 1129), True, 'import numpy as np\n'), ((1138, 1149), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1146, 1149), True, 'import numpy as np\n'), ((1221, 1259), 'numpy.savez', 'np.savez', (['"""./data/train.npz"""'], {'x': 'x', 'y': 'y'}), "('./data/train.npz', x=x, y=y)\n", (1229, 1259), True, 'import numpy as np\n'), ((2409, 2448), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(256, 256, 3)'}), '(shape=(256, 256, 3))\n', (2427, 2448), False, 'import keras\n'), ((4293, 4314), 'keras.Model', 'keras.Model', (['input', 'x'], {}), '(input, x)\n', (4304, 4314), False, 'import keras\n'), ((4404, 4451), 'keras.applications.xception.preprocess_input', 'keras.applications.xception.preprocess_input', (['x'], {}), '(x)\n', (4448, 4451), False, 'import keras\n'), ((4844, 4865), 'keras.backend.constant', 'K.constant', (['loss_mask'], {}), '(loss_mask)\n', (4854, 4865), True, 'import keras.backend as K\n'), ((5556, 5587), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (5577, 5587), False, 'import keras\n'), ((285, 306), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (297, 306), False, 'import os\n'), ((525, 552), 'numpy.load', 'np.load', (['"""./data/train.npz"""'], {}), "('./data/train.npz')\n", (532, 552), True, 'import numpy as np\n'), ((1023, 1039), 'cv2.imread', 'cv2.imread', (['item'], {}), '(item)\n', (1033, 1039), False, 'import cv2\n'), ((1071, 1087), 'cv2.imread', 'cv2.imread', (['item'], {}), '(item)\n', (1081, 1087), False, 'import cv2\n'), ((1161, 1185), 'os.path.exists', 'os.path.exists', (['"""./data"""'], {}), "('./data')\n", (1175, 1185), False, 'import os\n'), ((1195, 1216), 'os.makedirs', 'os.makedirs', (['"""./data"""'], {}), "('./data')\n", (1206, 1216), False, 'import os\n'), ((1352, 1420), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['filters', '(1, 1)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(filters, (1, 1), strides=(2, 2), padding='same')\n", (1371, 1420), False, 'import keras\n'), ((1448, 1543), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(filters / 2)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters / 2, (1, 1), strides=(1, 1), padding='same',\n activation='relu')\n", (1467, 1543), False, 'import keras\n'), ((1560, 1655), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(filters / 2)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters / 2, (4, 4), strides=(2, 2), padding='same',\n activation='relu')\n", (1579, 1655), False, 'import keras\n'), ((1672, 1740), 'keras.layers.Conv2D', 
'keras.layers.Conv2D', (['filters', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""'}), "(filters, (1, 1), strides=(1, 1), padding='same')\n", (1691, 1740), False, 'import keras\n'), ((1761, 1779), 'keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (1777, 1779), False, 'import keras\n'), ((1803, 1836), 'keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1834, 1836), False, 'import keras\n'), ((1848, 1879), 'keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (1871, 1879), False, 'import keras\n'), ((1922, 2017), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(filters / 2)', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters / 2, (1, 1), strides=(1, 1), padding='same',\n activation='relu')\n", (1941, 2017), False, 'import keras\n'), ((2034, 2129), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(filters / 2)', '(4, 4)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters / 2, (4, 4), strides=(1, 1), padding='same',\n activation='relu')\n", (2053, 2129), False, 'import keras\n'), ((2146, 2214), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['filters', '(1, 1)'], {'strides': '(1, 1)', 'padding': '"""same"""'}), "(filters, (1, 1), strides=(1, 1), padding='same')\n", (2165, 2214), False, 'import keras\n'), ((2235, 2253), 'keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (2251, 2253), False, 'import keras\n'), ((2277, 2310), 'keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2308, 2310), False, 'import keras\n'), ((2322, 2353), 'keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2345, 2353), False, 'import keras\n'), ((2457, 2544), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(16)', '(4, 4)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(16, (4, 4), strides=(1, 1), padding='same', activation=\n 'relu')\n", (2476, 2544), False, 'import keras\n'), ((2693, 2769), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(512)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(512, (4, 4), padding='same', activation='relu')\n", (2721, 2769), False, 'import keras\n'), ((2782, 2878), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(256)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(256, (4, 4), strides=(2, 2), padding='same',\n activation='relu')\n", (2810, 2878), False, 'import keras\n'), ((2895, 2971), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(256)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(256, (4, 4), padding='same', activation='relu')\n", (2923, 2971), False, 'import keras\n'), ((2983, 3059), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(256)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(256, (4, 4), padding='same', activation='relu')\n", (3011, 3059), False, 'import keras\n'), ((3072, 3168), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(128)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(128, (4, 4), strides=(2, 2), padding='same',\n activation='relu')\n", (3100, 3168), False, 'import keras\n'), ((3185, 3261), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(128)', '(4, 4)'], {'padding': '"""same"""', 'activation': 
'"""relu"""'}), "(128, (4, 4), padding='same', activation='relu')\n", (3213, 3261), False, 'import keras\n'), ((3273, 3349), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(128)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(128, (4, 4), padding='same', activation='relu')\n", (3301, 3349), False, 'import keras\n'), ((3362, 3457), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, (4, 4), strides=(2, 2), padding='same',\n activation='relu')\n", (3390, 3457), False, 'import keras\n'), ((3474, 3549), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(64)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, (4, 4), padding='same', activation='relu')\n", (3502, 3549), False, 'import keras\n'), ((3561, 3636), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(64)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, (4, 4), padding='same', activation='relu')\n", (3589, 3636), False, 'import keras\n'), ((3649, 3744), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(32)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(32, (4, 4), strides=(2, 2), padding='same',\n activation='relu')\n", (3677, 3744), False, 'import keras\n'), ((3761, 3836), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(32)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(32, (4, 4), padding='same', activation='relu')\n", (3789, 3836), False, 'import keras\n'), ((3849, 3944), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(16)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(16, (4, 4), strides=(2, 2), padding='same',\n activation='relu')\n", (3877, 3944), False, 'import keras\n'), ((3961, 4036), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(16)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(16, (4, 4), padding='same', activation='relu')\n", (3989, 4036), False, 'import keras\n'), ((4049, 4123), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(3)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(3, (4, 4), padding='same', activation='relu')\n", (4077, 4123), False, 'import keras\n'), ((4135, 4209), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(3)', '(4, 4)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(3, (4, 4), padding='same', activation='relu')\n", (4163, 4209), False, 'import keras\n'), ((4221, 4276), 'keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(3)', '(4, 4)'], {'padding': '"""same"""'}), "(3, (4, 4), padding='same')\n", (4249, 4276), False, 'import keras\n'), ((5599, 5626), 'os.path.exists', 'os.path.exists', (['"""./weights"""'], {}), "('./weights')\n", (5613, 5626), False, 'import os\n'), ((5636, 5660), 'os.makedirs', 'os.makedirs', (['"""./weights"""'], {}), "('./weights')\n", (5647, 5660), False, 'import os\n'), ((5687, 5737), 'keras.callbacks.LearningRateScheduler', 'keras.callbacks.LearningRateScheduler', (['lr_adjustor'], {}), '(lr_adjustor)\n', (5724, 5737), False, 'import keras\n'), ((5747, 5785), 'keras.callbacks.CSVLogger', 'keras.callbacks.CSVLogger', (['"""train.log"""'], {}), "('train.log')\n", (5772, 5785), False, 'import keras\n'), ((5795, 5928), 
'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['"""./weights/weights.{epoch:02d}-{loss:.4f}.hdf5"""'], {'monitor': '"""loss"""', 'save_best_only': '(True)', 'period': '(10)'}), "('./weights/weights.{epoch:02d}-{loss:.4f}.hdf5'\n , monitor='loss', save_best_only=True, period=10)\n", (5826, 5928), False, 'import keras\n'), ((6540, 6566), 'os.path.exists', 'os.path.exists', (['"""./result"""'], {}), "('./result')\n", (6554, 6566), False, 'import os\n'), ((6576, 6599), 'os.makedirs', 'os.makedirs', (['"""./result"""'], {}), "('./result')\n", (6587, 6599), False, 'import os\n'), ((6780, 6804), 'cv2.imwrite', 'cv2.imwrite', (['savename', 'i'], {}), '(savename, i)\n', (6791, 6804), False, 'import cv2\n'), ((341, 359), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (348, 359), False, 'import os\n'), ((390, 431), 're.match', 're.match', (["('([\\\\w]+\\\\.(?:' + ext + '))')", 'f'], {}), "('([\\\\w]+\\\\.(?:' + ext + '))', f)\n", (398, 431), False, 'import re\n'), ((837, 855), 'os.path.exists', 'os.path.exists', (['ix'], {}), '(ix)\n', (851, 855), False, 'import os\n'), ((860, 878), 'os.path.exists', 'os.path.exists', (['iy'], {}), '(iy)\n', (874, 878), False, 'import os\n'), ((4891, 4916), 'keras.backend.square', 'K.square', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (4899, 4916), True, 'import keras.backend as K\n')]
|
import cv2
import numpy as np
def label2rgb(label_np):
print(label_np)
label_color = np.argmax(label_np, axis=0)
label_color = label_color / np.max(label_color) * 255
print(label_color)
n = label_color.astype(np.uint8)
n = np.array(n)
print(type(n))
    label_color = cv2.applyColorMap(n, cv2.COLORMAP_JET)  # OpenCV expects a colormap flag, not the string 'jet'
return label_color
|
[
"cv2.applyColorMap",
"numpy.max",
"numpy.array",
"numpy.argmax"
] |
[((95, 122), 'numpy.argmax', 'np.argmax', (['label_np'], {'axis': '(0)'}), '(label_np, axis=0)\n', (104, 122), True, 'import numpy as np\n'), ((249, 260), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (257, 260), True, 'import numpy as np\n'), ((298, 325), 'cv2.applyColorMap', 'cv2.applyColorMap', (['n', '"""jet"""'], {}), "(n, 'jet')\n", (315, 325), False, 'import cv2\n'), ((155, 174), 'numpy.max', 'np.max', (['label_color'], {}), '(label_color)\n', (161, 174), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from tpot import TPOTClassifier
from sklearn.model_selection import train_test_split
benchmark = pd.read_pickle('us_pct.pickle') # us overall housing price index percentage change
HPI = pd.read_pickle('HPI_complete.pickle') # all of the state data, thirty year mortgage, unemployment rate, GDP, SP500
HPI = HPI.join(benchmark['United States'])
# all in percentage change since the start of the data (1975-01-01)
HPI.dropna(inplace=True)
housing_pct = HPI.pct_change()
housing_pct.replace([np.inf, -np.inf], np.nan, inplace=True)
housing_pct['US_HPI_future'] = housing_pct['United States'].shift(-1)
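# US_HPI_future is the next period's US change (shift(-1)), so each label below marks
# whether the national index change rises in the following period.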
housing_pct.dropna(inplace=True)
def create_labels(cur_hpi, fut_hpi):
if fut_hpi > cur_hpi:
return 1
else:
return 0
housing_pct['label'] = list(map(create_labels, housing_pct['United States'], housing_pct['US_HPI_future']))
# housing_pct['ma_apply_example'] = housing_pct['M30'].rolling(window=10).apply(moving_average)
# print(housing_pct.tail())
X = np.array(housing_pct.drop(['label', 'US_HPI_future'], 1))
y = np.array(housing_pct['label'])
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.25)
tpot = TPOTClassifier(generations=10, population_size=20, verbosity=2)
tpot.fit(X_train, y_train)
print(tpot.score(X_test, y_test))
tpot.export('HPI_tpot_pipeline.py')
|
[
"pandas.read_pickle",
"sklearn.model_selection.train_test_split",
"numpy.array",
"tpot.TPOTClassifier"
] |
[((139, 170), 'pandas.read_pickle', 'pd.read_pickle', (['"""us_pct.pickle"""'], {}), "('us_pct.pickle')\n", (153, 170), True, 'import pandas as pd\n'), ((229, 266), 'pandas.read_pickle', 'pd.read_pickle', (['"""HPI_complete.pickle"""'], {}), "('HPI_complete.pickle')\n", (243, 266), True, 'import pandas as pd\n'), ((1086, 1116), 'numpy.array', 'np.array', (["housing_pct['label']"], {}), "(housing_pct['label'])\n", (1094, 1116), True, 'import numpy as np\n'), ((1153, 1191), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)'}), '(X, y, test_size=0.25)\n', (1169, 1191), False, 'from sklearn.model_selection import train_test_split\n'), ((1199, 1262), 'tpot.TPOTClassifier', 'TPOTClassifier', ([], {'generations': '(10)', 'population_size': '(20)', 'verbosity': '(2)'}), '(generations=10, population_size=20, verbosity=2)\n', (1213, 1262), False, 'from tpot import TPOTClassifier\n')]
|
#Important Modules
from flask import Flask,render_template, url_for ,flash , redirect
import pickle
from flask import request
import numpy as np
import os
from flask import send_from_directory
#from this import SQLAlchemy
app=Flask(__name__,template_folder='template')
@app.route("/")
@app.route("/home")
def home():
return render_template("home.html")
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/heart")
def heart():
return render_template("heart.html")
def ValuePredictor(to_predict_list, size):
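    # Reshape the submitted form values into a single-row feature array; a size of
    # 13 selects the pickled heart-disease model.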
to_predict = np.array(to_predict_list).reshape(1,size)
if(size==13):#Heart
loaded_model = pickle.load(open("Heart_model_new.pkl","rb"))
result =loaded_model.predict(to_predict)
return result[0]
@app.route('/result',methods = ["POST"])
def result():
if request.method == 'POST':
to_predict_list = request.form.to_dict()
to_predict_list=list(to_predict_list.values())
to_predict_list = list(map(float, to_predict_list))
if(len(to_predict_list)==13):#heart
result = ValuePredictor(to_predict_list,13)
if(int(result)==1):
prediction="The patient's heart seems to be healthy."
else:
prediction="The patient's heart does not seems to be healthy."
return(render_template("result.html", prediction=prediction))
if __name__ == "__main__":
app.run(debug=True)
|
[
"numpy.array",
"flask.Flask",
"flask.request.form.to_dict",
"flask.render_template"
] |
[((233, 276), 'flask.Flask', 'Flask', (['__name__'], {'template_folder': '"""template"""'}), "(__name__, template_folder='template')\n", (238, 276), False, 'from flask import Flask, render_template, url_for, flash, redirect\n'), ((340, 368), 'flask.render_template', 'render_template', (['"""home.html"""'], {}), "('home.html')\n", (355, 368), False, 'from flask import Flask, render_template, url_for, flash, redirect\n'), ((417, 446), 'flask.render_template', 'render_template', (['"""about.html"""'], {}), "('about.html')\n", (432, 446), False, 'from flask import Flask, render_template, url_for, flash, redirect\n'), ((493, 522), 'flask.render_template', 'render_template', (['"""heart.html"""'], {}), "('heart.html')\n", (508, 522), False, 'from flask import Flask, render_template, url_for, flash, redirect\n'), ((1354, 1407), 'flask.render_template', 'render_template', (['"""result.html"""'], {'prediction': 'prediction'}), "('result.html', prediction=prediction)\n", (1369, 1407), False, 'from flask import Flask, render_template, url_for, flash, redirect\n'), ((905, 927), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (925, 927), False, 'from flask import request\n'), ((585, 610), 'numpy.array', 'np.array', (['to_predict_list'], {}), '(to_predict_list)\n', (593, 610), True, 'import numpy as np\n')]
|
import cv2 as cv
import numpy as np
if __name__ == "__main__":
img = cv.imread('../../assets/test1.jpg')
height, width = img.shape[:2] # rows, columns
# translating the img 200 pixels right (x axis)
translation_matrix = np.float32([[1, 0, 200], [0, 1, 0]])
output = cv.warpAffine(img, translation_matrix, (width, height))
cv.imshow('1) 200 Pixels right', output)
# translating the img 50 pixels down (y axis)
translation_matrix = np.float32([[1, 0, 0], [0, 1, 50]])
output = cv.warpAffine(img, translation_matrix, (width, height))
cv.imshow('2) 50 Pixels Down', output)
# translating the img in both x-y axis.
translation_matrix = np.float32([[1, 0, 200], [0, 1, 50]])
output = cv.warpAffine(img, translation_matrix, (width, height))
cv.imshow('3) (dx, dy) = (200, 50)', output)
# translating without getting cropped (by increasing the output size)
translation_matrix = np.float32([[1, 0, 200], [0, 1, 50]])
output = cv.warpAffine(img, translation_matrix, (width + 200, height + 50))
cv.imshow("4) Preventing Crop", output)
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.float32",
"cv2.imread",
"cv2.warpAffine",
"cv2.imshow"
] |
[((79, 114), 'cv2.imread', 'cv.imread', (['"""../../assets/test1.jpg"""'], {}), "('../../assets/test1.jpg')\n", (88, 114), True, 'import cv2 as cv\n'), ((245, 281), 'numpy.float32', 'np.float32', (['[[1, 0, 200], [0, 1, 0]]'], {}), '([[1, 0, 200], [0, 1, 0]])\n', (255, 281), True, 'import numpy as np\n'), ((295, 350), 'cv2.warpAffine', 'cv.warpAffine', (['img', 'translation_matrix', '(width, height)'], {}), '(img, translation_matrix, (width, height))\n', (308, 350), True, 'import cv2 as cv\n'), ((355, 395), 'cv2.imshow', 'cv.imshow', (['"""1) 200 Pixels right"""', 'output'], {}), "('1) 200 Pixels right', output)\n", (364, 395), True, 'import cv2 as cv\n'), ((472, 507), 'numpy.float32', 'np.float32', (['[[1, 0, 0], [0, 1, 50]]'], {}), '([[1, 0, 0], [0, 1, 50]])\n', (482, 507), True, 'import numpy as np\n'), ((527, 582), 'cv2.warpAffine', 'cv.warpAffine', (['img', 'translation_matrix', '(width, height)'], {}), '(img, translation_matrix, (width, height))\n', (540, 582), True, 'import cv2 as cv\n'), ((587, 625), 'cv2.imshow', 'cv.imshow', (['"""2) 50 Pixels Down"""', 'output'], {}), "('2) 50 Pixels Down', output)\n", (596, 625), True, 'import cv2 as cv\n'), ((696, 733), 'numpy.float32', 'np.float32', (['[[1, 0, 200], [0, 1, 50]]'], {}), '([[1, 0, 200], [0, 1, 50]])\n', (706, 733), True, 'import numpy as np\n'), ((747, 802), 'cv2.warpAffine', 'cv.warpAffine', (['img', 'translation_matrix', '(width, height)'], {}), '(img, translation_matrix, (width, height))\n', (760, 802), True, 'import cv2 as cv\n'), ((807, 851), 'cv2.imshow', 'cv.imshow', (['"""3) (dx, dy) = (200, 50)"""', 'output'], {}), "('3) (dx, dy) = (200, 50)', output)\n", (816, 851), True, 'import cv2 as cv\n'), ((952, 989), 'numpy.float32', 'np.float32', (['[[1, 0, 200], [0, 1, 50]]'], {}), '([[1, 0, 200], [0, 1, 50]])\n', (962, 989), True, 'import numpy as np\n'), ((1003, 1069), 'cv2.warpAffine', 'cv.warpAffine', (['img', 'translation_matrix', '(width + 200, height + 50)'], {}), '(img, translation_matrix, (width + 200, height + 50))\n', (1016, 1069), True, 'import cv2 as cv\n'), ((1074, 1113), 'cv2.imshow', 'cv.imshow', (['"""4) Preventing Crop"""', 'output'], {}), "('4) Preventing Crop', output)\n", (1083, 1113), True, 'import cv2 as cv\n'), ((1119, 1132), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1129, 1132), True, 'import cv2 as cv\n'), ((1137, 1159), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1157, 1159), True, 'import cv2 as cv\n')]
|
import numpy as np
import vrep
import ctypes
import math
import sys
import time
sim_dt = 0.01
dt = 0.001
SYNC = True
vrep_mode = vrep.simx_opmode_oneshot
def b( num ):
""" forces magnitude to be 1 or less """
if abs( num ) > 1.0:
return math.copysign( 1.0, num )
else:
return num
def convert_angles( ang ):
""" Converts Euler angles from x-y-z to z-x-y convention """
s1 = math.sin(ang[0])
s2 = math.sin(ang[1])
s3 = math.sin(ang[2])
c1 = math.cos(ang[0])
c2 = math.cos(ang[1])
c3 = math.cos(ang[2])
pitch = math.asin( b(c1*c3*s2-s1*s3) )
cp = math.cos(pitch)
# just in case
if cp == 0:
cp = 0.000001
yaw = math.asin( b((c1*s3+c3*s1*s2)/cp) ) #flipped
# Fix for getting the quadrants right
if c3 < 0 and yaw > 0:
yaw = math.pi - yaw
elif c3 < 0 and yaw < 0:
yaw = -math.pi - yaw
roll = math.asin( b((c3*s1+c1*s2*s3)/cp) ) #flipped
return [roll, pitch, yaw]
class Quadcopter( object ):
"""
This callable class will return the state of the quadcopter relative to its
target whenever it is called. It will also accept motor commands which will be
sent to the quadcopter in V-REP.
"""
def __init__( self, max_target_distance=4, noise=False,
noise_std=None, dodging=True,
target_func=None, cid=None,ori_mode=False
):
self.ori_mode = ori_mode
# If a cid is specified, assume the connection has already been
# established and should remain open
if cid is None:
vrep.simxFinish(-1) # just in case, close all opened connections
self.cid = vrep.simxStart('127.0.0.1',19997,True,True,5000,5)
else:
self.cid = cid
if self.cid != -1:
print ('Connected to V-REP remote API server, client id: %s' % self.cid)
vrep.simxStartSimulation( self.cid, vrep.simx_opmode_oneshot )
if SYNC:
vrep.simxSynchronous( self.cid, True )
else:
print ('Failed connecting to V-REP remote API server')
self.exit()
err, self.copter = vrep.simxGetObjectHandle(self.cid, "Quadricopter_base",
vrep.simx_opmode_oneshot_wait )
err, self.target = vrep.simxGetObjectHandle(self.cid, "Quadricopter_target",
vrep.simx_opmode_oneshot_wait )
# Reset the motor commands to zero
packedData=vrep.simxPackFloats([0,0,0,0])
raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData)
err = vrep.simxSetStringSignal(self.cid, "rotorTargetVelocities",
raw_bytes,
vrep_mode)
self.pos = [0,0,0]
self.pos_err = [0,0,0]
self.t_pos = [0,0,0]
self.lin = [0,0,0]
self.ori = [0,0,0]
self.ori_err = [0,0,0]
self.t_ori = [0,0,0]
self.ang = [0,0,0]
self.count = 0
# Maximum target distance error that can be returned
self.max_target_distance = max_target_distance
# If noise is being modelled
if noise_std is not None:
self.noise = True
else:
self.noise = False
# Standard Deviation of the noise for the 4 state variables
self.noise_std = noise_std
# Overwrite the get_target method if the target is to be controlled by a
# function instead of by V-REP
if target_func is not None:
self.step = 0
self.target_func = target_func
def get_target():
self.t_pos, self.t_ori = self.target_func( self.step )
self.step += 1
self.get_target = get_target
def stop( self ):
"""
Stops the simulation
"""
err = vrep.simxStopSimulation( self.cid, vrep.simx_opmode_oneshot_wait )
time.sleep(0.01) # Maybe this will prevent V-REP from crashing as often
return hasattr(self, 'failed') # Returns true if this is a failed run
def reset( self ):
err = vrep.simxStopSimulation(self.cid, vrep.simx_opmode_oneshot_wait)
time.sleep(1)
self.pos_err = [0,0,0]
self.ori_err = [0,0,0]
self.lin = [0,0,0]
self.ang = [0,0,0]
err = vrep.simxStartSimulation(self.cid, vrep.simx_opmode_oneshot_wait)
if SYNC:
vrep.simxSynchronous( self.cid, True )
def exit( self ):
self.failed = True
exit(1)
def get_target( self ):
err, self.t_ori = vrep.simxGetObjectOrientation(self.cid, self.target, -1,
vrep_mode )
err, self.t_pos = vrep.simxGetObjectPosition(self.cid, self.target, -1,
vrep_mode )
# Convert orientations to z-y-x convention
self.t_ori = convert_angles(self.t_ori)
def calculate_error( self ):
# Return the state variables
err, self.ori = vrep.simxGetObjectOrientation(self.cid, self.copter, -1,
vrep_mode )
err, self.pos = vrep.simxGetObjectPosition(self.cid, self.copter, -1,
vrep_mode )
err, self.lin, self.ang = vrep.simxGetObjectVelocity(self.cid, self.copter,
vrep_mode )
self.ori = convert_angles(self.ori)
# Apply noise to each measurement if required
#FIXME this is a dumb way to do this, clean it up later
# if self.noise:
# n_pos = np.random.normal(0,self.noise_std[0],3)
# n_lin = np.random.normal(0,self.noise_std[1],3)
# n_ori = np.random.normal(0,self.noise_std[2],3)
# n_ang = np.random.normal(0,self.noise_std[3],3)
# for i in range(3):
# self.pos[i] += n_pos[i]
# self.lin[i] += n_lin[i]
# self.ori[i] += n_ori[i]
# self.ang[i] += n_ang[i]
#TODO: might have to wrap angles here
# Find the error
self.ori_err = [self.t_ori[0] - self.ori[0],
self.t_ori[1] - self.ori[1],
self.t_ori[2] - self.ori[2]]
# print(self.ori_err)
cz = math.cos(self.ori[2])
sz = math.sin(self.ori[2])
x_err = self.t_pos[0] - self.pos[0]
y_err = self.t_pos[1] - self.pos[1]
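        # Rotate the world-frame position error into the quadcopter's yaw-aligned body frame.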
if not self.ori_mode:
self.pos_err = [ x_err * cz + y_err * sz,
-x_err * sz + y_err * cz,
self.t_pos[2] - self.pos[2]]
else:
self.pos_err = [0,0,
self.t_pos[2] - self.pos[2]]
# print(self.pos_err)
self.lin = [self.lin[0]*cz+self.lin[1]*sz, -self.lin[0]*sz+self.lin[1]*cz, self.lin[2]]
self.ang = [self.ang[0]*cz+self.ang[1]*sz, -self.ang[0]*sz+self.ang[1]*cz, self.ang[2]]
for i in range(3):
if self.ori_err[i] > math.pi:
self.ori_err[i] -= 2 * math.pi
elif self.ori_err[i] < -math.pi:
self.ori_err[i] += 2 * math.pi
def send_motor_commands( self, values ):
# Limit motors by max and min values
motor_values = np.zeros(4)
for i in range(4):
"""
if values[i] > 30:
motor_values[i] = 30
elif values[i] < 0:
motor_values[i] = 0
else:
motor_values[i] = values[i]
"""
motor_values[i] = values[i]
packedData=vrep.simxPackFloats(motor_values.flatten())
raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData)
err = vrep.simxSetStringSignal(self.cid, "rotorTargetVelocities",
raw_bytes,
vrep_mode)
def handle_input( self, values ):
# Send motor commands to V-REP
self.send_motor_commands( values )
# Retrieve target location
self.get_target()
# Calculate state error
self.calculate_error()
def bound( self, value ):
if abs( value ) > self.max_target_distance:
return math.copysign( self.max_target_distance, value )
else:
return value
def get_state( self ):
"""
Returns the current state. Used for recording benchmarks of performance
"""
return [self.pos, self.ori,
self.lin, self.ang,
self.t_pos, self.t_ori]
def handle_output( self ):
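        # Rescale the horizontal position error so its magnitude never exceeds max_target_distance (the +.1 terms avoid division by zero).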
l = math.sqrt(self.pos_err[0]**2 + self.pos_err[1]**2)
bl = self.bound(l)
r = (bl+.1)/(l+.1)
return [r*self.pos_err[0], r*self.pos_err[1], self.bound(self.pos_err[2]),
self.lin[0], self.lin[1], self.lin[2],
self.ori_err[0], self.ori_err[1], self.ori_err[2],
self.ang[0], self.ang[1], self.ang[2]]
def __call__( self, t, values ):
""" This class will be callable within a nengo node. It will accept as input
the control signals for each rotor, and will output the relevant state
variables (position, velocity, orientation, angular velocity).
"""
self.count += 1
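        # Only step V-REP once every sim_dt/dt calls, since this node is ticked on a finer timestep than the simulator.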
if self.count == int(round(sim_dt/dt)):
self.count = 0
self.handle_input( values )
if SYNC:
vrep.simxSynchronousTrigger( self.cid )
return self.handle_output()
class FullStateQuadcopter( Quadcopter ):
"""
Returns both egocentric and allocentric information about the state
"""
def handle_output( self ):
l = math.sqrt(self.pos_err[0]**2 + self.pos_err[1]**2)
bl = self.bound(l)
r = (bl+.1)/(l+.1)
return [r*self.pos_err[0], r*self.pos_err[1], self.bound(self.pos_err[2]),
self.lin[0], self.lin[1], self.lin[2],
self.ori_err[0], self.ori_err[1], self.ori_err[2],
self.ang[0], self.ang[1], self.ang[2],
self.pos[0], self.pos[1], self.pos[2],
self.ori[0], self.ori[1], self.ori[2],
]
class FullStateTargetQuadcopter( Quadcopter ):
"""
Returns both egocentric and allocentric information about the state
as well as the state of the target
"""
def handle_output( self ):
l = math.sqrt(self.pos_err[0]**2 + self.pos_err[1]**2)
bl = self.bound(l)
r = (bl+.1)/(l+.1)
return [r*self.pos_err[0], r*self.pos_err[1], self.bound(self.pos_err[2]),
self.lin[0], self.lin[1], self.lin[2],
self.ori_err[0], self.ori_err[1], self.ori_err[2],
self.ang[0], self.ang[1], self.ang[2],
self.pos[0], self.pos[1], self.pos[2],
self.ori[0], self.ori[1], self.ori[2],
self.t_pos[0], self.t_pos[1], self.t_pos[2],
self.t_ori[0], self.t_ori[1], self.t_ori[2],
]
class TargetControlledQuadcopter( Quadcopter ):
"""
The target location is sent as input to this node, rather than reading from a
manually controlled target in V-REP. This class is designed to be used for
hyperopt tuning of gains.
"""
def set_target( self, v ):
self.t_pos = [v[0], v[1], v[2]]
self.t_ori = [0, 0, v[3]]
def handle_input( self, values ):
"""
First four elements of values are the motor commands.
The next four are x,y,z,yaw of the target
"""
self.send_motor_commands( values[:4] )
self.set_target( values[4:] )
self.calculate_error()
def exit( self ):
# When running hyperopt to find gains, should not exit program if one
# trial fails
self.failed = True
class AdaptiveController( object ):
"""
Adaptive controller based on Slotine's methods and physics model from the
python quadcopter simulator
"""
def __init__( self, adaptive=True, dt=0.001, initial_param=None ):
# When false, parameter updating does not occur
self.adaptive = adaptive
self.initialize_parameters( initial_param )
# Gain set
k1 = 0.43352026190263104
k2 = 2.0 * 2
k3 = 0.5388202808181405
k4 = 1.65 * 2
k5 = 2.5995452450850185
k6 = 0.802872750102059 * 2
k7 = 0.5990281657438163
k8 = 2.8897310746350824 * 2
ak1 = 0.026210965785217845
ak2 = 2.0 * 5
ak3 = 0.027614986033826894
ak4 = 1.65 * 5
ak6 = k6
ak8 = k8
self.K = np.matrix([[ 0, 0, k2, 0, 0,-k4, 0, 0, 0, 0, 0, 0],
[ 0, k1, 0, 0,-k3, 0,-k5, 0, 0, k7, 0, 0],
[-k1, 0, 0, k3, 0, 0, 0,-k5, 0, 0, k7, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0,-k6, 0, 0, k8] ])
self.AK = np.matrix([[ 0, 0, ak2, 0, 0,-ak4, 0, 0, 0, 0, 0, 0],
[ 0, ak1, 0, 0,-ak3, 0, 0, 0, 0, 0, 0, 0],
[-ak1, 0, 0, ak3, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, -ak6, 0, 0, ak8] ])
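        # Mixer matrix: combines the four task-space commands (the all-ones first column is collective thrust) into the four individual rotor velocities.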
self.task_to_rotor = np.matrix([[ 1,-1, 1, 1],
[ 1,-1,-1,-1],
[ 1, 1,-1, 1],
[ 1, 1, 1,-1] ])
self.control_matrix = self.task_to_rotor * self.K
self.adaptive_matrix = self.task_to_rotor * self.AK
self.error = np.matrix([[0.0], # x
[0.0], # y
[0.0], # z
[0.0], # dx
[0.0], # dy
[0.0], # dz
[0.0], # roll
[0.0], # pitch
[0.0], # yaw
[0.0], # droll
[0.0], # dpitch
[0.0], # dyaw
])
self.learning_rate = 1
self.dt = dt
def initialize_parameters( self, initial_param ):
# Unknown Constant Vector
self.param = np.matrix([[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
[0.0],
])
# If initial parameters are specified, set them now
if initial_param is not None:
for i in range(len(initial_param)):
self.param[i,0] = initial_param[i]
def compute_Y( self ):
"""
Generate the Y matrix
"""
# TODO: this might need to be allocentric, or the equations changed for
# egocentric
c1 = math.cos( self.error[6,0] )
c2 = math.cos( self.error[7,0] )
c3 = math.cos( self.error[8,0] )
s1 = math.sin( self.error[6,0] )
s2 = math.sin( self.error[7,0] )
s3 = math.sin( self.error[8,0] )
at = c1*s2*c3 + s1*s3
bt = c1*s2*s3 - s1*c3
ct = c1*c2
a = at / (at*at + bt*bt + ct*ct)
b = bt / (at*at + bt*bt + ct*ct)
c = ct / (at*at + bt*bt + ct*ct)
ax = a*abs(self.error[3,0])*self.error[3,0]
by = b*abs(self.error[4,0])*self.error[4,0]
cz = c*abs(self.error[5,0])*self.error[5,0]
rxy = self.error[9,0]*self.error[10,0]
rxz = self.error[9,0]*self.error[1,0]
ryz = self.error[10,0]*self.error[11,0]
"""
self.Y = np.matrix([[ax, by, cz, c, 0, -rxz, -rxy],
[ax, by, cz, c, -ryz, 0, rxy],
[ax, by, cz, c, 0, rxz, -rxy],
[ax, by, cz, c, ryz, 0, rxy],
])
"""
# Trying out different orientation of rotor blades
self.Y = np.matrix([[ax, by, cz, c, -ryz, rxz, rxy],
[ax, by, cz, c, -ryz, -rxz, -rxy],
[ax, by, cz, c, ryz, -rxz, rxy],
[ax, by, cz, c, ryz, rxz, -rxy],
])
def compute_rotor_velocities( self ):
"""
Generate the four rotor velocities to control the quadcopter
"""
self.compute_Y()
# Calculate rotor velocities
w = self.Y * self.param +\
self.control_matrix * self.error
#self.adaptive_matrix * self.error
if self.adaptive:
dparam = self.learning_rate *\
self.Y.T *\
( self.control_matrix * self.error ) *\
self.dt
#( self.adaptive_matrix * self.error ) *\
#self.dt
# Update the parameter estimates
self.param += dparam
return [ w[0,0], w[1,0], w[2,0], w[3,0] ]
def __call__( self, t, values ):
""" This class will be callable within a nengo node. It will accept as input
the 12D state error and will output desired rotor velocities
"""
# Insert state into error matrix
for i in range(len(values)):
self.error[i,0] = values[i]
# Compute desired rotor velocities
return self.compute_rotor_velocities()
|
[
"vrep.simxGetObjectVelocity",
"vrep.simxSynchronousTrigger",
"math.copysign",
"vrep.simxStart",
"vrep.simxSynchronous",
"vrep.simxGetObjectHandle",
"vrep.simxSetStringSignal",
"math.cos",
"vrep.simxGetObjectPosition",
"vrep.simxStopSimulation",
"math.sqrt",
"vrep.simxFinish",
"math.sin",
"time.sleep",
"numpy.matrix",
"vrep.simxGetObjectOrientation",
"numpy.zeros",
"vrep.simxPackFloats",
"vrep.simxStartSimulation"
] |
[((396, 412), 'math.sin', 'math.sin', (['ang[0]'], {}), '(ang[0])\n', (404, 412), False, 'import math\n'), ((420, 436), 'math.sin', 'math.sin', (['ang[1]'], {}), '(ang[1])\n', (428, 436), False, 'import math\n'), ((444, 460), 'math.sin', 'math.sin', (['ang[2]'], {}), '(ang[2])\n', (452, 460), False, 'import math\n'), ((468, 484), 'math.cos', 'math.cos', (['ang[0]'], {}), '(ang[0])\n', (476, 484), False, 'import math\n'), ((492, 508), 'math.cos', 'math.cos', (['ang[1]'], {}), '(ang[1])\n', (500, 508), False, 'import math\n'), ((516, 532), 'math.cos', 'math.cos', (['ang[2]'], {}), '(ang[2])\n', (524, 532), False, 'import math\n'), ((584, 599), 'math.cos', 'math.cos', (['pitch'], {}), '(pitch)\n', (592, 599), False, 'import math\n'), ((249, 272), 'math.copysign', 'math.copysign', (['(1.0)', 'num'], {}), '(1.0, num)\n', (262, 272), False, 'import math\n'), ((2124, 2215), 'vrep.simxGetObjectHandle', 'vrep.simxGetObjectHandle', (['self.cid', '"""Quadricopter_base"""', 'vrep.simx_opmode_oneshot_wait'], {}), "(self.cid, 'Quadricopter_base', vrep.\n simx_opmode_oneshot_wait)\n", (2148, 2215), False, 'import vrep\n'), ((2287, 2380), 'vrep.simxGetObjectHandle', 'vrep.simxGetObjectHandle', (['self.cid', '"""Quadricopter_target"""', 'vrep.simx_opmode_oneshot_wait'], {}), "(self.cid, 'Quadricopter_target', vrep.\n simx_opmode_oneshot_wait)\n", (2311, 2380), False, 'import vrep\n'), ((2488, 2521), 'vrep.simxPackFloats', 'vrep.simxPackFloats', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (2507, 2521), False, 'import vrep\n'), ((2619, 2704), 'vrep.simxSetStringSignal', 'vrep.simxSetStringSignal', (['self.cid', '"""rotorTargetVelocities"""', 'raw_bytes', 'vrep_mode'], {}), "(self.cid, 'rotorTargetVelocities', raw_bytes,\n vrep_mode)\n", (2643, 2704), False, 'import vrep\n'), ((3894, 3958), 'vrep.simxStopSimulation', 'vrep.simxStopSimulation', (['self.cid', 'vrep.simx_opmode_oneshot_wait'], {}), '(self.cid, vrep.simx_opmode_oneshot_wait)\n', (3917, 3958), False, 'import vrep\n'), ((3969, 3985), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (3979, 3985), False, 'import time\n'), ((4158, 4222), 'vrep.simxStopSimulation', 'vrep.simxStopSimulation', (['self.cid', 'vrep.simx_opmode_oneshot_wait'], {}), '(self.cid, vrep.simx_opmode_oneshot_wait)\n', (4181, 4222), False, 'import vrep\n'), ((4231, 4244), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4241, 4244), False, 'import time\n'), ((4375, 4440), 'vrep.simxStartSimulation', 'vrep.simxStartSimulation', (['self.cid', 'vrep.simx_opmode_oneshot_wait'], {}), '(self.cid, vrep.simx_opmode_oneshot_wait)\n', (4399, 4440), False, 'import vrep\n'), ((4632, 4699), 'vrep.simxGetObjectOrientation', 'vrep.simxGetObjectOrientation', (['self.cid', 'self.target', '(-1)', 'vrep_mode'], {}), '(self.cid, self.target, -1, vrep_mode)\n', (4661, 4699), False, 'import vrep\n'), ((4779, 4843), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.cid', 'self.target', '(-1)', 'vrep_mode'], {}), '(self.cid, self.target, -1, vrep_mode)\n', (4805, 4843), False, 'import vrep\n'), ((5096, 5163), 'vrep.simxGetObjectOrientation', 'vrep.simxGetObjectOrientation', (['self.cid', 'self.copter', '(-1)', 'vrep_mode'], {}), '(self.cid, self.copter, -1, vrep_mode)\n', (5125, 5163), False, 'import vrep\n'), ((5237, 5301), 'vrep.simxGetObjectPosition', 'vrep.simxGetObjectPosition', (['self.cid', 'self.copter', '(-1)', 'vrep_mode'], {}), '(self.cid, self.copter, -1, vrep_mode)\n', (5263, 5301), False, 'import vrep\n'), ((5381, 5441), 'vrep.simxGetObjectVelocity', 
'vrep.simxGetObjectVelocity', (['self.cid', 'self.copter', 'vrep_mode'], {}), '(self.cid, self.copter, vrep_mode)\n', (5407, 5441), False, 'import vrep\n'), ((6416, 6437), 'math.cos', 'math.cos', (['self.ori[2]'], {}), '(self.ori[2])\n', (6424, 6437), False, 'import math\n'), ((6451, 6472), 'math.sin', 'math.sin', (['self.ori[2]'], {}), '(self.ori[2])\n', (6459, 6472), False, 'import math\n'), ((7396, 7407), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (7404, 7407), True, 'import numpy as np\n'), ((7843, 7928), 'vrep.simxSetStringSignal', 'vrep.simxSetStringSignal', (['self.cid', '"""rotorTargetVelocities"""', 'raw_bytes', 'vrep_mode'], {}), "(self.cid, 'rotorTargetVelocities', raw_bytes,\n vrep_mode)\n", (7867, 7928), False, 'import vrep\n'), ((8741, 8795), 'math.sqrt', 'math.sqrt', (['(self.pos_err[0] ** 2 + self.pos_err[1] ** 2)'], {}), '(self.pos_err[0] ** 2 + self.pos_err[1] ** 2)\n', (8750, 8795), False, 'import math\n'), ((9821, 9875), 'math.sqrt', 'math.sqrt', (['(self.pos_err[0] ** 2 + self.pos_err[1] ** 2)'], {}), '(self.pos_err[0] ** 2 + self.pos_err[1] ** 2)\n', (9830, 9875), False, 'import math\n'), ((10536, 10590), 'math.sqrt', 'math.sqrt', (['(self.pos_err[0] ** 2 + self.pos_err[1] ** 2)'], {}), '(self.pos_err[0] ** 2 + self.pos_err[1] ** 2)\n', (10545, 10590), False, 'import math\n'), ((12650, 12840), 'numpy.matrix', 'np.matrix', (['[[0, 0, k2, 0, 0, -k4, 0, 0, 0, 0, 0, 0], [0, k1, 0, 0, -k3, 0, -k5, 0, 0,\n k7, 0, 0], [-k1, 0, 0, k3, 0, 0, 0, -k5, 0, 0, k7, 0], [0, 0, 0, 0, 0, \n 0, 0, 0, -k6, 0, 0, k8]]'], {}), '([[0, 0, k2, 0, 0, -k4, 0, 0, 0, 0, 0, 0], [0, k1, 0, 0, -k3, 0, -\n k5, 0, 0, k7, 0, 0], [-k1, 0, 0, k3, 0, 0, 0, -k5, 0, 0, k7, 0], [0, 0,\n 0, 0, 0, 0, 0, 0, -k6, 0, 0, k8]])\n', (12659, 12840), True, 'import numpy as np\n'), ((12954, 13146), 'numpy.matrix', 'np.matrix', (['[[0, 0, ak2, 0, 0, -ak4, 0, 0, 0, 0, 0, 0], [0, ak1, 0, 0, -ak3, 0, 0, 0, 0,\n 0, 0, 0], [-ak1, 0, 0, ak3, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, -ak6, 0, 0, ak8]]'], {}), '([[0, 0, ak2, 0, 0, -ak4, 0, 0, 0, 0, 0, 0], [0, ak1, 0, 0, -ak3, \n 0, 0, 0, 0, 0, 0, 0], [-ak1, 0, 0, ak3, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 0, -ak6, 0, 0, ak8]])\n', (12963, 13146), True, 'import numpy as np\n'), ((13280, 13353), 'numpy.matrix', 'np.matrix', (['[[1, -1, 1, 1], [1, -1, -1, -1], [1, 1, -1, 1], [1, 1, 1, -1]]'], {}), '([[1, -1, 1, 1], [1, -1, -1, -1], [1, 1, -1, 1], [1, 1, 1, -1]])\n', (13289, 13353), True, 'import numpy as np\n'), ((13594, 13694), 'numpy.matrix', 'np.matrix', (['[[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0\n ], [0.0]]'], {}), '([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [\n 0.0], [0.0], [0.0]])\n', (13603, 13694), True, 'import numpy as np\n'), ((14253, 14313), 'numpy.matrix', 'np.matrix', (['[[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]])\n', (14262, 14313), True, 'import numpy as np\n'), ((14861, 14887), 'math.cos', 'math.cos', (['self.error[6, 0]'], {}), '(self.error[6, 0])\n', (14869, 14887), False, 'import math\n'), ((14898, 14924), 'math.cos', 'math.cos', (['self.error[7, 0]'], {}), '(self.error[7, 0])\n', (14906, 14924), False, 'import math\n'), ((14935, 14961), 'math.cos', 'math.cos', (['self.error[8, 0]'], {}), '(self.error[8, 0])\n', (14943, 14961), False, 'import math\n'), ((14972, 14998), 'math.sin', 'math.sin', (['self.error[6, 0]'], {}), '(self.error[6, 0])\n', (14980, 14998), False, 'import math\n'), ((15009, 15035), 
'math.sin', 'math.sin', (['self.error[7, 0]'], {}), '(self.error[7, 0])\n', (15017, 15035), False, 'import math\n'), ((15046, 15072), 'math.sin', 'math.sin', (['self.error[8, 0]'], {}), '(self.error[8, 0])\n', (15054, 15072), False, 'import math\n'), ((15860, 16010), 'numpy.matrix', 'np.matrix', (['[[ax, by, cz, c, -ryz, rxz, rxy], [ax, by, cz, c, -ryz, -rxz, -rxy], [ax,\n by, cz, c, ryz, -rxz, rxy], [ax, by, cz, c, ryz, rxz, -rxy]]'], {}), '([[ax, by, cz, c, -ryz, rxz, rxy], [ax, by, cz, c, -ryz, -rxz, -\n rxy], [ax, by, cz, c, ryz, -rxz, rxy], [ax, by, cz, c, ryz, rxz, -rxy]])\n', (15869, 16010), True, 'import numpy as np\n'), ((1547, 1566), 'vrep.simxFinish', 'vrep.simxFinish', (['(-1)'], {}), '(-1)\n', (1562, 1566), False, 'import vrep\n'), ((1635, 1690), 'vrep.simxStart', 'vrep.simxStart', (['"""127.0.0.1"""', '(19997)', '(True)', '(True)', '(5000)', '(5)'], {}), "('127.0.0.1', 19997, True, True, 5000, 5)\n", (1649, 1690), False, 'import vrep\n'), ((1852, 1912), 'vrep.simxStartSimulation', 'vrep.simxStartSimulation', (['self.cid', 'vrep.simx_opmode_oneshot'], {}), '(self.cid, vrep.simx_opmode_oneshot)\n', (1876, 1912), False, 'import vrep\n'), ((4468, 4504), 'vrep.simxSynchronous', 'vrep.simxSynchronous', (['self.cid', '(True)'], {}), '(self.cid, True)\n', (4488, 4504), False, 'import vrep\n'), ((8365, 8411), 'math.copysign', 'math.copysign', (['self.max_target_distance', 'value'], {}), '(self.max_target_distance, value)\n', (8378, 8411), False, 'import math\n'), ((1952, 1988), 'vrep.simxSynchronous', 'vrep.simxSynchronous', (['self.cid', '(True)'], {}), '(self.cid, True)\n', (1972, 1988), False, 'import vrep\n'), ((9572, 9609), 'vrep.simxSynchronousTrigger', 'vrep.simxSynchronousTrigger', (['self.cid'], {}), '(self.cid)\n', (9599, 9609), False, 'import vrep\n')]
|
from typing import Any
import pytest
from pytestqt.qtbot import QtBot
from qtpy.QtCore import Signal, QObject
import numpy as np
from pydm.application import PyDMApplication
from pydm.data_plugins.calc_plugin import epics_string, epics_unsigned
from pydm.widgets.channel import PyDMChannel
@pytest.mark.parametrize(
"input_string,expected",
[
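        # Each int8 array holds ASCII codes (0x6f 0x6b 0x61 0x79 spells "okay"); a 0 byte terminates the string.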
(np.array((0x6f, 0x6b, 0x61, 0x79, 0, 42), dtype=np.int8), "okay"),
(np.array((0x6f, 0x6b, 0x61, 0x79), dtype=np.int8), "okay"),
(np.array((0, 0x6f, 0x6b, 0x61, 0x79, 0, 42, 42), dtype=np.int8), ""),
],
)
def test_epics_string(input_string: str, expected: str):
assert epics_string(input_string) == expected
@pytest.mark.parametrize(
"input_int,bits,expected",
[
(100, 32, 100),
(-1, 8, 255),
(-2, 4, 0b1110),
],
)
def test_epics_unsigned(input_int: int, bits: int, expected: int):
assert epics_unsigned(input_int, bits) == expected
@pytest.mark.parametrize(
"calc,input1,expected1,input2,expected2",
[
('val + 3', 0, 3, 1, 4),
('int(np.abs(val))', -5, 5, -10, 10),
('math.floor(val)', 3.4, 3, 5.7, 5),
('epics_string(val)',
np.array((0x61, 0), dtype=np.int8), 'a',
np.array((0x62, 0), dtype=np.int8), 'b'),
('epics_unsigned(val, 8)', -1, 255, -2, 254),
]
)
def test_calc_plugin(
qapp: PyDMApplication,
qtbot: QtBot,
calc: str,
input1: Any,
expected1: Any,
input2: Any,
expected2: Any,
):
class SigHolder(QObject):
sig = Signal(type(input1))
sig_holder = SigHolder()
type_str = str(type(input1))
local_addr = f'loc://test_calc_plugin_local_{calc}'
local_ch = PyDMChannel(
address=f'{local_addr}?type={type_str}&init={input1}',
value_signal=sig_holder.sig,
)
local_ch.connect()
calc_values = []
def new_calc_value(val: Any):
calc_values.append(val)
calc_addr = f'calc://test_calc_plugin_calc_{calc}'
calc_ch = PyDMChannel(
address=f'{calc_addr}?val={local_addr}&expr={calc}',
value_slot=new_calc_value,
)
calc_ch.connect()
sig_holder.sig.emit(input1)
def has_value():
assert len(calc_values) >= 1
qtbot.wait_until(has_value)
assert calc_values[0] == expected1
calc_values.clear()
sig_holder.sig.emit(input2)
qtbot.wait_until(has_value)
assert calc_values[0] == expected2
|
[
"pydm.data_plugins.calc_plugin.epics_string",
"pydm.widgets.channel.PyDMChannel",
"pydm.data_plugins.calc_plugin.epics_unsigned",
"numpy.array",
"pytest.mark.parametrize"
] |
[((698, 797), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_int,bits,expected"""', '[(100, 32, 100), (-1, 8, 255), (-2, 4, 14)]'], {}), "('input_int,bits,expected', [(100, 32, 100), (-1, 8,\n 255), (-2, 4, 14)])\n", (721, 797), False, 'import pytest\n'), ((1717, 1816), 'pydm.widgets.channel.PyDMChannel', 'PyDMChannel', ([], {'address': 'f"""{local_addr}?type={type_str}&init={input1}"""', 'value_signal': 'sig_holder.sig'}), "(address=f'{local_addr}?type={type_str}&init={input1}',\n value_signal=sig_holder.sig)\n", (1728, 1816), False, 'from pydm.widgets.channel import PyDMChannel\n'), ((2017, 2113), 'pydm.widgets.channel.PyDMChannel', 'PyDMChannel', ([], {'address': 'f"""{calc_addr}?val={local_addr}&expr={calc}"""', 'value_slot': 'new_calc_value'}), "(address=f'{calc_addr}?val={local_addr}&expr={calc}', value_slot\n =new_calc_value)\n", (2028, 2113), False, 'from pydm.widgets.channel import PyDMChannel\n'), ((656, 682), 'pydm.data_plugins.calc_plugin.epics_string', 'epics_string', (['input_string'], {}), '(input_string)\n', (668, 682), False, 'from pydm.data_plugins.calc_plugin import epics_string, epics_unsigned\n'), ((918, 949), 'pydm.data_plugins.calc_plugin.epics_unsigned', 'epics_unsigned', (['input_int', 'bits'], {}), '(input_int, bits)\n', (932, 949), False, 'from pydm.data_plugins.calc_plugin import epics_string, epics_unsigned\n'), ((364, 415), 'numpy.array', 'np.array', (['(111, 107, 97, 121, 0, 42)'], {'dtype': 'np.int8'}), '((111, 107, 97, 121, 0, 42), dtype=np.int8)\n', (372, 415), True, 'import numpy as np\n'), ((440, 484), 'numpy.array', 'np.array', (['(111, 107, 97, 121)'], {'dtype': 'np.int8'}), '((111, 107, 97, 121), dtype=np.int8)\n', (448, 484), True, 'import numpy as np\n'), ((509, 567), 'numpy.array', 'np.array', (['(0, 111, 107, 97, 121, 0, 42, 42)'], {'dtype': 'np.int8'}), '((0, 111, 107, 97, 121, 0, 42, 42), dtype=np.int8)\n', (517, 567), True, 'import numpy as np\n'), ((1205, 1237), 'numpy.array', 'np.array', (['(97, 0)'], {'dtype': 'np.int8'}), '((97, 0), dtype=np.int8)\n', (1213, 1237), True, 'import numpy as np\n'), ((1255, 1287), 'numpy.array', 'np.array', (['(98, 0)'], {'dtype': 'np.int8'}), '((98, 0), dtype=np.int8)\n', (1263, 1287), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
def main():
path = "C:\\Users\\enesa\\Documents\\MATLAB\\blobs_objects.jpg"
img = cv2.imread(path, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
filter1 = np.array(([0, -1, 0], [-1, 5, -1], [0, -1, 0]), np.float32) #Sharpening Filter
output = cv2.filter2D(img, -1, filter1) #convolution filter
blur = cv2.GaussianBlur(img,(5,5),0)
gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,170,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
###########################################################################################################################
# Create a simple filter. The kernel slides through the image (as in 2D convolution).
kernel = np.ones((3, 3), np.uint8)
# Create a Rectangular Structuring Element
se1 = cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))
# Create a Elliptical Structuring Element
se2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
# Apply Erosion method over the image with kernel
erosion = cv2.erode(thresh,se1,iterations = 1)
# Apply Dilation method over the image with kernel
dilation = cv2.dilate(thresh,se2,iterations = 1)
# Noise removal using Morphological closing operation
closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel, iterations = 4)
# Noise removal using Morphological opening operation
opening = cv2.morphologyEx(erosion, cv2.MORPH_OPEN, kernel, iterations = 1)
###########################################################################################################################
dilation = 255 - dilation # Complementing Operation
_, contours, _ = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print("{} Objects have detected!".format(len(contours)))
original = cv2.imread(path, 1)
original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)
sayac = 0
for i in contours:
# perimeter = cv2.arcLength(i,True)
# if perimeter > 20:
sayac = sayac +1
#cv2.drawContours(img, contours, -1, (0, 0, 255), 2)
x,y,w,h = cv2.boundingRect(i)
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
cv2.putText(img, str(sayac), (x+10, y+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        #plt.plot(cx, cy, color='red', marker='o', linestyle='dashed', linewidth=2, markersize=12) # put an x mark at the designated point.
#cv2.putText(img, 'x', (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
#cv2.putText(closing, str(sayac), (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 255), 1)
print("{} Objects have drown!".format(sayac))
###########################################################################################################################
# output = [original, img]
# titles = ['Original', 'Contours']
# for i in range(2):
# plt.subplot(1, 2, i+1)
# plt.imshow(output[i])
# plt.title(titles[i])
# plt.xticks([])
# plt.yticks([])
cv2.imshow('Orignal Image', img)
#cv2.imshow('Erosion Image', erosion)
cv2.imshow('Dilation Image', dilation)
cv2.imshow('Closing Image', closing)
cv2.imshow('Opening Image', opening)
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
|
[
"cv2.GaussianBlur",
"cv2.boundingRect",
"matplotlib.pyplot.show",
"cv2.filter2D",
"cv2.dilate",
"cv2.cvtColor",
"cv2.getStructuringElement",
"cv2.threshold",
"cv2.morphologyEx",
"cv2.waitKey",
"numpy.ones",
"cv2.destroyAllWindows",
"cv2.imread",
"numpy.array",
"cv2.rectangle",
"cv2.erode",
"cv2.imshow",
"cv2.findContours"
] |
[((158, 177), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (168, 177), False, 'import cv2\n'), ((186, 222), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (198, 222), False, 'import cv2\n'), ((238, 297), 'numpy.array', 'np.array', (['([0, -1, 0], [-1, 5, -1], [0, -1, 0])', 'np.float32'], {}), '(([0, -1, 0], [-1, 5, -1], [0, -1, 0]), np.float32)\n', (246, 297), True, 'import numpy as np\n'), ((328, 358), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'filter1'], {}), '(img, -1, filter1)\n', (340, 358), False, 'import cv2\n'), ((390, 422), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (406, 422), False, 'import cv2\n'), ((431, 469), 'cv2.cvtColor', 'cv2.cvtColor', (['blur', 'cv2.COLOR_BGR2GRAY'], {}), '(blur, cv2.COLOR_BGR2GRAY)\n', (443, 469), False, 'import cv2\n'), ((487, 553), 'cv2.threshold', 'cv2.threshold', (['gray', '(170)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(gray, 170, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (500, 553), False, 'import cv2\n'), ((777, 802), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (784, 802), True, 'import numpy as np\n'), ((859, 908), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 5)'], {}), '(cv2.MORPH_RECT, (5, 5))\n', (884, 908), False, 'import cv2\n'), ((961, 1013), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(5, 5)'], {}), '(cv2.MORPH_ELLIPSE, (5, 5))\n', (986, 1013), False, 'import cv2\n'), ((1081, 1117), 'cv2.erode', 'cv2.erode', (['thresh', 'se1'], {'iterations': '(1)'}), '(thresh, se1, iterations=1)\n', (1090, 1117), False, 'import cv2\n'), ((1186, 1223), 'cv2.dilate', 'cv2.dilate', (['thresh', 'se2'], {'iterations': '(1)'}), '(thresh, se2, iterations=1)\n', (1196, 1223), False, 'import cv2\n'), ((1294, 1359), 'cv2.morphologyEx', 'cv2.morphologyEx', (['dilation', 'cv2.MORPH_CLOSE', 'kernel'], {'iterations': '(4)'}), '(dilation, cv2.MORPH_CLOSE, kernel, iterations=4)\n', (1310, 1359), False, 'import cv2\n'), ((1433, 1496), 'cv2.morphologyEx', 'cv2.morphologyEx', (['erosion', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(1)'}), '(erosion, cv2.MORPH_OPEN, kernel, iterations=1)\n', (1449, 1496), False, 'import cv2\n'), ((1705, 1771), 'cv2.findContours', 'cv2.findContours', (['dilation', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1721, 1771), False, 'import cv2\n'), ((1850, 1869), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (1860, 1869), False, 'import cv2\n'), ((1886, 1927), 'cv2.cvtColor', 'cv2.cvtColor', (['original', 'cv2.COLOR_BGR2RGB'], {}), '(original, cv2.COLOR_BGR2RGB)\n', (1898, 1927), False, 'import cv2\n'), ((2989, 3021), 'cv2.imshow', 'cv2.imshow', (['"""Orignal Image"""', 'img'], {}), "('Orignal Image', img)\n", (2999, 3021), False, 'import cv2\n'), ((3065, 3103), 'cv2.imshow', 'cv2.imshow', (['"""Dilation Image"""', 'dilation'], {}), "('Dilation Image', dilation)\n", (3075, 3103), False, 'import cv2\n'), ((3107, 3143), 'cv2.imshow', 'cv2.imshow', (['"""Closing Image"""', 'closing'], {}), "('Closing Image', closing)\n", (3117, 3143), False, 'import cv2\n'), ((3147, 3183), 'cv2.imshow', 'cv2.imshow', (['"""Opening Image"""', 'opening'], {}), "('Opening Image', opening)\n", (3157, 3183), False, 'import cv2\n'), ((3191, 3201), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3199, 3201), True, 'import 
matplotlib.pyplot as plt\n'), ((3205, 3219), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3216, 3219), False, 'import cv2\n'), ((3222, 3245), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3243, 3245), False, 'import cv2\n'), ((2116, 2135), 'cv2.boundingRect', 'cv2.boundingRect', (['i'], {}), '(i)\n', (2132, 2135), False, 'import cv2\n'), ((2139, 2197), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (2152, 2197), False, 'import cv2\n')]
|
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line script to train expert policies.
Picks best seed of train_rl for each (environment, reward) pair specified.
"""
import math
import os
from typing import Any, Mapping, Optional
from imitation.util import util
import numpy as np
import ray
import sacred
import tabulate
from evaluating_rewards import serialize
from evaluating_rewards.experiments import env_rewards
from evaluating_rewards.scripts import rl_common, script_utils
experts_ex = sacred.Experiment("train_experts")
rl_common.make_config(experts_ex)
@experts_ex.config
def default_config():
"""Default configuration."""
log_root = serialize.get_output_dir() # where results are written to
configs = {}
run_tag = "default"
_ = locals()
del _
@experts_ex.config
def default_env_rewards(configs):
"""Set default env-reward pair in `configs` entry if it is empty.
This is needed since if we were to define it in `default_config` it would be impossible
to delete it given how Sacred dictionary merging works.
"""
if not configs:
configs = { # noqa: F401
"evaluating_rewards/PointMassLine-v0": {
"evaluating_rewards/PointMassGroundTruth-v0": {"dummy": {}}
},
}
@experts_ex.config
def logging_config(log_root, run_tag):
"""Logging configuration: timestamp plus unique UUID."""
log_dir = os.path.join(log_root, "train_experts", run_tag, util.make_unique_timestamp())
_ = locals()
del _
def _make_ground_truth_configs():
"""Ground truth configs.
Separate function to avoid polluting Sacred ConfigScope with local variables."""
configs = {}
for env, gt_reward in env_rewards.GROUND_TRUTH_REWARDS_BY_ENV.items():
cfg = rl_common.CONFIG_BY_ENV.get(env, {})
configs.setdefault(env, {}).setdefault(str(gt_reward), {})["dummy"] = cfg
return configs
@experts_ex.named_config
def ground_truth():
"""Train RL expert on all configured environments with the ground-truth reward."""
configs = _make_ground_truth_configs()
run_tag = "ground_truth"
_ = locals()
del _
@experts_ex.named_config
def point_maze_wrong_target():
"""Train RL policies on a "wrong" reward in PointMaze to get a bad visitation distribution."""
configs = {
env: {
"evaluating_rewards/PointMazeWrongTargetWithCtrl-v0": {
"dummy": dict(rl_common.CONFIG_BY_ENV[env])
}
}
for env in ("imitation/PointMazeLeftVel-v0", "imitation/PointMazeRightVel-v0")
}
run_tag = "point_maze_wrong_target"
_ = locals()
del _
@experts_ex.named_config
def test():
"""Unit test config."""
locals().update(**rl_common.FAST_CONFIG)
configs = {
"evaluating_rewards/PointMassLine-v0": {
"evaluating_rewards/PointMassGroundTruth-v0": {"dummy": {}},
}
}
run_tag = "test"
_ = locals()
del _
def _filter_key(k: str) -> Optional[str]:
"""Returns None if key k should be omitted; otherwise returns the (possibly modified) key."""
if k.startswith("return_"):
return None
elif k.endswith("_max") or k.endswith("_min"):
return None
else:
k = k.replace("monitor_return", "mr")
k = k.replace("wrapped_return", "wr")
return k
def tabulate_stats(stats: rl_common.Stats) -> str:
"""Pretty-prints the statistics in `stats` in a table."""
res = []
for (env_name, (reward_type, reward_path)), vs in stats.items():
for seed, (x, _log_dir) in enumerate(vs):
row = {
"env_name": env_name,
"reward_type": reward_type,
"reward_path": reward_path,
"seed": seed,
}
row.update(x)
filtered_row = {}
for k, v in row.items():
if k.endswith("_std"):
k = k[:-4] + "_se"
v = v / math.sqrt(row["n_traj"])
new_k = _filter_key(k)
if new_k is not None:
filtered_row[new_k] = v
res.append(filtered_row)
return tabulate.tabulate(res, headers="keys")
def select_best(stats: rl_common.Stats, log_dir: str) -> None:
"""Pick the best seed for each environment-reward pair in `stats`.
Concretely, chooses the seed with highest mean return, and:
- Adds a symlink `best` in the same directory as the seeds;
- Adds a key "best" that is `True` for the winning seed and `False` otherwise.
Note this modifies `stats` in-place.
For experiments where `reward_type` is not `None` (i.e. we are using a wrapped reward),
uses `wrapped_return_mean` for selection. Otherwise, uses `monitor_return_mean` (the
environment ground-truth return).
Args:
stats: The statistics to select the best seed from. Note this is modified in-place.
log_dir: The log directory for this experiment.
"""
for key, single_stats in stats.items():
env_name, (reward_type, reward_path) = key
return_key = "wrapped_return_mean" if reward_type else "monitor_return_mean"
threshold = env_rewards.THRESHOLDS.get(key, -np.inf)
returns = [x[return_key] for x, _log in single_stats]
best_seed = np.argmax(returns)
base_dir = os.path.join(
log_dir,
script_utils.sanitize_path(env_name),
script_utils.sanitize_path(reward_type),
script_utils.sanitize_path(reward_path),
)
# make symlink relative so it'll work even if directory structure is copied/moved
os.symlink(str(best_seed), os.path.join(base_dir, "best"))
for v, _log in single_stats:
v["pass"] = v[return_key] > threshold
v["best"] = False
best_v, _best_log = single_stats[best_seed]
best_v["best"] = True
if not best_v["pass"]:
print(
f"WARNING: ({env_name}, {reward_type}, {reward_path}) did not meet threshold: "
f"{best_v[return_key]} < {threshold}"
)
@experts_ex.main
def train_experts(
ray_kwargs: Mapping[str, Any],
num_cpus_fudge_factor: float,
global_configs: Mapping[str, Any],
configs: Mapping[str, Mapping[str, Mapping[str, Any]]],
log_dir: str,
) -> rl_common.Stats:
"""Entry-point into script to train expert policies specified by config.
Args:
ray_kwargs: arguments passed to `ray.init`.
num_cpus_fudge_factor: factor by which to scale `num_vec` to compute CPU requirements.
global_configs: configuration to apply to all environment-reward pairs.
configs: configuration for each environment-reward pair.
log_dir: the root directory to log experiments to.
Returns:
Statistics `stats` for all policies, where
`stats[(env_name, (reward_type, reward_path))][i]`
are the statistics for seed `i` of the given environment and reward pair.
"""
ray.init(**ray_kwargs)
try:
stats = rl_common.parallel_training(global_configs, configs, num_cpus_fudge_factor, log_dir)
select_best(stats, log_dir)
finally:
ray.shutdown()
print(tabulate_stats(stats))
return stats
if __name__ == "__main__":
script_utils.experiment_main(experts_ex, "train_experts")
|
[
"imitation.util.util.make_unique_timestamp",
"ray.init",
"evaluating_rewards.scripts.script_utils.sanitize_path",
"evaluating_rewards.scripts.rl_common.parallel_training",
"math.sqrt",
"numpy.argmax",
"evaluating_rewards.scripts.rl_common.make_config",
"evaluating_rewards.scripts.script_utils.experiment_main",
"evaluating_rewards.serialize.get_output_dir",
"evaluating_rewards.scripts.rl_common.CONFIG_BY_ENV.get",
"tabulate.tabulate",
"ray.shutdown",
"sacred.Experiment",
"evaluating_rewards.experiments.env_rewards.THRESHOLDS.get",
"os.path.join",
"evaluating_rewards.experiments.env_rewards.GROUND_TRUTH_REWARDS_BY_ENV.items"
] |
[((1039, 1073), 'sacred.Experiment', 'sacred.Experiment', (['"""train_experts"""'], {}), "('train_experts')\n", (1056, 1073), False, 'import sacred\n'), ((1074, 1107), 'evaluating_rewards.scripts.rl_common.make_config', 'rl_common.make_config', (['experts_ex'], {}), '(experts_ex)\n', (1095, 1107), False, 'from evaluating_rewards.scripts import rl_common, script_utils\n'), ((1199, 1225), 'evaluating_rewards.serialize.get_output_dir', 'serialize.get_output_dir', ([], {}), '()\n', (1223, 1225), False, 'from evaluating_rewards import serialize\n'), ((2255, 2302), 'evaluating_rewards.experiments.env_rewards.GROUND_TRUTH_REWARDS_BY_ENV.items', 'env_rewards.GROUND_TRUTH_REWARDS_BY_ENV.items', ([], {}), '()\n', (2300, 2302), False, 'from evaluating_rewards.experiments import env_rewards\n'), ((4719, 4757), 'tabulate.tabulate', 'tabulate.tabulate', (['res'], {'headers': '"""keys"""'}), "(res, headers='keys')\n", (4736, 4757), False, 'import tabulate\n'), ((7586, 7608), 'ray.init', 'ray.init', ([], {}), '(**ray_kwargs)\n', (7594, 7608), False, 'import ray\n'), ((7877, 7934), 'evaluating_rewards.scripts.script_utils.experiment_main', 'script_utils.experiment_main', (['experts_ex', '"""train_experts"""'], {}), "(experts_ex, 'train_experts')\n", (7905, 7934), False, 'from evaluating_rewards.scripts import rl_common, script_utils\n'), ((2004, 2032), 'imitation.util.util.make_unique_timestamp', 'util.make_unique_timestamp', ([], {}), '()\n', (2030, 2032), False, 'from imitation.util import util\n'), ((2318, 2354), 'evaluating_rewards.scripts.rl_common.CONFIG_BY_ENV.get', 'rl_common.CONFIG_BY_ENV.get', (['env', '{}'], {}), '(env, {})\n', (2345, 2354), False, 'from evaluating_rewards.scripts import rl_common, script_utils\n'), ((5743, 5783), 'evaluating_rewards.experiments.env_rewards.THRESHOLDS.get', 'env_rewards.THRESHOLDS.get', (['key', '(-np.inf)'], {}), '(key, -np.inf)\n', (5769, 5783), False, 'from evaluating_rewards.experiments import env_rewards\n'), ((5867, 5885), 'numpy.argmax', 'np.argmax', (['returns'], {}), '(returns)\n', (5876, 5885), True, 'import numpy as np\n'), ((7635, 7723), 'evaluating_rewards.scripts.rl_common.parallel_training', 'rl_common.parallel_training', (['global_configs', 'configs', 'num_cpus_fudge_factor', 'log_dir'], {}), '(global_configs, configs, num_cpus_fudge_factor,\n log_dir)\n', (7662, 7723), False, 'from evaluating_rewards.scripts import rl_common, script_utils\n'), ((7777, 7791), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (7789, 7791), False, 'import ray\n'), ((5952, 5988), 'evaluating_rewards.scripts.script_utils.sanitize_path', 'script_utils.sanitize_path', (['env_name'], {}), '(env_name)\n', (5978, 5988), False, 'from evaluating_rewards.scripts import rl_common, script_utils\n'), ((6002, 6041), 'evaluating_rewards.scripts.script_utils.sanitize_path', 'script_utils.sanitize_path', (['reward_type'], {}), '(reward_type)\n', (6028, 6041), False, 'from evaluating_rewards.scripts import rl_common, script_utils\n'), ((6055, 6094), 'evaluating_rewards.scripts.script_utils.sanitize_path', 'script_utils.sanitize_path', (['reward_path'], {}), '(reward_path)\n', (6081, 6094), False, 'from evaluating_rewards.scripts import rl_common, script_utils\n'), ((6231, 6261), 'os.path.join', 'os.path.join', (['base_dir', '"""best"""'], {}), "(base_dir, 'best')\n", (6243, 6261), False, 'import os\n'), ((4524, 4548), 'math.sqrt', 'math.sqrt', (["row['n_traj']"], {}), "(row['n_traj'])\n", (4533, 4548), False, 'import math\n')]
|
import numpy as np
from sklearn.preprocessing import FunctionTransformer
from ..wrappers import wrap
def linearize(X):
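    # Flatten each sample's trailing dimensions into a single feature vector.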
X = np.asarray(X)
return np.reshape(X, (X.shape[0], -1))
class Linearize(FunctionTransformer):
"""Extracts features by simply concatenating all elements of the data into
one long vector."""
def __init__(self, **kwargs):
super().__init__(func=linearize, **kwargs)
def SampleLinearize(**kwargs):
return wrap([Linearize, "sample"], **kwargs)
def CheckpointSampleLinearize(**kwargs):
return wrap([Linearize, "sample", "checkpoint"], **kwargs)
|
[
"numpy.asarray",
"numpy.reshape"
] |
[((131, 144), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (141, 144), True, 'import numpy as np\n'), ((156, 187), 'numpy.reshape', 'np.reshape', (['X', '(X.shape[0], -1)'], {}), '(X, (X.shape[0], -1))\n', (166, 187), True, 'import numpy as np\n')]
|
import matplotlib
matplotlib.use('Agg')
#matplotlib.use("gtk")
#matplotlib.use('Qt5Agg')
from rectify_vars_and_wald_functions import *
import pickle
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
sys.path.insert(1, '../../le_experiments/')
# print(data)
import numpy as np
import os
from scipy import stats
from matplotlib.pyplot import figure
from pathlib import Path
import glob
import numpy as np
import read_config
from output_format import H_ALGO_ACTION_FAILURE, H_ALGO_ACTION_SUCCESS, H_ALGO_ACTION, H_ALGO_OBSERVED_REWARD
from output_format import H_ALGO_ESTIMATED_MU, H_ALGO_ESTIMATED_V, H_ALGO_ESTIMATED_ALPHA, H_ALGO_ESTIMATED_BETA
from output_format import H_ALGO_PROB_BEST_ACTION, H_ALGO_NUM_TRIALS
import beta_bernoulli
import scipy.stats
from scipy.stats import spearmanr
from scipy.stats import pearsonr
#import thompson_policy
import ipdb
EPSILON_PROB = .000001
DESIRED_POWER = 0.8
DESIRED_ALPHA = 0.05
SMALL_SIZE = 10
MEDIUM_SIZE = 13
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=8.5) # fontsize of the tick labels
plt.rc('ytick', labelsize=10) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
def plot_minssratio_vs_algs(ax, df_list, x_label, y_label):
# ipdb.set_trace()
idx = 0
ind = np.arange(4)
ax.set_xticks(ind)
labels = ('Uniform', 'EG0pt3', 'EG0pt1', 'TS')
ax.set_xticklabels(labels)
for df in df_list:
        df.loc[df[y_label] > 1.0, y_label] = 1/df.loc[df[y_label] > 1.0, y_label] # ratio is smaller sample size / larger sample size, so invert values above 1
df_reject = df[df[x_label] == True]
x_idx = np.zeros(len(df_reject[x_label])) + idx
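        # Add horizontal jitter so overlapping points stay visible.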
jitter = np.random.normal(0, 0.1, len(x_idx))/2
if idx == 0:
ax.scatter(x_idx + jitter,df_reject[y_label], color = 'red', label = "Rejected Null With Wald Test")
else:
ax.scatter(x_idx + jitter,df_reject[y_label], color = 'red')
df_accept = df[df[x_label] == False]
x_idx = np.zeros(len(df_accept[x_label])) + idx
jitter = np.random.normal(0, 0.1, len(x_idx))/2
if idx == 0:
ax.scatter(x_idx + jitter, df_accept[y_label], color = 'blue', label = "Failed to Reject Null With Wald Test")
else:
ax.scatter(x_idx + jitter, df_accept[y_label], color = 'blue')
idx +=1
def scatter_ratio(df = None, to_check_eg0pt1 = None, to_check_eg0pt3 = None, to_check_unif = None, to_check_ipw = None, n = None, num_sims = None, load_df = True, \
title = None,\
to_check_ts = None):
    '''
    Scatter the minimum sample size ratio min(n_1/n_2, n_2/n_1) for each algorithm, split by whether the
    Wald test rejected the null, with one panel per number of participants.
    '''
if load_df == True:
with open(to_check_eg0pt1, 'rb') as f:
df_eg0pt1 = pickle.load(f)
with open(to_check_eg0pt3, 'rb') as f:
df_eg0pt3 = pickle.load(f)
with open(to_check_unif, 'rb') as f:
df_unif = pickle.load(f)
if to_check_ipw != None:
ipw_t1_list = np.load(to_check_ipw)
if to_check_ts != None:
with open(to_check_ts, 'rb') as t:
df_ts = pickle.load(t)
# SE = np.sqrt(mean_1*(1 - mean_1)/sample_size_1 + mean_2*(1 - mean_2)/sample_size_2)
df_eg0pt1 = df_eg0pt1.dropna()
    wald_pval_eg0pt1 = (1 - scipy.stats.norm.cdf(np.abs(df_eg0pt1["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_eg0pt1["Wald Rejected"] = wald_pval_eg0pt1 < 0.05
df_eg0pt1.to_csv("overview_csvs/EG0pt1/eg0pt1_overview_noNa_n={}.csv".format(n))
df_eg0pt3 = df_eg0pt3.dropna()
    wald_pval_eg0pt3 = (1 - scipy.stats.norm.cdf(np.abs(df_eg0pt3["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_eg0pt3["Wald Rejected"] = wald_pval_eg0pt3 < 0.05
df_eg0pt3.to_csv("overview_csvs/EG0pt3/eg0pt3_overview_noNa_n={}.csv".format(n))
df_ts = df_ts.dropna()
    wald_pval_ts = (1 - scipy.stats.norm.cdf(np.abs(df_ts["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_ts["Wald Rejected"] = wald_pval_ts < 0.05
df_ts.to_csv("overview_csvs/TS/ts_overview_noNa_n={}.csv".format(n))
df_unif = df_unif.dropna()
    wald_pval_unif = (1 - scipy.stats.norm.cdf(np.abs(df_unif["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
    df_unif["Wald Rejected"] = wald_pval_unif < 0.05
df_unif.to_csv("overview_csvs/unif/unif_overview_noNa_n={}.csv".format(n))
fig, ax = plt.subplots(2,2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
step_sizes = df_unif['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[df_eg0pt1['num_steps'] == num_steps].dropna()
df_for_num_steps_eg0pt3 = df_eg0pt3[df_eg0pt3['num_steps'] == num_steps]
df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps]
df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps]
df_list = [df_for_num_steps_unif, df_for_num_steps_eg0pt3, df_for_num_steps_eg0pt1, df_for_num_steps_ts]
# df_list = [df_for_num_steps_eg0pt1]
#df_list = [df_for_num_steps_ts]
# df_list = [df_for_num_steps_unif]
y_label = "ratio"
x_label = "Wald Rejected"
plot_minssratio_vs_algs(ax = ax[i], df_list = df_list, x_label = x_label, y_label = y_label)
num_replications = len(df_for_num_steps_eg0pt1)
#
ax[i].set_xlabel("Number of participants = {} = {}".format(size_vars[i], num_steps))
ax[i].legend()
ax[i].set_ylim(0,1.02)
ax[i].set_ylabel("Minimum Sample Size Ratio \n Min($\\frac{n_1}{n_2}$, $\\frac{n_2}{n_1}$)")
i +=1
fig.suptitle(title)
#fig.tight_layout(rect=[0, 0.03, 1, 0.90])
# if not os.path.isdir("plots"):
# os.path.mkdir("plots")
save_dir_ne = "../simulation_analysis_saves/scatter_ratio_waldreject/NoEffect/"
save_dir_e = "../simulation_analysis_saves/scatter_ratio_waldreject/Effect/"
Path(save_dir_ne).mkdir(parents=True, exist_ok=True)
Path(save_dir_e).mkdir(parents=True, exist_ok=True)
save_str_ne = save_dir_ne + "{}.png".format(title)
save_str_e = save_dir_e + "{}.png".format(title)
# save_str_ne = "../simulation_analysis_saves/scatter_ratio_waldreject/NoEffect/{}.png".format(title)
# save_str_e = "../simulation_analysis_saves/scatter_ratio_waldreject/Effect/{}.png".format(title)
if "No Effect" in title:
print("saving to ", save_str_ne)
fig.savefig(save_str_ne, bbox_inches = "tight")
elif "With Effect" in title:
print("saving to ", save_str_e, bbox_inches = "tight")
fig.savefig(save_str_e)
#plt.show()
plt.clf()
plt.close()
def plot_correlation(fig, ax, df_list, x_label, y_label, num_steps, ax_idx):
# ipdb.set_trace()
idx = 0
df = df_list[0]
# for df in df_list: #This loop not needed
df_reject = df[df["Wald Rejected"] == True]
    xvals = np.abs(df_reject[x_label]/num_steps - 0.5) #|proportion of samples in condition 1 - 0.5|
    yvals = np.abs(df_reject[y_label.format(2)] - df_reject[y_label.format(1)]) #absolute difference in estimated arm means
if ax_idx == 0:
ax.scatter(xvals, yvals, color = 'red', label = "Rejected Null With Wald Test")
else:
ax.scatter(xvals,yvals, color = 'red')
df_accept = df[df["Wald Rejected"] == False]
    xvals = np.abs(df_accept[x_label]/num_steps - 0.5) #|proportion of samples in condition 1 - 0.5|
    yvals = np.abs(df_accept[y_label.format(2)] - df_accept[y_label.format(1)]) #absolute difference in estimated arm means
if len(df) == 0:
ipdb.set_trace()
print()
proportion_reject = len(df_reject)/len(df)
    yvals_all = np.abs(df[y_label.format(2)] - df[y_label.format(1)]) #absolute difference in estimated arm means
    xvals_all = np.abs(df[x_label]/num_steps - 0.5) #|proportion of samples in condition 1 - 0.5|
proportion_reject = np.round(proportion_reject, 3)
coeff, p = spearmanr(xvals_all, yvals_all)
coeff = np.round(coeff, 3)
p = np.round(p, 3)
coeff_pear, p_pear = pearsonr(xvals_all, yvals_all)
coeff_pear = np.round(coeff_pear, 3)
p_pear = np.round(p_pear, 3)
if ax_idx == 0:
ax.scatter(xvals, yvals, color = 'blue', label = "Failed to Reject Null With Wald Test")
ax.legend(loc = "upper center", bbox_to_anchor = (1.2, 1.276))
else:
ax.scatter(xvals,yvals , color = 'blue')
ax.text(0.02, 0.75,"Proprtion Rejected (Power/Type 1 Error) = {} \nSpearman's Correlation Coefficent = {} \nwith pvalue = {}\n Pearon's Correlation Coefficent = {} \nwith pvalue = {}".format(proportion_reject, coeff, p, coeff_pear, p_pear))
# if ax_idx == 0 and 0:
# leg1 = ax.legend((p_red[0], p_blue[0]), "Rejected Null Hypothesis With Wald Test", "Failed To Reject Null Hypothesis With Wald Test", bbox_to_anchor = (1.0, 1.076))
# ax.add_artist(leg1)
# handles, labels = ax.get_legend_handles_labels()
# fig.legend(handles, ["a","g"], loc='upper right', prop={'size': 50})
def scatter_correlation_helper_outer(df = None, df_eg0pt1 = None, df_eg0pt3 = None, df_unif = None, n = None, num_sims = None, load_df = True, \
title = None,\
df_ts = None, effect_size = 0):
alg_key_list = ["TS", "EG0pt1", "EG0pt3", "Uniform"]
alg_key_list = ["TS"]
alg_name_list = ["Thompson Sampling (TS)","Epsilon Greedy 0.1 (EG0.1)","Epsilon Greedy 0.3 (EG0.3)", "Uniform"]
alg_name_list = ["Thompson Sampling (TS)"]
for alg_key, alg_name in zip(alg_key_list, alg_name_list):
if effect_size == 0:
title_scatter_corr = "{} ".format(alg_name) + "Difference in arm means (|$\hatp_1$ - $\hatp_2$|) vs. |Proportion of samples in Condtion 1 - 0.5|" + " For n = {} \n Across {} Simulations \n No Effect $p_1$ = $p_2$ = 0.5".format(n, num_sims)
else:
title_scatter_corr = "{} ".format(alg_name) + "Difference in arm means (|$\hatp_1$ - $\hatp_2$|) vs. |Proportion of samples in Condtion 1 - 0.5|" + " For n = {} \n Across {} Simulations \n With Effect Size {}".format(n, num_sims, effect_size)
scatter_correlation(df_eg0pt1 = df_eg0pt1 , df_eg0pt3 = df_eg0pt3,\
df_unif = df_unif, df_ts = df_ts,\
title = title_scatter_corr, \
n = n, num_sims = num_sims, alg_key = alg_key)
def scatter_correlation(df = None, df_eg0pt1 = None, df_eg0pt3 = None, df_unif = None, n = None, num_sims = None, load_df = True, \
title = None,\
df_ts = None, alg_key = "TS"):
    '''
    Scatter |proportion of samples in condition 1 - 0.5| against the difference in arm
    mean estimates, one subplot per number of steps, to show how sample-size imbalance
    relates to the estimated effect and to Wald-test rejections.
    '''
    wald_pval_eg0pt1 = (1 - scipy.stats.norm.cdf(np.abs(df_eg0pt1["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
#df_eg0pt1["Wald Rejected"] = wald_pval_eg0pt1 < 0.05
df_eg0pt1["Wald Rejected"] = df_eg0pt1["wald_pval"] < 0.05
#df_eg0pt3 = df_eg0pt3.dropna()
    wald_pval_eg0pt3 = (1 - scipy.stats.norm.cdf(np.abs(df_eg0pt3["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_eg0pt3["Wald Rejected"] = df_eg0pt3["wald_pval"] < 0.05
#df_ts = df_ts.dropna()
    wald_pval_ts = (1 - scipy.stats.norm.cdf(np.abs(df_ts["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_ts["Wald Rejected"] = df_ts["wald_pval"] < 0.05
# df_unif = df_unif.dropna()
    wald_pval_unif = (1 - scipy.stats.norm.cdf(np.abs(df_unif["wald_type_stat"].dropna())))*2 #Two sided, symmetric, so compare to 0.05
df_unif["Wald Rejected"] = df_unif["wald_pval"] < 0.05
fig, ax = plt.subplots(2,2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
step_sizes = df_unif['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
alg_key = "TS" #ALWAYS TS
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[df_eg0pt1['num_steps'] == num_steps]
df_for_num_steps_eg0pt3 = df_eg0pt3[df_eg0pt3['num_steps'] == num_steps]
df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps]
df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps]
#df_list = [df_for_num_steps_unif, df_for_num_steps_eg0pt3, df_for_num_steps_eg0pt1, df_for_num_steps_ts]
# df_list = [df_for_num_steps_eg0pt3]
alg_dict = {"TS":df_for_num_steps_ts, "EG0pt1":df_for_num_steps_eg0pt1, "EG0pt3":df_for_num_steps_eg0pt3, "Uniform":df_for_num_steps_unif}
df_list = [alg_dict[alg_key]]
# df_list = [df_for_num_steps_ts]
#df_list = [df_for_num_steps_ts]
# df_list = [df_for_num_steps_unif]
# bins = np.arange(0, 1.01, .025)
x_label = "sample_size_1"
y_label = "mean_{}"
if len(df_list[0]) == 0:
ipdb.set_trace()
plot_correlation(fig, ax = ax[i], df_list = df_list, x_label = x_label, y_label = y_label, num_steps = num_steps, ax_idx = i)
num_replications = len(df_for_num_steps_eg0pt1)
#
#
ax[i].set_xlabel("|Proportion of samples in Condtion 1 - 0.5| For Number of participants = {} = {}".format(size_vars[i], num_steps))
# ax[i].legend()
ax[i].set_ylim(0,1.02)
ax[i].set_xlim(0, 0.501)
ax[i].set_ylabel("Difference in Arm Mean Estimates |$\hatp1$ - $\hatp2$|")
i +=1
fig.suptitle(title)
fig.subplots_adjust(top=0.80)
# fig.tight_layout(rect=[0, 0.03, 1, 0.90])
# if not os.path.isdir("plots"):
# os.path.mkdir("plots")
save_dir_ne = "../simulation_analysis_saves/scatter_correlation/NoEffect/"
save_dir_e = "../simulation_analysis_saves/scatter_correlation/Effect/"
Path(save_dir_ne).mkdir(parents=True, exist_ok=True)
Path(save_dir_e).mkdir(parents=True, exist_ok=True)
save_str_ne = save_dir_ne + "{}.png".format(title)
save_str_e = save_dir_e + "{}.png".format(title)
# save_str_ne = "../simulation_analysis_saves/scatter_correlation/NoEffect/{}/{}.png".format(alg_key, title)
# save_str_e = "../simulation_analysis_saves/scatter_correlation/Effect/{}/{}.png".format(alg_key, title)
if "No Effect" in title:
print("saving to ", save_str_ne)
fig.savefig(save_str_ne, bbox_inches = "tight")
elif "With Effect" in title:
print("saving to ", save_str_e)
fig.savefig(save_str_e, bbox_inches = "tight")
#plt.show()
plt.clf()
plt.close()
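# Illustrative call sketch (assumption, not part of the original script): the df_* frames
# are per-algorithm simulation result tables with columns such as 'num_steps', 'wald_pval',
# 'wald_type_stat', 'sample_size_1', 'mean_1' and 'mean_2'.
#
#   scatter_correlation_helper_outer(df_eg0pt1=df_eg0pt1, df_eg0pt3=df_eg0pt3,
#                                    df_unif=df_unif, df_ts=df_ts,
#                                    n=32, num_sims=500, effect_size=0)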
|
[
"numpy.load",
"numpy.abs",
"matplotlib.pyplot.clf",
"ipdb.set_trace",
"matplotlib.pyplot.close",
"scipy.stats.spearmanr",
"sys.path.insert",
"scipy.stats.pearsonr",
"pathlib.Path",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.rc",
"pickle.load",
"numpy.round",
"matplotlib.pyplot.subplots"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((225, 268), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../le_experiments/"""'], {}), "(1, '../../le_experiments/')\n", (240, 268), False, 'import sys\n'), ((1001, 1032), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'SMALL_SIZE'}), "('font', size=SMALL_SIZE)\n", (1007, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1072, 1108), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'SMALL_SIZE'}), "('axes', titlesize=SMALL_SIZE)\n", (1078, 1108), True, 'import matplotlib.pyplot as plt\n'), ((1142, 1179), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'MEDIUM_SIZE'}), "('axes', labelsize=MEDIUM_SIZE)\n", (1148, 1179), True, 'import matplotlib.pyplot as plt\n'), ((1216, 1246), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(8.5)'}), "('xtick', labelsize=8.5)\n", (1222, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1280, 1309), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(10)'}), "('ytick', labelsize=10)\n", (1286, 1309), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1380), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'SMALL_SIZE'}), "('legend', fontsize=SMALL_SIZE)\n", (1349, 1380), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1441), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'BIGGER_SIZE'}), "('figure', titlesize=BIGGER_SIZE)\n", (1408, 1441), True, 'import matplotlib.pyplot as plt\n'), ((1580, 1592), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (1589, 1592), True, 'import numpy as np\n'), ((4787, 4805), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (4799, 4805), True, 'import matplotlib.pyplot as plt\n'), ((7111, 7120), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7118, 7120), True, 'import matplotlib.pyplot as plt\n'), ((7125, 7136), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7134, 7136), True, 'import matplotlib.pyplot as plt\n'), ((7380, 7424), 'numpy.abs', 'np.abs', (['(df_reject[x_label] / num_steps - 0.5)'], {}), '(df_reject[x_label] / num_steps - 0.5)\n', (7386, 7424), True, 'import numpy as np\n'), ((7833, 7877), 'numpy.abs', 'np.abs', (['(df_accept[x_label] / num_steps - 0.5)'], {}), '(df_accept[x_label] / num_steps - 0.5)\n', (7839, 7877), True, 'import numpy as np\n'), ((8302, 8339), 'numpy.abs', 'np.abs', (['(df[x_label] / num_steps - 0.5)'], {}), '(df[x_label] / num_steps - 0.5)\n', (8308, 8339), True, 'import numpy as np\n'), ((8412, 8442), 'numpy.round', 'np.round', (['proportion_reject', '(3)'], {}), '(proportion_reject, 3)\n', (8420, 8442), True, 'import numpy as np\n'), ((8458, 8489), 'scipy.stats.spearmanr', 'spearmanr', (['xvals_all', 'yvals_all'], {}), '(xvals_all, yvals_all)\n', (8467, 8489), False, 'from scipy.stats import spearmanr\n'), ((8502, 8520), 'numpy.round', 'np.round', (['coeff', '(3)'], {}), '(coeff, 3)\n', (8510, 8520), True, 'import numpy as np\n'), ((8529, 8543), 'numpy.round', 'np.round', (['p', '(3)'], {}), '(p, 3)\n', (8537, 8543), True, 'import numpy as np\n'), ((8570, 8600), 'scipy.stats.pearsonr', 'pearsonr', (['xvals_all', 'yvals_all'], {}), '(xvals_all, yvals_all)\n', (8578, 8600), False, 'from scipy.stats import pearsonr\n'), ((8618, 8641), 'numpy.round', 'np.round', (['coeff_pear', '(3)'], {}), '(coeff_pear, 3)\n', (8626, 8641), True, 'import numpy as np\n'), ((8655, 8674), 'numpy.round', 
'np.round', (['p_pear', '(3)'], {}), '(p_pear, 3)\n', (8663, 8674), True, 'import numpy as np\n'), ((12318, 12336), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (12330, 12336), True, 'import matplotlib.pyplot as plt\n'), ((15140, 15149), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15147, 15149), True, 'import matplotlib.pyplot as plt\n'), ((15154, 15165), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15163, 15165), True, 'import matplotlib.pyplot as plt\n'), ((8085, 8101), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (8099, 8101), False, 'import ipdb\n'), ((2983, 2997), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2994, 2997), False, 'import pickle\n'), ((3069, 3083), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3080, 3083), False, 'import pickle\n'), ((3152, 3166), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3163, 3166), False, 'import pickle\n'), ((3227, 3248), 'numpy.load', 'np.load', (['to_check_ipw'], {}), '(to_check_ipw)\n', (3234, 3248), True, 'import numpy as np\n'), ((6418, 6435), 'pathlib.Path', 'Path', (['save_dir_ne'], {}), '(save_dir_ne)\n', (6422, 6435), False, 'from pathlib import Path\n'), ((6475, 6491), 'pathlib.Path', 'Path', (['save_dir_e'], {}), '(save_dir_e)\n', (6479, 6491), False, 'from pathlib import Path\n'), ((13539, 13555), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (13553, 13555), False, 'import ipdb\n'), ((14433, 14450), 'pathlib.Path', 'Path', (['save_dir_ne'], {}), '(save_dir_ne)\n', (14437, 14450), False, 'from pathlib import Path\n'), ((14490, 14506), 'pathlib.Path', 'Path', (['save_dir_e'], {}), '(save_dir_e)\n', (14494, 14506), False, 'from pathlib import Path\n'), ((3352, 3366), 'pickle.load', 'pickle.load', (['t'], {}), '(t)\n', (3363, 3366), False, 'import pickle\n')]
|
"""LogRegression trains a logistic regression model implemented by
Scikit-Learn on the given dataset. Before training, the user is
prompted for parameter input. After training, model metrics are
displayed, and the user can make new predictions.
View the documentation at https://manufacturingnet.readthedocs.io/.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, confusion_matrix, make_scorer,
roc_auc_score, roc_curve)
from sklearn.model_selection import (GridSearchCV, cross_val_score,
train_test_split)
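# Illustrative usage sketch (assumption, not part of the original module): X and y are
# numpy arrays holding the features and class labels of an existing dataset.
#
#   model = LogRegression(attributes=X, labels=y)
#   model.run()                    # prompts for parameters, trains, prints metrics
#   y_new = model.predict(X_new)   # predictions for unseen feature rows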
class LogRegression:
"""Class framework for logistic regression model."""
def __init__(self, attributes=None, labels=None):
"""Initializes a LogisticRegression object."""
self.attributes = attributes
self.labels = labels
self.test_size = None
self.cv = None
self.graph_results = None
self.fpr = None
self.tpr = None
self.bin = False
self.gridsearch = False
self.gs_params = None
self.gs_result = None
self.regression = None
self.classes = None
self.coefficients = None
self.intercept = None
self.n_iter = None
self.accuracy = None
self.precision = None
self.recall = None
self.roc_auc = None
self.confusion_matrix = None
self.cross_val_scores = None
# Accessor methods
def get_attributes(self):
"""Accessor method for attributes."""
return self.attributes
def get_labels(self):
"""Accessor method for labels."""
return self.labels
def get_classes(self):
"""Accessor method for classes."""
return self.classes
def get_regression(self):
"""Accessor method for regression."""
return self.regression
def get_coefficents(self):
"""Accessor method for coefficients."""
return self.coefficients
def get_n_iter(self):
"""Accessor method for n_iter."""
return self.n_iter
def get_accuracy(self):
"""Accessor method for accuracy."""
return self.accuracy
def get_roc_auc(self):
"""Accessor method for roc_auc."""
return self.roc_auc
def get_confusion_matrix(self):
"""Accessor method for confusion_matrix."""
return self.confusion_matrix
def get_cross_val_scores(self):
"""Accessor method for cross_val_scores."""
return self.cross_val_scores
# Modifier methods
def set_attributes(self, new_attributes=None):
"""Modifier method for attributes."""
self.attributes = new_attributes
def set_labels(self, new_labels=None):
"""Modifier method for labels."""
self.labels = new_labels
# Wrapper for logistic regression model
def run(self):
"""Performs logistic regression on dataset and updates relevant
instance data.
"""
if self._check_inputs():
# Instantiate LogisticRegression() object using helper method
self.regression = self._create_model()
# Split into training and testing set
dataset_X_train, dataset_X_test, dataset_y_train, dataset_y_test = \
train_test_split(self.attributes, self.labels,
test_size=self.test_size)
# Train the model and get resultant coefficients
# Handle exception if arguments are incorrect
try:
self.regression.fit(dataset_X_train, np.ravel(dataset_y_train))
except Exception as e:
print("An exception occurred while training the regression",
"model. Check your inputs and try again.")
print("Here is the exception message:")
print(e)
self.regression = None
return
# Get resultant model instance data
self.classes = self.regression.classes_
self.coefficients = self.regression.coef_
self.intercept = self.regression.intercept_
self.n_iter = self.regression.n_iter_
# Make predictions using testing set
y_prediction = self.regression.predict(dataset_X_test)
# Metrics
self.accuracy = accuracy_score(y_prediction, dataset_y_test)
probas = self.regression.predict_proba(dataset_X_test)
# If classification is binary, calculate roc_auc
if probas.shape[1] == 2:
self.bin = True
                self.roc_auc = roc_auc_score(dataset_y_test, probas[::, 1])
self.fpr, self.tpr, _ = roc_curve(
dataset_y_test, probas[::, 1])
# Else, calculate confusion matrix
else:
self.confusion_matrix = \
confusion_matrix(dataset_y_test, y_prediction)
self.cross_val_scores = cross_val_score(self.regression,
self.attributes,
self.labels, cv=self.cv)
# Output results
self._output_results()
def predict(self, dataset_X=None):
"""Predicts the output of each datapoint in dataset_X using the
regression model. Returns the predictions.
"""
# Check that run() has already been called
if self.regression is None:
print("The regression model seems to be missing. Have you called",
"run() yet?")
return None
# Try to make the prediction
# Handle exception if dataset_X isn't a valid input
try:
y_prediction = self.regression.predict(dataset_X)
except Exception as e:
print("The model failed to run. Check your inputs and try again.")
print("Here is the exception message:")
print(e)
return None
print("\nLogRegression Predictions:\n", y_prediction, "\n")
return y_prediction
# Helper methods
def _create_model(self):
"""Runs UI for getting parameters and creating model."""
print("\n==================================")
print("= LogRegression Parameter Inputs =")
print("==================================\n")
print("Default values:",
"test_size = 0.25",
"cv = 5",
"graph_results = False",
"penalty = 'l2'",
"dual = False",
"tol = 0.0001",
"C = 1.0",
"fit_intercept = True",
"intercept_scaling = 1",
"class_weight = None",
"random_state = None",
"solver = 'lbfgs'",
"max_iter = 100",
"multi_class = 'auto'",
"verbose = False",
"warm_start = False",
"n_jobs = None",
"l1_ratio = None", sep="\n")
# Set defaults
self.test_size = 0.25
self.cv = None
self.graph_results = False
while True:
user_input = input("\nUse default parameters (Y/n)? ").lower()
if user_input in {"y", ""}:
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
return LogisticRegression()
elif user_input == "n":
break
else:
print("Invalid input.")
print("\nIf you are unsure about a parameter, press enter to use its",
"default value.")
print("If you finish entering parameters early, enter 'q' to skip",
"ahead.\n")
# Set more defaults
penalty = "l2"
dual = False
tol = 0.0001
C = 1.0
fit_intercept = True
intercept_scaling = 1
class_weight = None
random_state = None
solver = "lbfgs"
max_iter = 100
multi_class = "auto"
verbose = 0
warm_start = False
n_jobs = None
l1_ratio = None
# Get user parameter input
while True:
break_early = False
while True:
user_input = input("\nWhat fraction of the dataset should be the "
+ "testing set (0,1)? ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0 or user_input >= 1:
raise Exception
self.test_size = user_input
break
except Exception:
print("Invalid input.")
print("test_size =", self.test_size)
if break_early:
break
while True:
user_input = input("\nUse GridSearch to find the best "
+ "hyperparameters (y/N)? ").lower()
if user_input == "q":
break_early = True
break
elif user_input in {"n", "y", ""}:
break
else:
print("Invalid input.")
if break_early:
break
while user_input == "y":
print("\n= GridSearch Parameter Inputs =\n")
print("Enter 'q' to skip GridSearch.")
self.gridsearch = True
params = {}
print("\nWarnings:")
print("Solvers 'lbfgs', 'newton-cg', 'sag', and 'saga' support",
"only 'l2' or no penalty.")
print("Solver 'liblinear' requires a penalty.")
print("Penalty 'elasticnet' is only supported by the",
"'saga' solver.")
print("Failing to heed these warnings may crash GridSearch!")
while True:
print("\nEnter the classifier penalties to evaluate.")
print("Options: 1-'l1', 2-'l2', 3-'elasticnet'. Enter 'all'",
"for all options.")
print("Example input: 1,2,3")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
elif user_input == "all":
pen_params = ["l1", "l2", "elasticnet"]
break
else:
pen_dict = {1: "l1", 2: "l2", 3: "elasticnet"}
try:
pen_params_int = \
list(map(int, list(user_input.split(","))))
if len(pen_params_int) > len(pen_dict):
raise Exception
pen_params = []
for each in pen_params_int:
if not pen_dict.get(each):
raise Exception
pen_params.append(pen_dict.get(each))
break
except Exception:
print("Invalid input.")
if break_early:
break
params["penalty"] = pen_params
print("penalties:", pen_params)
while True:
print("\nEnter the solvers to evaluate.")
print("Options: 1-'newton-cg', 2-'lbfgs', 3-'liblinear',",
"4-'sag', 5-'saga'. Enter 'all' for all options.")
print("Example input: 1,2,3")
user_input = input().lower()
if user_input == "q":
self.gridsearch = False
break_early = True
break
elif user_input == "all":
sol_params = ["newton-cg", "lbfgs", "liblinear", "sag",
"saga"]
break
else:
sol_dict = {1: "newton-cg", 2: "lbfgs", 3: "liblinear",
4: "sag", 5: "saga"}
try:
sol_params_int = \
list(map(int, list(user_input.split(","))))
if len(sol_params_int) > len(sol_dict):
raise Exception
sol_params = []
for each in sol_params_int:
if not sol_dict.get(each):
raise Exception
sol_params.append(sol_dict.get(each))
break
except Exception:
print("Invalid input.")
if break_early:
break
params["solver"] = sol_params
print("solvers:", sol_params)
print("\n= End of GridSearch inputs. =\n")
self.gs_params = params
best_params = self._run_gridsearch()
solver = best_params["solver"]
penalty = best_params["penalty"]
break
break_early = False
while True:
user_input = input("\nEnter the number of folds for cross "
+ "validation [2,): ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input < 2:
raise Exception
self.cv = user_input
break
except Exception:
print("Invalid input.")
print("cv =", self.cv)
if break_early:
break
while True:
user_input = \
input("\nGraph the ROC curve? Only binary classification "
+ "is supported (y/N): ").lower()
if user_input == "y":
self.graph_results = True
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("graph_results =", self.graph_results)
if break_early:
break
while not self.gridsearch:
print("\nWhich algorithm should be used in the optimization",
"problem?")
user_input = input("Enter 1 for 'newton-cg', 2 for 'lbfgs', 3 "
+ "for 'liblinear', 4 for 'sag', or 5 for "
+ "'saga': ").lower()
if user_input == "1":
solver = "newton-cg"
break
elif user_input == "3":
solver = "liblinear"
break
elif user_input == "4":
solver = "sag"
break
elif user_input == "5":
solver = "saga"
break
elif user_input in {"2", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
if not self.gridsearch:
print("solver =", solver)
if break_early:
break
while not self.gridsearch:
print("\nWhich norm should be used in penalization?")
user_input = input("Enter 1 for 'l1', 2 for 'l2', 3 for "
+ "'elasticnet', or 4 for 'none': ").lower()
if solver in {"newton-cg", "lbfgs", "sag"} \
and user_input not in {"2", "4"}:
print("Invalid input.")
print("Solvers 'newton-cg', 'sag', and 'lbfgs' support",
"only 'l2' or no penalty.")
continue
if user_input == "3" and solver != "saga":
print("Invalid input.")
print("'elasticnet' is only supported by the 'saga' solver.")
continue
if user_input == "4" and solver == "liblinear":
print("Invalid input.")
print("Solver 'liblinear' requires a penalty.")
continue
if user_input == "1":
penalty = "l1"
break
elif user_input == "3":
penalty = "elasticnet"
break
elif user_input == "4":
penalty = "none"
break
elif user_input in {"2", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
if not self.gridsearch:
print("penalty =", penalty)
if break_early:
break
while True:
user_input = input("\nUse dual formulation (y/N)? ").lower()
if user_input == "y":
dual = True
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("dual =", dual)
if break_early:
break
while True:
user_input = input("\nEnter a positive number for the tolerance "
+ "for stopping criteria: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
tol = user_input
break
except Exception:
print("Invalid input.")
print("tol =", tol)
if break_early:
break
while True:
user_input = input("\nEnter a positive number for the inverse "
+ "of regularization strength C: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = float(user_input)
if user_input <= 0:
raise Exception
C = user_input
break
except Exception:
print("Invalid input.")
print("C =", C)
if break_early:
break
while True:
user_input = \
input("\nInclude a y-intercept in the model (Y/n)? ").lower()
if user_input == "n":
fit_intercept = False
break
elif user_input in {"y", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("fit_intercept =", fit_intercept)
if break_early:
break
while fit_intercept:
user_input = input("\nEnter a number for the intercept "
+ "scaling factor: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
intercept_scaling = float(user_input)
break
except Exception:
print("Invalid input.")
if fit_intercept:
print("intercept_scaling =", intercept_scaling)
if break_early:
break
while True:
user_input = input("\nAutomatically balance the class weights "
+ "(y/N)? ").lower()
if user_input == "y":
class_weight = "balanced"
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("class_weight =", class_weight)
if break_early:
break
print("\nTo set manual weights, call",
"get_regression().set_params() to set the class_weight",
"parameter.")
while True:
user_input = \
input("\nEnter an integer for the random number seed: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
random_state = int(user_input)
break
except Exception:
print("Invalid input.")
print("random_state =", random_state)
if break_early:
break
while True:
user_input = \
input("\nEnter a positive maximum number of iterations: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input <= 0:
raise Exception
max_iter = user_input
break
except Exception:
print("Invalid input.")
print("max_iter =", max_iter)
if break_early:
break
while True:
print("\nPlease choose a multiclass scheme.")
user_input = input("Enter 1 for one-vs-rest, 2 for multinomial, "
+ "or 3 to automatically choose: ").lower()
if user_input == "1":
multi_class = "ovr"
break
elif user_input == "2":
multi_class = "multinomial"
break
elif user_input in {"3", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("multi_class =", multi_class)
if break_early:
break
while True:
user_input = input("\nEnable verbose output during training "
+ "(y/N)? ").lower()
if user_input == "y":
verbose = 1
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("verbose =", bool(verbose))
if break_early:
break
while True:
user_input = \
input("\nEnable warm start? This will use the previous "
+ "solution for fitting (y/N): ").lower()
if user_input == "y":
warm_start = True
break
elif user_input in {"n", ""}:
break
elif user_input == "q":
break_early = True
break
else:
print("Invalid input.")
print("warm_start =", warm_start)
if break_early:
break
while multi_class == "ovr":
print("\nEnter a positive number of CPU cores to use.")
user_input = input("Enter -1 to use all cores: ")
try:
if user_input == "":
break
elif user_input.lower() == "q":
break_early = True
break
user_input = int(user_input)
if user_input <= 0 and user_input != -1:
raise Exception
n_jobs = user_input
break
except Exception:
print("Invalid input.")
if multi_class == "ovr":
print("n_jobs =", n_jobs)
if break_early:
break
while penalty == "elasticnet":
user_input = input("\nEnter a decimal for the Elastic-Net "
+ "mixing parameter [0,1]: ")
try:
if user_input.lower() in {"q", ""}:
break
user_input = float(user_input)
if user_input < 0 or user_input > 1:
raise Exception
l1_ratio = user_input
break
except Exception:
print("Invalid input.")
if penalty == "elasticnet":
print("l1_ratio =", l1_ratio)
break
print("\n===========================================")
print("= End of inputs; press enter to continue. =")
input("===========================================\n")
return LogisticRegression(penalty=penalty, dual=dual, tol=tol, C=C,
fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
class_weight=class_weight,
random_state=random_state, solver=solver,
max_iter=max_iter, multi_class=multi_class,
verbose=verbose, warm_start=warm_start,
n_jobs=n_jobs, l1_ratio=l1_ratio)
def _output_results(self):
"""Outputs model metrics after run() finishes."""
print("\n=========================")
print("= LogRegression Results =")
print("=========================\n")
print("Classes:\n", self.classes)
print("\nNumber of Iterations:\n", self.n_iter)
print("\n{:<20} {:<20}".format("Accuracy:", self.accuracy))
if self.bin:
print("\n{:<20} {:<20}".format("ROC AUC:", self.roc_auc))
else:
print("\nConfusion Matrix:\n", self.confusion_matrix)
print("\nCross Validation Scores: ", self.cross_val_scores)
if self.gridsearch:
print("\n{:<20} {:<20}".format("GridSearch Score:",
self.gs_result))
if self.bin and self.graph_results:
plt.plot(self.fpr, self.tpr)
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
plt.legend(loc=4)
plt.show()
print("\n\nCall predict() to make predictions for new data.")
print("\n===================")
print("= End of results. =")
print("===================\n")
def _run_gridsearch(self):
"""Runs GridSearch with the parameters given in run(). Returns
the best parameters."""
acc_scorer = make_scorer(accuracy_score)
clf = LogisticRegression()
dataset_X_train, dataset_X_test, dataset_y_train, dataset_y_test = \
train_test_split(self.attributes, self.labels,
test_size=self.test_size)
# Run GridSearch
grid_obj = GridSearchCV(clf, self.gs_params, scoring=acc_scorer)
grid_obj = grid_obj.fit(dataset_X_train, dataset_y_train)
# Set the clf to the best combination of parameters
clf = grid_obj.best_estimator_
# Fit the best algorithm to the data
clf.fit(dataset_X_train, dataset_y_train)
predictions = clf.predict(dataset_X_test)
self.gs_result = accuracy_score(dataset_y_test, predictions)
# Return the best parameters
print("\nBest GridSearch Parameters:\n", grid_obj.best_params_, "\n")
return grid_obj.best_params_
def _check_inputs(self):
"""Verifies if the instance data is ready for use in logistic
regression model.
"""
# Check if attributes exists
if self.attributes is None:
print("attributes is missing; call set_attributes(new_attributes)",
"to fix this! new_attributes should be a populated numpy",
"array of your independent variables.")
return False
# Check if labels exists
if self.labels is None:
print("labels is missing; call set_labels(new_labels) to fix this!",
"new_labels should be a populated numpy array of your",
"dependent variables.")
return False
# Check if attributes and labels have same number of rows (samples)
if self.attributes.shape[0] != self.labels.shape[0]:
print("attributes and labels don't have the same number of rows.",
"Make sure the number of samples in each dataset matches!")
return False
return True
|
[
"matplotlib.pyplot.title",
"sklearn.model_selection.GridSearchCV",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"sklearn.metrics.roc_curve",
"numpy.ravel",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.cross_val_score",
"matplotlib.pyplot.legend",
"sklearn.metrics.make_scorer",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((27414, 27747), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': 'penalty', 'dual': 'dual', 'tol': 'tol', 'C': 'C', 'fit_intercept': 'fit_intercept', 'intercept_scaling': 'intercept_scaling', 'class_weight': 'class_weight', 'random_state': 'random_state', 'solver': 'solver', 'max_iter': 'max_iter', 'multi_class': 'multi_class', 'verbose': 'verbose', 'warm_start': 'warm_start', 'n_jobs': 'n_jobs', 'l1_ratio': 'l1_ratio'}), '(penalty=penalty, dual=dual, tol=tol, C=C, fit_intercept=\n fit_intercept, intercept_scaling=intercept_scaling, class_weight=\n class_weight, random_state=random_state, solver=solver, max_iter=\n max_iter, multi_class=multi_class, verbose=verbose, warm_start=\n warm_start, n_jobs=n_jobs, l1_ratio=l1_ratio)\n', (27432, 27747), False, 'from sklearn.linear_model import LogisticRegression\n'), ((29358, 29385), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (29369, 29385), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, make_scorer, roc_auc_score, roc_curve\n'), ((29400, 29420), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (29418, 29420), False, 'from sklearn.linear_model import LogisticRegression\n'), ((29510, 29582), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.attributes', 'self.labels'], {'test_size': 'self.test_size'}), '(self.attributes, self.labels, test_size=self.test_size)\n', (29526, 29582), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\n'), ((29657, 29710), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['clf', 'self.gs_params'], {'scoring': 'acc_scorer'}), '(clf, self.gs_params, scoring=acc_scorer)\n', (29669, 29710), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\n'), ((30048, 30091), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['dataset_y_test', 'predictions'], {}), '(dataset_y_test, predictions)\n', (30062, 30091), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, make_scorer, roc_auc_score, roc_curve\n'), ((3380, 3452), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.attributes', 'self.labels'], {'test_size': 'self.test_size'}), '(self.attributes, self.labels, test_size=self.test_size)\n', (3396, 3452), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\n'), ((4452, 4496), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_prediction', 'dataset_y_test'], {}), '(y_prediction, dataset_y_test)\n', (4466, 4496), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, make_scorer, roc_auc_score, roc_curve\n'), ((5165, 5239), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['self.regression', 'self.attributes', 'self.labels'], {'cv': 'self.cv'}), '(self.regression, self.attributes, self.labels, cv=self.cv)\n', (5180, 5239), False, 'from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\n'), ((28807, 28835), 'matplotlib.pyplot.plot', 'plt.plot', (['self.fpr', 'self.tpr'], {}), '(self.fpr, self.tpr)\n', (28815, 28835), True, 'import matplotlib.pyplot as plt\n'), ((28848, 28881), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (28858, 28881), True, 'import matplotlib.pyplot as plt\n'), ((28894, 28926), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True 
Positive Rate')\n", (28904, 28926), True, 'import matplotlib.pyplot as plt\n'), ((28939, 28961), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC Curve"""'], {}), "('ROC Curve')\n", (28948, 28961), True, 'import matplotlib.pyplot as plt\n'), ((28974, 28991), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (28984, 28991), True, 'import matplotlib.pyplot as plt\n'), ((29004, 29014), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29012, 29014), True, 'import matplotlib.pyplot as plt\n'), ((4892, 4931), 'sklearn.metrics.roc_curve', 'roc_curve', (['dataset_y_test', 'probas[:, 1]'], {}), '(dataset_y_test, probas[:, 1])\n', (4901, 4931), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, make_scorer, roc_auc_score, roc_curve\n'), ((5081, 5127), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['dataset_y_test', 'y_prediction'], {}), '(dataset_y_test, y_prediction)\n', (5097, 5127), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, make_scorer, roc_auc_score, roc_curve\n'), ((7687, 7707), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (7705, 7707), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3676, 3701), 'numpy.ravel', 'np.ravel', (['dataset_y_train'], {}), '(dataset_y_train)\n', (3684, 3701), True, 'import numpy as np\n')]
|
from __future__ import division
from builtins import str
import numpy
import os
import sys
import logging
from ektelo.algorithm.dawa.cutils import cutil
from ektelo.algorithm.dawa.partition_engines import partition_engine
from ektelo import util
class l1partition_engine(partition_engine.partition_engine):
"""Use the L1 partition method."""
def __init__(self):
self.init_params = util.init_params_from_locals(locals())
@staticmethod
def Run(x, epsilon, ratio,seed):
return L1partition(x, epsilon, ratio, gethist=True,seed =seed)
class l1partition_approx_engine(partition_engine.partition_engine):
"""Use the approximate L1 partition method."""
def __init__(self):
self.init_params = util.init_params_from_locals(locals())
@staticmethod
def Run(x, epsilon, ratio,seed):
return L1partition_approx(x, epsilon, ratio, gethist=True,seed = seed)
def L1partition(x, epsilon, ratio=0.5, gethist=False,seed=None):
"""Compute the noisy L1 histogram using all interval buckets
Args:
x - list of numeric values. The input data vector
epsilon - double. Total private budget
    ratio - double in (0, 1). Use ratio*epsilon for partition computation and (1-ratio)*epsilon for querying
                            the count in each partition
    gethist - boolean. If set to True, return the partition directly (the privacy budget used is still ratio*epsilon)
Return:
if gethist == False, return an estimated data vector. Otherwise, return the partition
"""
assert seed is not None, "seed must be set"
prng = numpy.random.RandomState(seed)
assert (x.dtype == numpy.dtype(int) or x.dtype == numpy.dtype("int32")), "Input vector must be int! %s given" %x.dtype
    y = x.astype('int32') # cast to int32 (the original numpy int type is not JSON serializable)
check = (x ==y)
assert check.sum() == len(check), "Casting error from int to int32"
x=y
n = len(x)
hist = cutil.L1partition(n+1, x, epsilon, ratio, prng.randint(500000))
hatx = numpy.zeros(n)
rb = n
if gethist:
bucks = []
for lb in hist[1:]:
bucks.insert(0, [lb, rb-1])
rb = lb
if lb == 0:
break
logging.debug(' L1-PART: number of buckets %s' % str(bucks[:5]) )
return bucks
else:
for lb in hist[1:]:
hatx[lb:rb] = util.old_div(max(0, sum(x[lb:rb]) + prng.laplace(0, util.old_div(1.0,(epsilon*(1-ratio))), 1)), float(rb - lb))
rb = lb
if lb == 0:
break
return hatx
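# Illustrative usage sketch (assumption, not part of the original module):
#
#   import numpy
#   x = numpy.array([3, 5, 0, 0, 2, 8], dtype='int32')
#   hatx = L1partition(x, epsilon=1.0, ratio=0.5, seed=42)                    # noisy estimate
#   buckets = L1partition(x, epsilon=1.0, ratio=0.5, gethist=True, seed=42)   # partition only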
def L1partition_approx(x, epsilon, ratio=0.5, gethist=False,seed =None):
"""Compute the noisy L1 histogram using interval buckets of size 2^k
Args:
x - list of numeric values. The input data vector
epsilon - double. Total private budget
    ratio - double in (0, 1). Use ratio*epsilon for partition computation and (1-ratio)*epsilon for querying
                            the count in each partition
    gethist - boolean. If set to True, return the partition directly (the privacy budget used is still ratio*epsilon)
Return:
if gethist == False, return an estimated data vector. Otherwise, return the partition
"""
assert seed is not None, "seed must be set"
prng = numpy.random.RandomState(seed)
n = len(x)
# check that the input vector x is of appropriate type
assert (x.dtype == numpy.dtype(int) or x.dtype == numpy.dtype("int32")), "Input vector must be int! %s given" %x.dtype
    y = x.astype('int32') # cast to int32 (the original numpy int type is not JSON serializable)
check = (x ==y)
assert check.sum() == len(check), "Casting error from int to int32"
x=y
hist = cutil.L1partition_approx(n+1, x, epsilon, ratio, prng.randint(500000))
hatx = numpy.zeros(n)
rb = n
if gethist:
bucks = []
for lb in hist[1:]:
bucks.insert(0, [lb, rb-1])
rb = lb
if lb == 0:
break
return bucks
else:
for lb in hist[1:]:
hatx[lb:rb] = util.old_div(max(0, sum(x[lb:rb]) + prng.laplace(0, util.old_div(1.0,(epsilon*(1-ratio))), 1)), float(rb - lb))
rb = lb
if lb == 0:
break
return hatx
|
[
"numpy.dtype",
"numpy.zeros",
"numpy.random.RandomState",
"ektelo.util.old_div",
"builtins.str"
] |
[((1623, 1653), 'numpy.random.RandomState', 'numpy.random.RandomState', (['seed'], {}), '(seed)\n', (1647, 1653), False, 'import numpy\n'), ((2051, 2065), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (2062, 2065), False, 'import numpy\n'), ((3345, 3375), 'numpy.random.RandomState', 'numpy.random.RandomState', (['seed'], {}), '(seed)\n', (3369, 3375), False, 'import numpy\n'), ((3839, 3853), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (3850, 3853), False, 'import numpy\n'), ((1678, 1694), 'numpy.dtype', 'numpy.dtype', (['int'], {}), '(int)\n', (1689, 1694), False, 'import numpy\n'), ((1709, 1729), 'numpy.dtype', 'numpy.dtype', (['"""int32"""'], {}), "('int32')\n", (1720, 1729), False, 'import numpy\n'), ((3474, 3490), 'numpy.dtype', 'numpy.dtype', (['int'], {}), '(int)\n', (3485, 3490), False, 'import numpy\n'), ((3505, 3525), 'numpy.dtype', 'numpy.dtype', (['"""int32"""'], {}), "('int32')\n", (3516, 3525), False, 'import numpy\n'), ((2304, 2318), 'builtins.str', 'str', (['bucks[:5]'], {}), '(bucks[:5])\n', (2307, 2318), False, 'from builtins import str\n'), ((2477, 2517), 'ektelo.util.old_div', 'util.old_div', (['(1.0)', '(epsilon * (1 - ratio))'], {}), '(1.0, epsilon * (1 - ratio))\n', (2489, 2517), False, 'from ektelo import util\n'), ((4186, 4226), 'ektelo.util.old_div', 'util.old_div', (['(1.0)', '(epsilon * (1 - ratio))'], {}), '(1.0, epsilon * (1 - ratio))\n', (4198, 4226), False, 'from ektelo import util\n')]
|
import os, io, csv, json
import requests, argparse
import pandas as pd
import numpy as np
from ast import literal_eval
from datetime import datetime
from panoptes_client import Project, Panoptes
from collections import OrderedDict, Counter
from sklearn.cluster import DBSCAN
import kso_utils.db_utils as db_utils
from kso_utils.zooniverse_utils import auth_session
def bb_iou(boxA, boxB):
# Compute edges
temp_boxA = boxA.copy()
temp_boxB = boxB.copy()
temp_boxA[2], temp_boxA[3] = (
temp_boxA[0] + temp_boxA[2],
temp_boxA[1] + temp_boxA[3],
)
temp_boxB[2], temp_boxB[3] = (
temp_boxB[0] + temp_boxB[2],
temp_boxB[1] + temp_boxB[3],
)
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(temp_boxA[0], temp_boxB[0])
yA = max(temp_boxA[1], temp_boxB[1])
xB = min(temp_boxA[2], temp_boxB[2])
yB = min(temp_boxA[3], temp_boxB[3])
# compute the area of intersection rectangle
interArea = abs(max((xB - xA, 0)) * max((yB - yA), 0))
    if interArea == 0:
        # no overlap: return the maximum distance
        return 1
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = abs((temp_boxA[2] - temp_boxA[0]) * (temp_boxA[3] - temp_boxA[1]))
boxBArea = abs((temp_boxB[2] - temp_boxB[0]) * (temp_boxB[3] - temp_boxB[1]))
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
    # return the intersection-over-union distance (1 - IoU), used as a DBSCAN metric
    return 1 - iou
def filter_bboxes(total_users, users, bboxes, obj, eps, iua):
    # If a large enough fraction (>= obj) of the users who saw this frame decided that there was an object
user_count = pd.Series(users).nunique()
if user_count / total_users >= obj:
# Get clusters of annotation boxes based on iou criterion
cluster_ids = DBSCAN(min_samples=1, metric=bb_iou, eps=eps).fit_predict(bboxes)
# Count the number of users within each cluster
counter_dict = Counter(cluster_ids)
        # Accept a cluster if its annotations cover at least a fraction iua of the users
passing_ids = [k for k, v in counter_dict.items() if v / user_count >= iua]
indices = np.isin(cluster_ids, passing_ids)
final_boxes = []
for i in passing_ids:
# Compute median over all accepted bounding boxes
boxes = np.median(np.array(bboxes)[np.where(cluster_ids == i)], axis=0)
final_boxes.append(boxes)
return indices, final_boxes
else:
return [], bboxes
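# Illustrative sketch (assumption, not part of the original script): clustering three
# user-drawn boxes (x, y, w, h) with the IoU-distance metric defined above.
#
#   boxes = [np.array([10, 10, 50, 50]),
#            np.array([12, 11, 49, 52]),
#            np.array([200, 200, 30, 30])]
#   cluster_ids = DBSCAN(min_samples=1, metric=bb_iou, eps=0.5).fit_predict(boxes)
#   # -> the two overlapping boxes share one cluster id; the third box gets its own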
def main():
"Handles argument parsing and launches the correct function."
parser = argparse.ArgumentParser()
parser.add_argument(
"--user", "-u", help="Zooniverse username", type=str, required=True
)
parser.add_argument(
"--password", "-p", help="Zooniverse password", type=str, required=True
)
parser.add_argument(
"-db",
"--db_path",
type=str,
help="the absolute path to the database file",
default=r"koster_lab.db",
required=True,
)
parser.add_argument(
"-obj",
"--object_thresh",
type=float,
help="Agreement threshold required among different users",
default=0.8,
)
parser.add_argument(
"-zw",
"--zoo_workflow",
type=float,
help="Number of the Zooniverse workflow of interest",
default=12852,
required=False,
)
parser.add_argument(
"-zwv",
"--zoo_workflow_version",
type=float,
help="Version number of the Zooniverse workflow of interest",
default=21.85,
required=False,
)
parser.add_argument(
"-eps",
"--iou_epsilon",
type=float,
help="threshold of iou for clustering",
default=0.5,
)
parser.add_argument(
"-iua",
"--inter_user_agreement",
type=float,
help="proportion of users agreeing on clustering",
default=0.8,
)
parser.add_argument(
"-nu",
"--n_users",
type=float,
help="Minimum number of different Zooniverse users required per clip",
default=5,
required=False,
)
parser.add_argument(
"-du",
"--duplicates_file_id",
help="Google drive id of list of duplicated subjects",
type=str,
required=False,
)
args = parser.parse_args()
project = auth_session(args.user, args.password)
# Get the export classifications
export = project.get_export("classifications")
# Save the response as pandas data frame
rawdata = pd.read_csv(
io.StringIO(export.content.decode("utf-8")),
usecols=[
"user_name",
"subject_ids",
"subject_data",
"classification_id",
"workflow_id",
"workflow_version",
"created_at",
"annotations",
],
)
# Filter w2 classifications
w2_data = rawdata[
(rawdata.workflow_id == args.zoo_workflow)
& (rawdata.workflow_version >= args.zoo_workflow_version)
].reset_index()
# Clear duplicated subjects
if args.duplicates_file_id:
w2_data = db_utils.combine_duplicates(w2_data, args.duplicates_file_id)
#Drop NaN columns
w2_data = w2_data.drop(['dupl_subject_id', 'single_subject_id'], 1)
## Check if subjects have been uploaded
# Get species id for each species
conn = db_utils.create_connection(args.db_path)
# Get subject table
uploaded_subjects = pd.read_sql_query(
"SELECT id FROM subjects WHERE subject_type='frame'", conn
)
# Add frame subjects to db that have not been uploaded
new_subjects = w2_data[(~w2_data.subject_ids.isin(uploaded_subjects))]
new_subjects["subject_dict"] = new_subjects["subject_data"].apply(lambda x: [v["retired"] for k,v in json.loads(x).items()][0])
new_subjects = new_subjects[~new_subjects.subject_dict.isnull()].drop("subject_dict", 1)
if len(new_subjects) > 0 and args.zoo_workflow_version > 30:
# Get info of subjects uploaded to the project
export = project.get_export("subjects")
# Save the subjects info as pandas data frame
subjects_df = pd.read_csv(
io.StringIO(export.content.decode("utf-8")),
usecols=["subject_id", "subject_set_id", "created_at"],
)
new_subjects = pd.merge(
new_subjects,
subjects_df,
how="left",
left_on="subject_ids",
right_on="subject_id",
)
# Extract the video filename and annotation details
new_subjects[
[
"frame_number",
"frame_exp_sp_id",
"movie_id",
"classifications_count",
"created_at",
"retired_at",
"retirement_reason",
]
] = pd.DataFrame(
new_subjects["subject_data"]
.apply(
lambda x: [
{
"frame_number": v["frame_number"],
"frame_exp_sp_id": v["frame_exp_sp_id"],
"movie_id": v["movie_id"],
"classifications_count": v["retired"]["classifications_count"],
"created_at": v["retired"]["created_at"],
"retired_at": v["retired"]["retired_at"],
"retirement_reason": v["retired"]["retirement_reason"],
}
for k, v in json.loads(x).items()
][0]
)
.tolist()
)
new_subjects["subject_type"] = "frame"
movies_df = pd.read_sql_query("SELECT id, filename FROM movies", conn)
movies_df = movies_df.rename(
columns={"id": "movie_id", "filename": "movie_filename"}
)
new_subjects = pd.merge(new_subjects, movies_df, how="left", on="movie_id")
new_subjects["filename"] = new_subjects.apply(
lambda x: x["movie_filename"] + "_" + str(x["frame_number"]) + ".jpg",
axis=1,
)
# Set irrelevant columns to None
new_subjects["clip_start_time"] = None
new_subjects["clip_end_time"] = None
new_subjects = new_subjects[
[
"subject_ids",
"subject_type",
"filename",
"clip_start_time",
"clip_end_time",
"frame_exp_sp_id",
"frame_number",
"workflow_id",
"subject_set_id",
"classifications_count",
"retired_at",
"retirement_reason",
"created_at",
"movie_id",
]
]
new_subjects = new_subjects.drop_duplicates(subset="subject_ids")
db_utils.test_table(new_subjects, "subjects", keys=["movie_id"])
# Add values to subjects
db_utils.add_to_table(
args.db_path, "subjects", [tuple(i) for i in new_subjects.values], 14
)
# Calculate the number of users that classified each subject
w2_data["n_users"] = w2_data.groupby("subject_ids")["classification_id"].transform(
"nunique"
)
# Select frames with at least n different user classifications
w2_data = w2_data[w2_data.n_users >= args.n_users]
# Drop workflow and n_users columns
w2_data = w2_data.drop(
columns=[
"workflow_id",
"workflow_version",
"n_users",
"created_at",
]
)
# Extract the video filename and annotation details
subject_data_df = pd.DataFrame(
w2_data["subject_data"]
.apply(
lambda x: [
{
"movie_id": v["movie_id"],
"frame_number": v["frame_number"],
"label": v["label"],
}
for k, v in json.loads(x).items() # if v['retired']
][0],
1,
)
.tolist()
)
w2_data = pd.concat(
[w2_data.reset_index().drop("index", 1), subject_data_df],
axis=1,
ignore_index=True,
)
w2_data = w2_data[w2_data.columns[1:]]
pd.set_option('display.max_columns', None)
w2_data.columns = [
"classification_id",
"user_name",
"annotations",
"subject_data",
"subject_ids",
"movie_id",
"frame_number",
"label",
]
movies_df = pd.read_sql_query("SELECT id, filename FROM movies", conn)
movies_df = movies_df.rename(columns={"id": "movie_id"})
w2_data = pd.merge(w2_data, movies_df, how="left", on="movie_id")
# Convert to dictionary entries
w2_data["movie_id"] = w2_data["movie_id"].apply(lambda x: {"movie_id": x})
w2_data["frame_number"] = w2_data["frame_number"].apply(
lambda x: {"frame_number": x}
)
w2_data["label"] = w2_data["label"].apply(lambda x: {"label": x})
w2_data["user_name"] = w2_data["user_name"].apply(lambda x: {"user_name": x})
w2_data["subject_id"] = w2_data["subject_ids"].apply(lambda x: {"subject_id": x})
w2_data["annotation"] = w2_data["annotations"].apply(
lambda x: literal_eval(x)[0]["value"], 1
)
# Extract annotation metadata
w2_data["annotation"] = w2_data[
["movie_id", "frame_number", "label", "annotation", "user_name", "subject_id"]
].apply(
lambda x: [
OrderedDict(
list(x["movie_id"].items())
+ list(x["frame_number"].items())
+ list(x["label"].items())
+ list(x["annotation"][i].items())
+ list(x["user_name"].items())
+ list(x["subject_id"].items())
)
for i in range(len(x["annotation"]))
]
if len(x["annotation"]) > 0
else [
OrderedDict(
list(x["movie_id"].items())
+ list(x["frame_number"].items())
+ list(x["label"].items())
+ list(x["user_name"].items())
+ list(x["subject_id"].items())
)
],
1,
)
# Convert annotation to format which the tracker expects
ds = [
OrderedDict(
{
"user": i["user_name"],
"movie_id": i["movie_id"],
"label": i["label"],
"start_frame": i["frame_number"],
"x": int(i["x"]) if "x" in i else None,
"y": int(i["y"]) if "y" in i else None,
"w": int(i["width"]) if "width" in i else None,
"h": int(i["height"]) if "height" in i else None,
"subject_id": i["subject_id"] if "subject_id" in i else None,
}
)
for i in w2_data.annotation.explode()
if i is not None and i is not np.nan
]
# Get prepared annotations
w2_full = pd.DataFrame(ds)
w2_annotations = w2_full[w2_full["x"].notnull()]
new_rows = []
final_indices = []
for name, group in w2_annotations.groupby(["movie_id", "label", "start_frame"]):
movie_id, label, start_frame = name
total_users = w2_full[
(w2_full.movie_id == movie_id)
& (w2_full.label == label)
& (w2_full.start_frame == start_frame)
]["user"].nunique()
# Filter bboxes using IOU metric (essentially a consensus metric)
# Keep only bboxes where mean overlap exceeds this threshold
indices, new_group = filter_bboxes(
total_users=total_users,
users=[i[0] for i in group.values],
bboxes=[np.array((i[4], i[5], i[6], i[7])) for i in group.values],
obj=args.object_thresh,
eps=args.iou_epsilon,
iua=args.inter_user_agreement,
)
subject_ids = [i[8] for i in group.values[indices]]
for ix, box in zip(subject_ids, new_group):
new_rows.append(
(
movie_id,
label,
start_frame,
ix,
)
+ tuple(box)
)
w2_annotations = pd.DataFrame(
new_rows,
columns=[
"movie_id",
"label",
"start_frame",
"subject_id",
"x",
"y",
"w",
"h",
],
)
# Get species id for each species
conn = db_utils.create_connection(args.db_path)
# Get subject table
subjects_df = pd.read_sql_query("SELECT id, frame_exp_sp_id FROM subjects", conn)
subjects_df = subjects_df.rename(
columns={"id": "subject_id", "frame_exp_sp_id": "species_id"}
)
w2_annotations = pd.merge(
w2_annotations,
subjects_df,
how="left",
left_on="subject_id",
right_on="subject_id",
validate="many_to_one",
)
# Filter out invalid movies
w2_annotations = w2_annotations[w2_annotations["movie_id"].notnull()][
["species_id", "x", "y", "w", "h", "subject_id"]
]
# Add values to agg_annotations_frame
db_utils.add_to_table(
args.db_path,
"agg_annotations_frame",
[(None,) + tuple(i) for i in w2_annotations.values],
7,
)
print(f"Frame Aggregation Complete: {len(w2_annotations)} annotations added")
if __name__ == "__main__":
main()
|
[
"pandas.DataFrame",
"numpy.isin",
"kso_utils.db_utils.combine_duplicates",
"argparse.ArgumentParser",
"kso_utils.zooniverse_utils.auth_session",
"json.loads",
"pandas.merge",
"kso_utils.db_utils.create_connection",
"numpy.where",
"numpy.array",
"pandas.Series",
"pandas.read_sql_query",
"ast.literal_eval",
"collections.Counter",
"kso_utils.db_utils.test_table",
"pandas.set_option",
"sklearn.cluster.DBSCAN"
] |
[((2739, 2764), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2762, 2764), False, 'import requests, argparse\n'), ((4562, 4600), 'kso_utils.zooniverse_utils.auth_session', 'auth_session', (['args.user', 'args.password'], {}), '(args.user, args.password)\n', (4574, 4600), False, 'from kso_utils.zooniverse_utils import auth_session\n'), ((5603, 5643), 'kso_utils.db_utils.create_connection', 'db_utils.create_connection', (['args.db_path'], {}), '(args.db_path)\n', (5629, 5643), True, 'import kso_utils.db_utils as db_utils\n'), ((5693, 5770), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT id FROM subjects WHERE subject_type=\'frame\'"""', 'conn'], {}), '("SELECT id FROM subjects WHERE subject_type=\'frame\'", conn)\n', (5710, 5770), True, 'import pandas as pd\n'), ((10475, 10517), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (10488, 10517), True, 'import pandas as pd\n'), ((10747, 10805), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT id, filename FROM movies"""', 'conn'], {}), "('SELECT id, filename FROM movies', conn)\n", (10764, 10805), True, 'import pandas as pd\n'), ((10882, 10937), 'pandas.merge', 'pd.merge', (['w2_data', 'movies_df'], {'how': '"""left"""', 'on': '"""movie_id"""'}), "(w2_data, movies_df, how='left', on='movie_id')\n", (10890, 10937), True, 'import pandas as pd\n'), ((13198, 13214), 'pandas.DataFrame', 'pd.DataFrame', (['ds'], {}), '(ds)\n', (13210, 13214), True, 'import pandas as pd\n'), ((14465, 14571), 'pandas.DataFrame', 'pd.DataFrame', (['new_rows'], {'columns': "['movie_id', 'label', 'start_frame', 'subject_id', 'x', 'y', 'w', 'h']"}), "(new_rows, columns=['movie_id', 'label', 'start_frame',\n 'subject_id', 'x', 'y', 'w', 'h'])\n", (14477, 14571), True, 'import pandas as pd\n'), ((14748, 14788), 'kso_utils.db_utils.create_connection', 'db_utils.create_connection', (['args.db_path'], {}), '(args.db_path)\n', (14774, 14788), True, 'import kso_utils.db_utils as db_utils\n'), ((14832, 14899), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT id, frame_exp_sp_id FROM subjects"""', 'conn'], {}), "('SELECT id, frame_exp_sp_id FROM subjects', conn)\n", (14849, 14899), True, 'import pandas as pd\n'), ((15037, 15159), 'pandas.merge', 'pd.merge', (['w2_annotations', 'subjects_df'], {'how': '"""left"""', 'left_on': '"""subject_id"""', 'right_on': '"""subject_id"""', 'validate': '"""many_to_one"""'}), "(w2_annotations, subjects_df, how='left', left_on='subject_id',\n right_on='subject_id', validate='many_to_one')\n", (15045, 15159), True, 'import pandas as pd\n'), ((2091, 2111), 'collections.Counter', 'Counter', (['cluster_ids'], {}), '(cluster_ids)\n', (2098, 2111), False, 'from collections import OrderedDict, Counter\n'), ((2298, 2331), 'numpy.isin', 'np.isin', (['cluster_ids', 'passing_ids'], {}), '(cluster_ids, passing_ids)\n', (2305, 2331), True, 'import numpy as np\n'), ((5352, 5413), 'kso_utils.db_utils.combine_duplicates', 'db_utils.combine_duplicates', (['w2_data', 'args.duplicates_file_id'], {}), '(w2_data, args.duplicates_file_id)\n', (5379, 5413), True, 'import kso_utils.db_utils as db_utils\n'), ((6564, 6661), 'pandas.merge', 'pd.merge', (['new_subjects', 'subjects_df'], {'how': '"""left"""', 'left_on': '"""subject_ids"""', 'right_on': '"""subject_id"""'}), "(new_subjects, subjects_df, how='left', left_on='subject_ids',\n right_on='subject_id')\n", (6572, 6661), True, 'import pandas as pd\n'), ((7896, 7954), 'pandas.read_sql_query', 
'pd.read_sql_query', (['"""SELECT id, filename FROM movies"""', 'conn'], {}), "('SELECT id, filename FROM movies', conn)\n", (7913, 7954), True, 'import pandas as pd\n'), ((8095, 8155), 'pandas.merge', 'pd.merge', (['new_subjects', 'movies_df'], {'how': '"""left"""', 'on': '"""movie_id"""'}), "(new_subjects, movies_df, how='left', on='movie_id')\n", (8103, 8155), True, 'import pandas as pd\n'), ((9072, 9136), 'kso_utils.db_utils.test_table', 'db_utils.test_table', (['new_subjects', '"""subjects"""'], {'keys': "['movie_id']"}), "(new_subjects, 'subjects', keys=['movie_id'])\n", (9091, 9136), True, 'import kso_utils.db_utils as db_utils\n'), ((1791, 1807), 'pandas.Series', 'pd.Series', (['users'], {}), '(users)\n', (1800, 1807), True, 'import pandas as pd\n'), ((1946, 1991), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'min_samples': '(1)', 'metric': 'bb_iou', 'eps': 'eps'}), '(min_samples=1, metric=bb_iou, eps=eps)\n', (1952, 1991), False, 'from sklearn.cluster import DBSCAN\n'), ((2480, 2496), 'numpy.array', 'np.array', (['bboxes'], {}), '(bboxes)\n', (2488, 2496), True, 'import numpy as np\n'), ((2497, 2523), 'numpy.where', 'np.where', (['(cluster_ids == i)'], {}), '(cluster_ids == i)\n', (2505, 2523), True, 'import numpy as np\n'), ((11473, 11488), 'ast.literal_eval', 'literal_eval', (['x'], {}), '(x)\n', (11485, 11488), False, 'from ast import literal_eval\n'), ((13925, 13959), 'numpy.array', 'np.array', (['(i[4], i[5], i[6], i[7])'], {}), '((i[4], i[5], i[6], i[7]))\n', (13933, 13959), True, 'import numpy as np\n'), ((6025, 6038), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (6035, 6038), False, 'import os, io, csv, json\n'), ((10178, 10191), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (10188, 10191), False, 'import os, io, csv, json\n'), ((7739, 7752), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (7749, 7752), False, 'import os, io, csv, json\n')]
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# ufit, a universal scattering fitting suite
#
# Copyright (c) 2013-2019, <NAME> and contributors. All rights reserved.
# Licensed under a 2-clause BSD license, see LICENSE.
# *****************************************************************************
"""Models for different peak shapes."""
from numpy import exp, log, sqrt, sin, cos, pi
from scipy.special import wofz
from ufit.models import Model
__all__ = ['Gauss', 'GaussInt', 'Lorentz', 'LorentzInt',
'Voigt', 'PseudoVoigt', 'DHO']
class Gauss(Model):
"""Gaussian peak
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'ampl', 'fwhm']
def __init__(self, name='', pos=None, ampl=None, fwhm=None):
pp, pa, pf = self._init_params(name, self.param_names, locals())
# amplitude and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: \
abs(p[pa]) * exp(-(x - p[pp])**2/p[pf]**2 * 4*log(2))
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM
}
class GaussInt(Model):
"""Gaussian peak with integrated intensity parameter
Parameters:
* `pos` - Peak center position
* `int` - Integrated intensity
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'int', 'fwhm']
def __init__(self, name='', pos=None, int=None, fwhm=None):
pp, pint, pf = self._init_params(name, self.param_names, locals())
# integration and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: \
abs(p[pint]) / (abs(p[pf]) * sqrt(pi/(4 * log(2)))) * \
exp(-(x - p[pp])**2/p[pf]**2 * 4*log(2))
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
fwhm = 2*abs(w[0] - p[0])
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1] * fwhm * sqrt(2*pi), # peak intensity (integrated)
self.params[2].name: fwhm, # FWHM
}
class Lorentz(Model):
"""Lorentzian peak
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'ampl', 'fwhm']
def __init__(self, name='', pos=None, ampl=None, fwhm=None):
pp, pa, pf = self._init_params(name, self.param_names, locals())
# amplitude and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: abs(p[pa]) / (1 + 4*(x - p[pp])**2/p[pf]**2)
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM
}
class LorentzInt(Model):
"""Lorentzian peak with integrated intensity parameter
Parameters:
* `pos` - Peak center position
* `int` - Integrated intensity
* `fwhm` - Full width at half maximum
"""
param_names = ['pos', 'int', 'fwhm']
def __init__(self, name='', pos=None, int=None, fwhm=None):
pp, pint, pf = self._init_params(name, self.param_names, locals())
# integration and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.fcn = lambda p, x: 2 * abs(p[pint]) / (pi * p[pf]) / (1 + 4*(x - p[pp])**2/p[pf]**2)
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
fwhm = 2*abs(w[0] - p[0])
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1] * fwhm * pi/2, # integrated intensity
self.params[2].name: fwhm, # FWHM
}
class Voigt(Model):
"""Voigt peak
A convolution of a Gaussian and a Lorentzian.
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum of the Gauss part
* `shape` - Lorentz contribution
"""
param_names = ['pos', 'ampl', 'fwhm', 'shape']
def __init__(self, name='', pos=None, ampl=None, fwhm=None, shape=None):
pp, pa, pf, psh = self._init_params(name, self.param_names, locals())
# amplitude and fwhms should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.params[3].finalize = abs
self.fcn = lambda p, x: \
p[pa] / wofz(1j*sqrt(log(2))*p[psh]).real * \
wofz(2*sqrt(log(2)) * (x-p[pp])/p[pf] + 1j*sqrt(log(2))*p[psh]).real
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM of Gauss
self.params[3].name: 0,
}
class PseudoVoigt(Model):
"""Pseudo-Voigt peak
A pseudo-convolution of a Gaussian and a Lorentzian.
Parameters:
* `pos` - Peak center position
* `ampl` - Amplitude at center
* `fwhm` - Full width at half maximum
* `eta` - Lorentzicity
"""
param_names = ['pos', 'ampl', 'fwhm', 'eta']
def __init__(self, name='', pos=None, ampl=None, fwhm=None, eta=0.5):
pp, pa, pf, pe = self._init_params(name, self.param_names, locals())
# amplitude and fwhm should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
# eta should be between 0 and 1
self.params[3].finalize = lambda e: e % 1.0
self.fcn = lambda p, x: abs(p[pa]) * \
((p[pe] % 1.0) / (1 + 4*(x - p[pp])**2/p[pf]**2) +
(1-(p[pe] % 1.0)) * exp(-(x - p[pp])**2/p[pf]**2 * 4*log(2)))
pick_points = ['peak', 'width']
def convert_pick(self, p, w):
return {
self.params[0].name: p[0], # position
self.params[1].name: p[1], # peak amplitude
self.params[2].name: 2*abs(w[0] - p[0]), # FWHM
}
class DHO(Model):
"""Damped Harmonic Oscillator
Two Lorentzians centered around zero with a common width and amplitude,
respecting the Bose factor.
Parameters:
* `center` - Energy zero
* `pos` - omega_0
* `ampl` - Amplitude
* `gamma` - Damping
* `tt` - Temperature in K
"""
param_names = ['center', 'pos', 'ampl', 'gamma', 'tt']
def __init__(self, name='',
center=0, pos=None, ampl=None, gamma=None, tt=None):
pc, pp, pa, pg, ptt = self._init_params(name, self.param_names,
locals())
# pos, amplitude and gamma should be positive
self.params[1].finalize = abs
self.params[2].finalize = abs
self.params[3].finalize = abs
self.fcn = lambda p, x: x / (1. - exp(-11.6045*(x+0.00001) / p[ptt])) * \
abs(p[pa]) * abs(p[pg]) / \
((p[pp]**2 - (x - p[pc])**2)**2 + (p[pg]*(x - p[pc]))**2)
pick_points = ['left peak', 'width of left peak', 'right peak']
def convert_pick(self, p1, w, p2):
return {
self.params[0].name: 0.5*(p1[0] + p2[0]), # center
self.params[1].name: 0.5*abs(p1[0] - p2[0]), # position
self.params[2].name: p1[1] * 0.01, # peak amplitude
self.params[3].name: 2*abs(w[0] - p1[0]), # gamma
}
class Gauss2D(Model):
"""Gaussian peak in two dimensions
Parameters:
* `bkgd` - Background
* `pos_x` - X center position
* `pos_y` - Y center position
* `ampl` - amplitude
* `fwhm_x` - Full width in X direction
* `fwhm_y` - Full width in Y direction
* `theta` - rotation of Gaussian in radians
"""
param_names = ['bkgd', 'pos_x', 'pos_y', 'ampl', 'fwhm_x', 'fwhm_y', 'theta']
def __init__(self, name='', bkgd=None, pos_x=None, pos_y=None, ampl=None,
fwhm_x=None, fwhm_y=None, theta=None):
pb, ppx, ppy, pa, pfx, pfy, pth = self._init_params(
name, self.param_names, locals())
self.params[3].finalize = abs
self.params[4].finalize = abs
self.params[5].finalize = abs
def fcn(p, x):
# rotate coordinate system by theta
c, s = cos(p[pth]), sin(p[pth])
x1 = (x[:, 0] - p[ppx])*c - (x[:, 1] - p[ppy])*s
y1 = (x[:, 0] - p[ppx])*s + (x[:, 1] - p[ppy])*c
return abs(p[pb]) + abs(p[pa]) * \
exp(-x1**2/p[pfx]**2 * 4*log(2)) * \
exp(-y1**2/p[pfy]**2 * 4*log(2))
self.fcn = fcn
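# --- Hedged usage sketch (not part of the original module) ---
# A standalone numerical check of the Gaussian parameterization used by the
# Gauss model above: the profile should drop to half the amplitude at
# pos +/- fwhm/2.  Plain numpy arrays are assumed here; ufit's parameter
# handling is bypassed.
if __name__ == '__main__':
    import numpy as np
    x = np.linspace(-5.0, 5.0, 1001)
    pos, ampl, fwhm = 0.0, 2.0, 1.5
    y = ampl * np.exp(-(x - pos)**2 / fwhm**2 * 4 * np.log(2))
    i_half = np.argmin(np.abs(x - fwhm / 2))
    assert abs(y[i_half] - ampl / 2) < 1e-3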
|
[
"numpy.log",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"numpy.sqrt"
] |
[((2370, 2382), 'numpy.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (2374, 2382), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((8778, 8789), 'numpy.cos', 'cos', (['p[pth]'], {}), '(p[pth])\n', (8781, 8789), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((8791, 8802), 'numpy.sin', 'sin', (['p[pth]'], {}), '(p[pth])\n', (8794, 8802), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((1190, 1196), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (1193, 1196), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((2141, 2147), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (2144, 2147), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((9066, 9072), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (9069, 9072), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((6255, 6261), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (6258, 6261), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((7353, 7389), 'numpy.exp', 'exp', (['(-11.6045 * (x + 1e-05) / p[ptt])'], {}), '(-11.6045 * (x + 1e-05) / p[ptt])\n', (7356, 7389), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((9013, 9019), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (9016, 9019), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((2082, 2088), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (2085, 2088), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((4966, 4972), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (4969, 4972), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((5051, 5057), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (5054, 5057), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n'), ((5015, 5021), 'numpy.log', 'log', (['(2)'], {}), '(2)\n', (5018, 5021), False, 'from numpy import exp, log, sqrt, sin, cos, pi\n')]
|
import numpy as np
from skopt.space import Space
from skopt.sampler import Grid
import matplotlib.pyplot as plt
import seaborn as sns
def plot_teacher_action():
space = Space([(-1., 1.), (-1., 1.)])
grid = Grid(border="include", use_full_layout=False)
action_manipulated = grid.generate(space.dimensions, 160)
action_manipulated = np.array(action_manipulated)
action_manipulated2 = \
np.append(action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] < -0.3), :],
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] < -0.3), :],
axis=0)
action_manipulated2 = \
np.append(action_manipulated2,
action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[:, 1] > 0.3), :],
axis=0)
action_manipulated = np.array([[-0.1, 0],
[0.1, 0],
[0, 0.1],
[0, -0.1],
[-0.25, 0],
[0.25, 0],
[0, 0.25],
[0, -0.25],
[-0.1, 0.1],
[0.1, 0.1],
[-0.1, -0.1],
[0.1, -0.1],
[-0.25, 0.25],
[0.25, 0.25],
[-0.25, -0.25],
[0.25, -0.25],
[0.1, 0.05],
[0.05, 0.1],
[0.05, -0.1],
[-0.25, 0.1],
[0.25, 0.8],
[0.6, 0.25],
[0.3, -0.25],
[-0.1, 0.7],
[0.9, 0.1],
[-0.1, -1],
[1, -0.1],
[-0.2, 0.75],
[0.5, 0.5],
[-0.5, -0.5],
[0.75, 0],
[0.15, 0.05],
[0.6, 0.1],
[0.4, -0.1],
[-0.25, 0.15],
[0.25, 0.9],
[-0.35, 0.25],
[0.5, -0.25],
[-0.19, 0.19],
[1, 1],
[-1, -1],
[0, 1],
[-1, 0],
[0.2, 0.75],
[-0.8, 0],
[0, -0.58]])
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
sns.scatterplot(data=action_manipulated, x=action_manipulated[:, 0], y=action_manipulated[:, 1])
plt.xlabel('velocity x')
plt.ylabel('velocity y')
plt.ylim(bottom=-1.05, top=1.05)
plt.xlim(-1.05, 1.05)
plt.savefig("art/plots/teacher_action_random.png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
sns.scatterplot(data=action_manipulated2, x=action_manipulated2[:, 0], y=action_manipulated2[:, 1])
plt.xlabel('velocity x')
plt.ylabel('velocity y')
plt.ylim(bottom=-1.05, top=1.05)
plt.xlim(-1.05, 1.05)
plt.savefig("art/plots/teacher_action_grid.png", dpi=100, transparent=True)
plt.show()
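# --- Hedged usage sketch (not part of the original module) ---
# The quadrant masks above jointly keep only grid points whose x and y
# components both lie outside [-0.3, 0.3]; _corner_filter_demo shows an
# equivalent one-line check on a tiny hand-made array, and the __main__
# guard is an assumed entry point so the plots can be produced directly.
def _corner_filter_demo():
    pts = np.array([[-0.9, -0.8], [0.0, 0.0], [0.7, 0.9], [0.5, -0.6]])
    # keeps [-0.9, -0.8], [0.7, 0.9] and [0.5, -0.6]; drops the origin
    return pts[(np.abs(pts[:, 0]) > 0.3) & (np.abs(pts[:, 1]) > 0.3)]


if __name__ == "__main__":
    plot_teacher_action()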
|
[
"seaborn.set_style",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"seaborn.scatterplot",
"matplotlib.pyplot.ylim",
"skopt.sampler.Grid",
"skopt.space.Space",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((175, 208), 'skopt.space.Space', 'Space', (['[(-1.0, 1.0), (-1.0, 1.0)]'], {}), '([(-1.0, 1.0), (-1.0, 1.0)])\n', (180, 208), False, 'from skopt.space import Space\n'), ((216, 261), 'skopt.sampler.Grid', 'Grid', ([], {'border': '"""include"""', 'use_full_layout': '(False)'}), "(border='include', use_full_layout=False)\n", (220, 261), False, 'from skopt.sampler import Grid\n'), ((349, 377), 'numpy.array', 'np.array', (['action_manipulated'], {}), '(action_manipulated)\n', (357, 377), True, 'import numpy as np\n'), ((414, 631), 'numpy.append', 'np.append', (['action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[\n :, 1] < -0.3), :]', 'action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:,\n 1] > 0.3), :]'], {'axis': '(0)'}), '(action_manipulated[(action_manipulated[:, 0] < -0.3) * (\n action_manipulated[:, 1] < -0.3), :], action_manipulated[(\n action_manipulated[:, 0] > 0.3) * (action_manipulated[:, 1] > 0.3), :],\n axis=0)\n', (423, 631), True, 'import numpy as np\n'), ((690, 825), 'numpy.append', 'np.append', (['action_manipulated2', 'action_manipulated[(action_manipulated[:, 0] > 0.3) * (action_manipulated[:,\n 1] < -0.3), :]'], {'axis': '(0)'}), '(action_manipulated2, action_manipulated[(action_manipulated[:, 0] >\n 0.3) * (action_manipulated[:, 1] < -0.3), :], axis=0)\n', (699, 825), True, 'import numpy as np\n'), ((894, 1029), 'numpy.append', 'np.append', (['action_manipulated2', 'action_manipulated[(action_manipulated[:, 0] < -0.3) * (action_manipulated[\n :, 1] > 0.3), :]'], {'axis': '(0)'}), '(action_manipulated2, action_manipulated[(action_manipulated[:, 0] <\n -0.3) * (action_manipulated[:, 1] > 0.3), :], axis=0)\n', (903, 1029), True, 'import numpy as np\n'), ((1089, 1710), 'numpy.array', 'np.array', (['[[-0.1, 0], [0.1, 0], [0, 0.1], [0, -0.1], [-0.25, 0], [0.25, 0], [0, 0.25],\n [0, -0.25], [-0.1, 0.1], [0.1, 0.1], [-0.1, -0.1], [0.1, -0.1], [-0.25,\n 0.25], [0.25, 0.25], [-0.25, -0.25], [0.25, -0.25], [0.1, 0.05], [0.05,\n 0.1], [0.05, -0.1], [-0.25, 0.1], [0.25, 0.8], [0.6, 0.25], [0.3, -0.25\n ], [-0.1, 0.7], [0.9, 0.1], [-0.1, -1], [1, -0.1], [-0.2, 0.75], [0.5, \n 0.5], [-0.5, -0.5], [0.75, 0], [0.15, 0.05], [0.6, 0.1], [0.4, -0.1], [\n -0.25, 0.15], [0.25, 0.9], [-0.35, 0.25], [0.5, -0.25], [-0.19, 0.19],\n [1, 1], [-1, -1], [0, 1], [-1, 0], [0.2, 0.75], [-0.8, 0], [0, -0.58]]'], {}), '([[-0.1, 0], [0.1, 0], [0, 0.1], [0, -0.1], [-0.25, 0], [0.25, 0],\n [0, 0.25], [0, -0.25], [-0.1, 0.1], [0.1, 0.1], [-0.1, -0.1], [0.1, -\n 0.1], [-0.25, 0.25], [0.25, 0.25], [-0.25, -0.25], [0.25, -0.25], [0.1,\n 0.05], [0.05, 0.1], [0.05, -0.1], [-0.25, 0.1], [0.25, 0.8], [0.6, 0.25\n ], [0.3, -0.25], [-0.1, 0.7], [0.9, 0.1], [-0.1, -1], [1, -0.1], [-0.2,\n 0.75], [0.5, 0.5], [-0.5, -0.5], [0.75, 0], [0.15, 0.05], [0.6, 0.1], [\n 0.4, -0.1], [-0.25, 0.15], [0.25, 0.9], [-0.35, 0.25], [0.5, -0.25], [-\n 0.19, 0.19], [1, 1], [-1, -1], [0, 1], [-1, 0], [0.2, 0.75], [-0.8, 0],\n [0, -0.58]])\n', (1097, 1710), True, 'import numpy as np\n'), ((3255, 3267), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3265, 3267), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3346), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'axes.grid': True, 'axes.edgecolor': 'black'}"], {}), "('whitegrid', {'axes.grid': True, 'axes.edgecolor': 'black'})\n", (3285, 3346), True, 'import seaborn as sns\n'), ((3351, 3452), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'action_manipulated', 'x': 'action_manipulated[:, 0]', 'y': 
'action_manipulated[:, 1]'}), '(data=action_manipulated, x=action_manipulated[:, 0], y=\n action_manipulated[:, 1])\n', (3366, 3452), True, 'import seaborn as sns\n'), ((3452, 3476), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""velocity x"""'], {}), "('velocity x')\n", (3462, 3476), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3505), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""velocity y"""'], {}), "('velocity y')\n", (3491, 3505), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3542), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(-1.05)', 'top': '(1.05)'}), '(bottom=-1.05, top=1.05)\n', (3518, 3542), True, 'import matplotlib.pyplot as plt\n'), ((3547, 3568), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.05)', '(1.05)'], {}), '(-1.05, 1.05)\n', (3555, 3568), True, 'import matplotlib.pyplot as plt\n'), ((3573, 3650), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""art/plots/teacher_action_random.png"""'], {'dpi': '(100)', 'transparent': '(True)'}), "('art/plots/teacher_action_random.png', dpi=100, transparent=True)\n", (3584, 3650), True, 'import matplotlib.pyplot as plt\n'), ((3655, 3665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3663, 3665), True, 'import matplotlib.pyplot as plt\n'), ((3671, 3683), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3681, 3683), True, 'import matplotlib.pyplot as plt\n'), ((3688, 3762), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'axes.grid': True, 'axes.edgecolor': 'black'}"], {}), "('whitegrid', {'axes.grid': True, 'axes.edgecolor': 'black'})\n", (3701, 3762), True, 'import seaborn as sns\n'), ((3767, 3871), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'action_manipulated2', 'x': 'action_manipulated2[:, 0]', 'y': 'action_manipulated2[:, 1]'}), '(data=action_manipulated2, x=action_manipulated2[:, 0], y=\n action_manipulated2[:, 1])\n', (3782, 3871), True, 'import seaborn as sns\n'), ((3871, 3895), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""velocity x"""'], {}), "('velocity x')\n", (3881, 3895), True, 'import matplotlib.pyplot as plt\n'), ((3900, 3924), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""velocity y"""'], {}), "('velocity y')\n", (3910, 3924), True, 'import matplotlib.pyplot as plt\n'), ((3929, 3961), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(-1.05)', 'top': '(1.05)'}), '(bottom=-1.05, top=1.05)\n', (3937, 3961), True, 'import matplotlib.pyplot as plt\n'), ((3966, 3987), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.05)', '(1.05)'], {}), '(-1.05, 1.05)\n', (3974, 3987), True, 'import matplotlib.pyplot as plt\n'), ((3992, 4067), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""art/plots/teacher_action_grid.png"""'], {'dpi': '(100)', 'transparent': '(True)'}), "('art/plots/teacher_action_grid.png', dpi=100, transparent=True)\n", (4003, 4067), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4080, 4082), True, 'import matplotlib.pyplot as plt\n')]
|
# Test convolving to different resolutions
# Test the effect of convolving straight to 20000 and convolving first to an intermediate resolution say 80000.
import matplotlib.pyplot as plt
import numpy as np
from IP_multi_Convolution import ip_convolution, unitary_Gauss
def main():
# fwhm = lambda/R
fwhm = 2046 / 100000
# Starting spectrum
wav = np.linspace(2040, 2050, 20000)
flux = (np.ones_like(wav) - unitary_Gauss(wav, 2045, fwhm) -
unitary_Gauss(wav, 2047, fwhm))
# range in which to have the convoled values. Be careful of the edges!
chip_limits = [2042, 2049]
# Convolution to 80k
R = 80000
wav_80k, flux_80k = ip_convolution(wav, flux, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
# Convolution to 50k
R = 50000
wav_50k, flux_50k = ip_convolution(wav, flux, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
wav_80k_50k, flux_80k_50k = ip_convolution(wav_80k, flux_80k, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
# Convolution to 20k
R = 20000
wav_80k_20k, flux_80k_20k = ip_convolution(wav_80k, flux_80k, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
wav_50k_20k, flux_50k_20k = ip_convolution(wav_50k, flux_50k, chip_limits, R,
fwhm_lim=5.0, plot=False, verbose=True)
wav_80k_50k_20k, flux_80k_50k_20k = ip_convolution(wav_80k_50k, flux_80k_50k,
chip_limits, R, fwhm_lim=5.0,
plot=False, verbose=True)
# Convolution straight to 20000
wav_20k, flux_20k = ip_convolution(wav, flux, chip_limits, R, fwhm_lim=5.0,
plot=False, verbose=True)
# Plot the results
plt.figure(1)
plt.xlabel(r"wavelength [nm])")
plt.ylabel(r"flux [counts] ")
plt.plot(wav, flux / np.max(flux), color='k',
linestyle="-", label="Original spectra")
    plt.plot(wav_80k, flux_80k / np.max(flux_80k), color='r', linestyle="-.", label="R=80k")
plt.plot(wav_50k, flux_50k / np.max(flux_50k), color='b', linestyle="--", label="R=50k")
plt.plot(wav_80k_20k, flux_80k_20k / np.max(flux_80k_20k), color='r',
linestyle="-", label="R=80k-20k")
plt.plot(wav_50k_20k, flux_50k_20k / np.max(flux_50k_20k), color='b',
linestyle="-", label="R=50k20k")
plt.plot(wav_80k_50k_20k, flux_80k_50k_20k / np.max(flux_80k_50k_20k), color='m',
linestyle="-", label="R=80k-50k-20k")
plt.plot(wav_20k, flux_20k / np.max(flux_20k), color='c', linestyle="-", label="R=20k")
plt.legend(loc='best')
plt.title(r"Convolution by different Instrument Profiles")
plt.show()
if __name__ == "__main__":
    # The ip_convolution fails if it is not run inside __name__ == "__main__"
main()
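# --- Hedged worked example (not part of the original test) ---
# The instrument-profile width above follows fwhm = lambda / R, so near
# lambda = 2046 nm a resolving power of R = 100000 gives
# fwhm = 2046 / 100000 ~ 0.020 nm, while R = 20000 gives
# fwhm = 2046 / 20000 ~ 0.102 nm, i.e. a profile five times broader.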
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.ones_like",
"matplotlib.pyplot.legend",
"IP_multi_Convolution.unitary_Gauss",
"matplotlib.pyplot.figure",
"numpy.max",
"IP_multi_Convolution.ip_convolution",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((367, 397), 'numpy.linspace', 'np.linspace', (['(2040)', '(2050)', '(20000)'], {}), '(2040, 2050, 20000)\n', (378, 397), True, 'import numpy as np\n'), ((678, 764), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav', 'flux', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav, flux, chip_limits, R, fwhm_lim=5.0, plot=False, verbose\n =True)\n', (692, 764), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((863, 949), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav', 'flux', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav, flux, chip_limits, R, fwhm_lim=5.0, plot=False, verbose\n =True)\n', (877, 949), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1017, 1110), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav_80k', 'flux_80k', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav_80k, flux_80k, chip_limits, R, fwhm_lim=5.0, plot=False,\n verbose=True)\n', (1031, 1110), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1226, 1319), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav_80k', 'flux_80k', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav_80k, flux_80k, chip_limits, R, fwhm_lim=5.0, plot=False,\n verbose=True)\n', (1240, 1319), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1396, 1489), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav_50k', 'flux_50k', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav_50k, flux_50k, chip_limits, R, fwhm_lim=5.0, plot=False,\n verbose=True)\n', (1410, 1489), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1574, 1675), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav_80k_50k', 'flux_80k_50k', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav_80k_50k, flux_80k_50k, chip_limits, R, fwhm_lim=5.0,\n plot=False, verbose=True)\n', (1588, 1675), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1843, 1929), 'IP_multi_Convolution.ip_convolution', 'ip_convolution', (['wav', 'flux', 'chip_limits', 'R'], {'fwhm_lim': '(5.0)', 'plot': '(False)', 'verbose': '(True)'}), '(wav, flux, chip_limits, R, fwhm_lim=5.0, plot=False, verbose\n =True)\n', (1857, 1929), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((1993, 2006), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2003, 2006), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2041), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""wavelength [nm])"""'], {}), "('wavelength [nm])')\n", (2021, 2041), True, 'import matplotlib.pyplot as plt\n'), ((2047, 2075), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""flux [counts] """'], {}), "('flux [counts] ')\n", (2057, 2075), True, 'import matplotlib.pyplot as plt\n'), ((2845, 2867), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2855, 2867), True, 'import matplotlib.pyplot as plt\n'), ((2872, 2929), 'matplotlib.pyplot.title', 'plt.title', (['"""Convolution by different Instrument Profiles"""'], {}), "('Convolution by different Instrument Profiles')\n", (2881, 2929), True, 'import matplotlib.pyplot as plt\n'), ((2935, 2945), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(2943, 2945), True, 'import matplotlib.pyplot as plt\n'), ((475, 505), 'IP_multi_Convolution.unitary_Gauss', 'unitary_Gauss', (['wav', '(2047)', 'fwhm'], {}), '(wav, 2047, fwhm)\n', (488, 505), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((410, 427), 'numpy.ones_like', 'np.ones_like', (['wav'], {}), '(wav)\n', (422, 427), True, 'import numpy as np\n'), ((430, 460), 'IP_multi_Convolution.unitary_Gauss', 'unitary_Gauss', (['wav', '(2045)', 'fwhm'], {}), '(wav, 2045, fwhm)\n', (443, 460), False, 'from IP_multi_Convolution import ip_convolution, unitary_Gauss\n'), ((2102, 2114), 'numpy.max', 'np.max', (['flux'], {}), '(flux)\n', (2108, 2114), True, 'import numpy as np\n'), ((2214, 2230), 'numpy.max', 'np.max', (['flux_80k'], {}), '(flux_80k)\n', (2220, 2230), True, 'import numpy as np\n'), ((2311, 2327), 'numpy.max', 'np.max', (['flux_50k'], {}), '(flux_50k)\n', (2317, 2327), True, 'import numpy as np\n'), ((2412, 2432), 'numpy.max', 'np.max', (['flux_80k_20k'], {}), '(flux_80k_20k)\n', (2418, 2432), True, 'import numpy as np\n'), ((2533, 2553), 'numpy.max', 'np.max', (['flux_50k_20k'], {}), '(flux_50k_20k)\n', (2539, 2553), True, 'import numpy as np\n'), ((2661, 2685), 'numpy.max', 'np.max', (['flux_80k_50k_20k'], {}), '(flux_80k_50k_20k)\n', (2667, 2685), True, 'import numpy as np\n'), ((2782, 2798), 'numpy.max', 'np.max', (['flux_20k'], {}), '(flux_20k)\n', (2788, 2798), True, 'import numpy as np\n')]
|
import os
import random
import re
import ssl
import tempfile
from urllib import request
import cv2
import imageio
import numpy as np
import tensorflow as tf
import tensorflow_hub as tfhub
UCF_ROOT = 'https://www.crcv.ucf.edu/THUMOS14/UCF101/UCF101/'
KINETICS_URL = ('https://raw.githubusercontent.com/deepmind/'
'kinetics-i3d/master/data/label_map.txt')
CACHE_DIR = tempfile.mkdtemp()
UNVERIFIED_CONTEXT = ssl._create_unverified_context()
def fetch_ucf_videos():
index = \
(request
.urlopen(UCF_ROOT, context=UNVERIFIED_CONTEXT)
.read()
.decode('utf-8'))
videos = re.findall('(v_[\w]+\.avi)', index)
return sorted(set(videos))
def fetch_kinetics_labels():
with request.urlopen(KINETICS_URL) as f:
labels = [line.decode('utf-8').strip()
for line in f.readlines()]
return labels
def fetch_random_video(videos_list):
video_name = random.choice(videos_list)
cache_path = os.path.join(CACHE_DIR, video_name)
if not os.path.exists(cache_path):
url = request.urljoin(UCF_ROOT, video_name)
response = (request
.urlopen(url,
context=UNVERIFIED_CONTEXT)
.read())
with open(cache_path, 'wb') as f:
f.write(response)
return cache_path
def crop_center(frame):
height, width = frame.shape[:2]
smallest_dimension = min(width, height)
x_start = (width // 2) - (smallest_dimension // 2)
x_end = x_start + smallest_dimension
y_start = (height // 2) - (smallest_dimension // 2)
y_end = y_start + smallest_dimension
roi = frame[y_start:y_end, x_start:x_end]
return roi
def read_video(path, max_frames=32, resize=(224, 224)):
capture = cv2.VideoCapture(path)
frames = []
while len(frames) <= max_frames:
frame_read, frame = capture.read()
if not frame_read:
break
frame = crop_center(frame)
frame = cv2.resize(frame, resize)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frames.append(frame)
capture.release()
frames = np.array(frames)
return frames / 255.
def predict(model, labels, sample_video):
model_input = tf.constant(sample_video,
dtype=tf.float32)
model_input = model_input[tf.newaxis, ...]
logits = model(model_input)['default'][0]
probabilities = tf.nn.softmax(logits)
print('Top 5 actions:')
for i in np.argsort(probabilities)[::-1][:5]:
print(f'{labels[i]}: {probabilities[i] * 100:5.2f}%')
def save_as_gif(images, video_name):
converted_images = np.clip(images * 255, 0, 255)
converted_images = converted_images.astype(np.uint8)
imageio.mimsave(f'./{video_name}.gif',
converted_images,
fps=25)
VIDEO_LIST = fetch_ucf_videos()
LABELS = fetch_kinetics_labels()
video_path = fetch_random_video(VIDEO_LIST)
sample_video = read_video(video_path)
model_path = 'https://tfhub.dev/deepmind/i3d-kinetics-400/1'
model = tfhub.load(model_path)
model = model.signatures['default']
predict(model, LABELS, sample_video)
video_name = video_path.rsplit('/', maxsplit=1)[1][:-4]
save_as_gif(sample_video, video_name)
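# --- Hedged sanity check (not part of the original script) ---
# crop_center() should return a square region whose side equals the
# smaller of the two frame dimensions; a quick check on a dummy frame:
_dummy_frame = np.zeros((480, 640, 3), dtype=np.uint8)
assert crop_center(_dummy_frame).shape[:2] == (480, 480)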
|
[
"cv2.resize",
"tensorflow.nn.softmax",
"tensorflow_hub.load",
"os.path.join",
"cv2.cvtColor",
"os.path.exists",
"random.choice",
"tensorflow.constant",
"numpy.clip",
"cv2.VideoCapture",
"urllib.request.urlopen",
"numpy.argsort",
"tempfile.mkdtemp",
"re.findall",
"numpy.array",
"ssl._create_unverified_context",
"imageio.mimsave",
"urllib.request.urljoin"
] |
[((385, 403), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (401, 403), False, 'import tempfile\n'), ((425, 457), 'ssl._create_unverified_context', 'ssl._create_unverified_context', ([], {}), '()\n', (455, 457), False, 'import ssl\n'), ((3091, 3113), 'tensorflow_hub.load', 'tfhub.load', (['model_path'], {}), '(model_path)\n', (3101, 3113), True, 'import tensorflow_hub as tfhub\n'), ((628, 665), 're.findall', 're.findall', (['"""(v_[\\\\w]+\\\\.avi)"""', 'index'], {}), "('(v_[\\\\w]+\\\\.avi)', index)\n", (638, 665), False, 'import re\n'), ((938, 964), 'random.choice', 'random.choice', (['videos_list'], {}), '(videos_list)\n', (951, 964), False, 'import random\n'), ((982, 1017), 'os.path.join', 'os.path.join', (['CACHE_DIR', 'video_name'], {}), '(CACHE_DIR, video_name)\n', (994, 1017), False, 'import os\n'), ((1789, 1811), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (1805, 1811), False, 'import cv2\n'), ((2155, 2171), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (2163, 2171), True, 'import numpy as np\n'), ((2260, 2303), 'tensorflow.constant', 'tf.constant', (['sample_video'], {'dtype': 'tf.float32'}), '(sample_video, dtype=tf.float32)\n', (2271, 2303), True, 'import tensorflow as tf\n'), ((2448, 2469), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2461, 2469), True, 'import tensorflow as tf\n'), ((2674, 2703), 'numpy.clip', 'np.clip', (['(images * 255)', '(0)', '(255)'], {}), '(images * 255, 0, 255)\n', (2681, 2703), True, 'import numpy as np\n'), ((2766, 2830), 'imageio.mimsave', 'imageio.mimsave', (['f"""./{video_name}.gif"""', 'converted_images'], {'fps': '(25)'}), "(f'./{video_name}.gif', converted_images, fps=25)\n", (2781, 2830), False, 'import imageio\n'), ((735, 764), 'urllib.request.urlopen', 'request.urlopen', (['KINETICS_URL'], {}), '(KINETICS_URL)\n', (750, 764), False, 'from urllib import request\n'), ((1030, 1056), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (1044, 1056), False, 'import os\n'), ((1072, 1109), 'urllib.request.urljoin', 'request.urljoin', (['UCF_ROOT', 'video_name'], {}), '(UCF_ROOT, video_name)\n', (1087, 1109), False, 'from urllib import request\n'), ((2008, 2033), 'cv2.resize', 'cv2.resize', (['frame', 'resize'], {}), '(frame, resize)\n', (2018, 2033), False, 'import cv2\n'), ((2050, 2088), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2062, 2088), False, 'import cv2\n'), ((2512, 2537), 'numpy.argsort', 'np.argsort', (['probabilities'], {}), '(probabilities)\n', (2522, 2537), True, 'import numpy as np\n'), ((1131, 1179), 'urllib.request.urlopen', 'request.urlopen', (['url'], {'context': 'UNVERIFIED_CONTEXT'}), '(url, context=UNVERIFIED_CONTEXT)\n', (1146, 1179), False, 'from urllib import request\n'), ((507, 560), 'urllib.request.urlopen', 'request.urlopen', (['UCF_ROOT'], {'context': 'UNVERIFIED_CONTEXT'}), '(UCF_ROOT, context=UNVERIFIED_CONTEXT)\n', (522, 560), False, 'from urllib import request\n')]
|
from Main.Environments.Connect4 import Constants, Utils
from Tests.Environments.Connect4 import testCasesRawEvaluate
from unittest import TestCase
import numpy as np
class TestCreateMirroredStateAndPolicy(TestCase):
def testMirrorState(self):
AMOUNT_OF_TESTS_PER_CASE = 10
for case in testCasesRawEvaluate.TEST_CASES:
board = np.array(case[0])
for p in [-1, 1]:
convState = Utils.state2ConvState(board, p)
convStates = [convState for i in range(AMOUNT_OF_TESTS_PER_CASE)]
randomPolices = [np.random.random(7) for i in range(AMOUNT_OF_TESTS_PER_CASE)]
mirrorStates, mirrorPolices = Utils.createMirroredStateAndPolicy(convStates, randomPolices)
reMirrorStates, reMirrorPolices = Utils.createMirroredStateAndPolicy(mirrorStates, mirrorPolices)
for i in range(len(randomPolices)):
assert np.array_equal(randomPolices[i], reMirrorPolices[i])
for m in reMirrorStates:
assert np.array_equal(convState, m)
|
[
"Main.Environments.Connect4.Utils.state2ConvState",
"Main.Environments.Connect4.Utils.createMirroredStateAndPolicy",
"numpy.random.random",
"numpy.array",
"numpy.array_equal"
] |
[((372, 389), 'numpy.array', 'np.array', (['case[0]'], {}), '(case[0])\n', (380, 389), True, 'import numpy as np\n'), ((450, 481), 'Main.Environments.Connect4.Utils.state2ConvState', 'Utils.state2ConvState', (['board', 'p'], {}), '(board, p)\n', (471, 481), False, 'from Main.Environments.Connect4 import Constants, Utils\n'), ((710, 771), 'Main.Environments.Connect4.Utils.createMirroredStateAndPolicy', 'Utils.createMirroredStateAndPolicy', (['convStates', 'randomPolices'], {}), '(convStates, randomPolices)\n', (744, 771), False, 'from Main.Environments.Connect4 import Constants, Utils\n'), ((823, 886), 'Main.Environments.Connect4.Utils.createMirroredStateAndPolicy', 'Utils.createMirroredStateAndPolicy', (['mirrorStates', 'mirrorPolices'], {}), '(mirrorStates, mirrorPolices)\n', (857, 886), False, 'from Main.Environments.Connect4 import Constants, Utils\n'), ((599, 618), 'numpy.random.random', 'np.random.random', (['(7)'], {}), '(7)\n', (615, 618), True, 'import numpy as np\n'), ((970, 1022), 'numpy.array_equal', 'np.array_equal', (['randomPolices[i]', 'reMirrorPolices[i]'], {}), '(randomPolices[i], reMirrorPolices[i])\n', (984, 1022), True, 'import numpy as np\n'), ((1095, 1123), 'numpy.array_equal', 'np.array_equal', (['convState', 'm'], {}), '(convState, m)\n', (1109, 1123), True, 'import numpy as np\n')]
|
'''
VizUtil.py
Utilities for displaying satellite images,
with (optional) bounding-box annotations
'''
import numpy as np
from matplotlib import pylab
import os
import skimage.color
def imshow(Im, block=False, figID=1):
figH = pylab.figure(num=figID)
figH.clf()
pylab.imshow(Im)
pylab.draw()
pylab.show(block=block)
def showExamples(PMat, Nsubplots=9, block=False, figID=1, W=1, H=1):
nRow = int(np.floor(np.sqrt(Nsubplots)))
nCol = int(np.ceil(Nsubplots/ float(nRow)))
figH, axH = pylab.subplots(nRow, nCol, num=figID, figsize=(W*nCol, H*nRow))
Kplot = np.minimum(PMat.shape[0], Nsubplots)
for kk in range(Kplot):
pylab.subplot(nRow, nCol, kk+1)
if PMat[kk].ndim == 3:
pylab.imshow(PMat[kk], interpolation='nearest')
else:
pylab.imshow(PMat[kk], interpolation='nearest', cmap='gray')
pylab.axis('image')
pylab.xticks([])
pylab.yticks([])
# Disable visibility for unused subplots
for kk in range(Kplot, nRow*nCol):
pylab.subplot(nRow, nCol, kk+1)
pylab.axis('off')
pylab.draw()
pylab.show(block=block)
def save_fig_as_png(savepath, figID=1):
figH = pylab.figure(num=figID)
pylab.draw()
if not os.path.exists(savepath) and not savepath.count(os.path.sep):
savepath = os.path.join(DEFAULTSAVEPATH, savepath)
pylab.xticks([])
pylab.yticks([])
pylab.savefig(savepath, bbox_inches = 'tight', pad_inches = 0)
def makeImageWithBBoxAnnotations(Im, BBox, BBox2=None,
boxcolor=[0,1,0], # green
boxcolor2=[1,1,0], # yellow
**kwargs):
''' Create color image with bounding boxes highlighted in color
'''
if Im.ndim < 3:
AIm = skimage.color.gray2rgb(Im)
else:
AIm = Im.copy() # annotation shouldn't happen to original array
_add_bbox_to_im_inplace(AIm, BBox, boxcolor)
if BBox2 is not None:
_add_bbox_to_im_inplace(AIm, BBox2, boxcolor2)
return AIm
def _add_bbox_to_im_inplace(Im, BBox, boxcolor, doThickLines=1):
BBox = np.asarray(BBox, dtype=np.int32)
boxcolor = np.asarray(boxcolor, dtype=np.float64)
if boxcolor.max() > 1:
boxcolor = boxcolor / 255
  for r in range(BBox.shape[0]):
Im[BBox[r,0]:BBox[r,1], BBox[r,2]] = boxcolor[np.newaxis,:]
Im[BBox[r,0]:BBox[r,1], BBox[r,3]-1] = boxcolor[np.newaxis,:]
Im[BBox[r,0], BBox[r,2]:BBox[r,3]] = boxcolor[np.newaxis,:]
Im[BBox[r,1]-1, BBox[r,2]:BBox[r,3]] = boxcolor[np.newaxis,:]
## Draw thick lines by repeating this cmd
## but slightly shifting BBox coords +1 or -1 pixel
if doThickLines:
for inc in [-1, +1]:
ABox = BBox + inc
np.maximum(ABox, 0, out=ABox)
np.minimum(ABox[:,1], Im.shape[0], out=ABox[:,1])
np.minimum(ABox[:,3], Im.shape[1], out=ABox[:,3])
_add_bbox_to_im_inplace(Im, ABox, boxcolor, doThickLines=0)
"""
def showMostConfidentFalseNegatives(Ytrue, Phat, Nsubplots=9):
if Phat.ndim > 1:
Phat = Phat[:,-1] # use final column, which is probability of 1
Yhat = np.asarray(Phat > 0.5, dtype=Ytrue.dtype)
falseNegIDs = np.flatnonzero( np.logical_and(Yhat == 0, Yhat != Ytrue))
print 'FALSE NEG: %d/%d' % (len(falseNegIDs), np.sum(Ytrue==1))
if len(falseNegIDs) == 0:
return None
# Sort false positives from smallest probability to largest
sortIDs = np.argsort( Phat[falseNegIDs] )
falseNegIDs = falseNegIDs[sortIDs[:Nsubplots]]
#print ' ', falseNegIDs, Phat[falseNegIDs]
PosIms, _ = loadTestImages(testGroupIDs, falseNegIDs, None)
return plotImages(PosIms, Nsubplots=Nsubplots)
def showMostConfidentFalsePositives(Ytrue, Phat, Nsubplots=9):
if Phat.ndim > 1:
Phat = Phat[:,-1] # use final column, which is probability of 1
Yhat = np.asarray(Phat > 0.5, dtype=Ytrue.dtype)
falsePosIDs = np.flatnonzero( np.logical_and(Yhat == 1, Yhat != Ytrue))
print 'FALSE POS: %d/%d' % (len(falsePosIDs), np.sum(Ytrue==0))
if len(falsePosIDs) == 0:
return None
# Sort false positives from largest probability to smallest
sortIDs = np.argsort( -1*Phat[falsePosIDs] )
falsePosIDs = falsePosIDs[sortIDs[:Nsubplots]]
#print ' ', falsePosIDs, Phat[falsePosIDs]
_, NegIms = loadTestImages(testGroupIDs, None, falsePosIDs)
return plotImages(NegIms, Nsubplots=Nsubplots)
"""
|
[
"matplotlib.pylab.savefig",
"matplotlib.pylab.xticks",
"numpy.minimum",
"numpy.maximum",
"matplotlib.pylab.subplot",
"matplotlib.pylab.imshow",
"numpy.asarray",
"matplotlib.pylab.figure",
"os.path.exists",
"matplotlib.pylab.axis",
"matplotlib.pylab.yticks",
"matplotlib.pylab.subplots",
"numpy.sqrt",
"os.path.join",
"matplotlib.pylab.draw",
"matplotlib.pylab.show"
] |
[((228, 251), 'matplotlib.pylab.figure', 'pylab.figure', ([], {'num': 'figID'}), '(num=figID)\n', (240, 251), False, 'from matplotlib import pylab\n'), ((267, 283), 'matplotlib.pylab.imshow', 'pylab.imshow', (['Im'], {}), '(Im)\n', (279, 283), False, 'from matplotlib import pylab\n'), ((286, 298), 'matplotlib.pylab.draw', 'pylab.draw', ([], {}), '()\n', (296, 298), False, 'from matplotlib import pylab\n'), ((301, 324), 'matplotlib.pylab.show', 'pylab.show', ([], {'block': 'block'}), '(block=block)\n', (311, 324), False, 'from matplotlib import pylab\n'), ((499, 566), 'matplotlib.pylab.subplots', 'pylab.subplots', (['nRow', 'nCol'], {'num': 'figID', 'figsize': '(W * nCol, H * nRow)'}), '(nRow, nCol, num=figID, figsize=(W * nCol, H * nRow))\n', (513, 566), False, 'from matplotlib import pylab\n'), ((575, 611), 'numpy.minimum', 'np.minimum', (['PMat.shape[0]', 'Nsubplots'], {}), '(PMat.shape[0], Nsubplots)\n', (585, 611), True, 'import numpy as np\n'), ((1039, 1051), 'matplotlib.pylab.draw', 'pylab.draw', ([], {}), '()\n', (1049, 1051), False, 'from matplotlib import pylab\n'), ((1054, 1077), 'matplotlib.pylab.show', 'pylab.show', ([], {'block': 'block'}), '(block=block)\n', (1064, 1077), False, 'from matplotlib import pylab\n'), ((1128, 1151), 'matplotlib.pylab.figure', 'pylab.figure', ([], {'num': 'figID'}), '(num=figID)\n', (1140, 1151), False, 'from matplotlib import pylab\n'), ((1154, 1166), 'matplotlib.pylab.draw', 'pylab.draw', ([], {}), '()\n', (1164, 1166), False, 'from matplotlib import pylab\n'), ((1295, 1311), 'matplotlib.pylab.xticks', 'pylab.xticks', (['[]'], {}), '([])\n', (1307, 1311), False, 'from matplotlib import pylab\n'), ((1314, 1330), 'matplotlib.pylab.yticks', 'pylab.yticks', (['[]'], {}), '([])\n', (1326, 1330), False, 'from matplotlib import pylab\n'), ((1333, 1391), 'matplotlib.pylab.savefig', 'pylab.savefig', (['savepath'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(savepath, bbox_inches='tight', pad_inches=0)\n", (1346, 1391), False, 'from matplotlib import pylab\n'), ((2037, 2069), 'numpy.asarray', 'np.asarray', (['BBox'], {'dtype': 'np.int32'}), '(BBox, dtype=np.int32)\n', (2047, 2069), True, 'import numpy as np\n'), ((2083, 2121), 'numpy.asarray', 'np.asarray', (['boxcolor'], {'dtype': 'np.float64'}), '(boxcolor, dtype=np.float64)\n', (2093, 2121), True, 'import numpy as np\n'), ((642, 675), 'matplotlib.pylab.subplot', 'pylab.subplot', (['nRow', 'nCol', '(kk + 1)'], {}), '(nRow, nCol, kk + 1)\n', (655, 675), False, 'from matplotlib import pylab\n'), ((837, 856), 'matplotlib.pylab.axis', 'pylab.axis', (['"""image"""'], {}), "('image')\n", (847, 856), False, 'from matplotlib import pylab\n'), ((861, 877), 'matplotlib.pylab.xticks', 'pylab.xticks', (['[]'], {}), '([])\n', (873, 877), False, 'from matplotlib import pylab\n'), ((882, 898), 'matplotlib.pylab.yticks', 'pylab.yticks', (['[]'], {}), '([])\n', (894, 898), False, 'from matplotlib import pylab\n'), ((983, 1016), 'matplotlib.pylab.subplot', 'pylab.subplot', (['nRow', 'nCol', '(kk + 1)'], {}), '(nRow, nCol, kk + 1)\n', (996, 1016), False, 'from matplotlib import pylab\n'), ((1019, 1036), 'matplotlib.pylab.axis', 'pylab.axis', (['"""off"""'], {}), "('off')\n", (1029, 1036), False, 'from matplotlib import pylab\n'), ((1253, 1292), 'os.path.join', 'os.path.join', (['DEFAULTSAVEPATH', 'savepath'], {}), '(DEFAULTSAVEPATH, savepath)\n', (1265, 1292), False, 'import os\n'), ((417, 435), 'numpy.sqrt', 'np.sqrt', (['Nsubplots'], {}), '(Nsubplots)\n', (424, 435), True, 'import numpy as np\n'), ((707, 
754), 'matplotlib.pylab.imshow', 'pylab.imshow', (['PMat[kk]'], {'interpolation': '"""nearest"""'}), "(PMat[kk], interpolation='nearest')\n", (719, 754), False, 'from matplotlib import pylab\n'), ((771, 831), 'matplotlib.pylab.imshow', 'pylab.imshow', (['PMat[kk]'], {'interpolation': '"""nearest"""', 'cmap': '"""gray"""'}), "(PMat[kk], interpolation='nearest', cmap='gray')\n", (783, 831), False, 'from matplotlib import pylab\n'), ((1176, 1200), 'os.path.exists', 'os.path.exists', (['savepath'], {}), '(savepath)\n', (1190, 1200), False, 'import os\n'), ((2645, 2674), 'numpy.maximum', 'np.maximum', (['ABox', '(0)'], {'out': 'ABox'}), '(ABox, 0, out=ABox)\n', (2655, 2674), True, 'import numpy as np\n'), ((2681, 2732), 'numpy.minimum', 'np.minimum', (['ABox[:, 1]', 'Im.shape[0]'], {'out': 'ABox[:, 1]'}), '(ABox[:, 1], Im.shape[0], out=ABox[:, 1])\n', (2691, 2732), True, 'import numpy as np\n'), ((2737, 2788), 'numpy.minimum', 'np.minimum', (['ABox[:, 3]', 'Im.shape[1]'], {'out': 'ABox[:, 3]'}), '(ABox[:, 3], Im.shape[1], out=ABox[:, 3])\n', (2747, 2788), True, 'import numpy as np\n')]
|
#coding:utf-8
# Return a candidate index pair [sp, ep] of one pitch duration near the
# center of the frame, selected from differential (slope) change points
# that fall below a threshold close to the bottom of the waveform.
# Return zeros when there is no candidate.
import numpy as np
import matplotlib.pyplot as plt
# Check version
# Python 3.6.4, 64bit on Win32 (Windows 10)
# numpy (1.14.0)
def diff_ana(y, sr, show=False):
    # (1) selection based on the change of slope
    f_prime=np.gradient(y) # numerical gradient (slope)
    indices_diff0 = np.where( np.diff(np.sign(f_prime)) > 0.0 )[0] # diff the signs (-1,0,1) and detect change points with a positive step
    # (2) selection of values close to the bottom line
    thres0= (np.amax(y) - np.amin(y)) * 0.25 + np.amin(y) # use values within 25% of the amplitude range above the minimum as candidates
indices_thres0 = np.where( y < thres0 )[0]
    # (3) take the logical AND (intersection) of the two conditions above
indices=np.sort(np.array(list( set(indices_diff0) & set(indices_thres0))))
infections = y[indices]
    if len(indices) >= 2: # search only when there are two or more candidates
        index0= np.argmin(np.abs(indices - len(y)/2)) # find the candidate closest to the center
        if len(indices) == 2: # when there are only two candidates
sp= indices[0]
ep= indices[1]
        elif indices[index0] < len(y)/2 and indices[-1] > len(y)/2 : # if that candidate lies before the center
sp= indices[index0]
ep= indices[index0+1]
else:
sp= indices[index0-1]
ep= indices[index0]
    else: # no candidate
sp=0
ep=0
indices1=np.array([sp,ep])
infections1 = y[indices1]
#print ( indices, indices1)
#print ('select index, [Hz]', indices1, (sr / (indices1[1]-indices1[0])) )
if show:
fig = plt.figure()
ax1 = fig.add_subplot(311)
        plt.title('diff: two red circles show the selected portion')
plt.xlabel('mSec')
plt.ylabel('level')
ax1.plot(np.arange(len(y)) * 1000.0 / sr, y, 'bo-', ms=2)
ax1.plot(indices * 1000.0 / sr, infections, 'yo', ms=5)
ax1.plot(indices1 * 1000.0 / sr, infections1, 'ro', ms=5)
ax2 = fig.add_subplot(312)
ax2.plot(np.arange(len(f_prime)) * 1000.0 / sr, f_prime, 'ro', ms=5)
ax3 = fig.add_subplot(313)
f_prime2=np.gradient(f_prime)
indices2 = np.where(np.diff(np.sign(f_prime2)))[0]
infections2 = y[indices2]
ax3.plot(np.arange(len(y)) * 1000.0 / sr, y, 'bo-', ms=2)
ax3.plot(indices2 * 1000.0 / sr, infections2, 'ro', ms=5)
plt.show()
return int(sp), int(ep)
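# --- Hedged usage sketch (not part of the original module) ---
# A quick check on a synthetic periodic signal: a 103 Hz sine sampled at
# 8 kHz, where the returned [sp, ep] should span roughly one period
# (about 78 samples) around the frame center.
if __name__ == '__main__':
    sr0 = 8000
    t = np.arange(400) / float(sr0)
    y0 = np.sin(2.0 * np.pi * 103.0 * t)
    sp0, ep0 = diff_ana(y0, sr0, show=False)
    print('candidate interval:', sp0, ep0, ' approx pitch [Hz]:', sr0 / max(ep0 - sp0, 1))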
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.amin",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.array",
"numpy.sign",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.gradient"
] |
[((477, 491), 'numpy.gradient', 'np.gradient', (['y'], {}), '(y)\n', (488, 491), True, 'import numpy as np\n'), ((1440, 1458), 'numpy.array', 'np.array', (['[sp, ep]'], {}), '([sp, ep])\n', (1448, 1458), True, 'import numpy as np\n'), ((679, 689), 'numpy.amin', 'np.amin', (['y'], {}), '(y)\n', (686, 689), True, 'import numpy as np\n'), ((741, 761), 'numpy.where', 'np.where', (['(y < thres0)'], {}), '(y < thres0)\n', (749, 761), True, 'import numpy as np\n'), ((1649, 1661), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1659, 1661), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1763), 'matplotlib.pyplot.title', 'plt.title', (['"""diff: two red cirles shows selected portion"""'], {}), "('diff: two red cirles shows selected portion')\n", (1716, 1763), True, 'import matplotlib.pyplot as plt\n'), ((1773, 1791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""mSec"""'], {}), "('mSec')\n", (1783, 1791), True, 'import matplotlib.pyplot as plt\n'), ((1801, 1820), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""level"""'], {}), "('level')\n", (1811, 1820), True, 'import matplotlib.pyplot as plt\n'), ((2208, 2228), 'numpy.gradient', 'np.gradient', (['f_prime'], {}), '(f_prime)\n', (2219, 2228), True, 'import numpy as np\n'), ((2478, 2488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2486, 2488), True, 'import matplotlib.pyplot as plt\n'), ((645, 655), 'numpy.amax', 'np.amax', (['y'], {}), '(y)\n', (652, 655), True, 'import numpy as np\n'), ((658, 668), 'numpy.amin', 'np.amin', (['y'], {}), '(y)\n', (665, 668), True, 'import numpy as np\n'), ((542, 558), 'numpy.sign', 'np.sign', (['f_prime'], {}), '(f_prime)\n', (549, 558), True, 'import numpy as np\n'), ((2267, 2284), 'numpy.sign', 'np.sign', (['f_prime2'], {}), '(f_prime2)\n', (2274, 2284), True, 'import numpy as np\n')]
|
import numpy as np
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import statsmodels.api as sm
import matplotlib.dates as mdates
import warnings
import itertools
import dateutil
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV as gsc
from sklearn.linear_model import Ridge,Lasso
from sklearn.neural_network import MLPRegressor
def main ():
# Using svm
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
S1,S2=AQI_SVM(data)
S3,S4=AQI_Feature_importance_SVM(data)
S5,S6=AQI_Domain_Knowledge_SVM(data)
S7,S8=AQI_without_Domain_Knowledge_SVM(data)
##Linear Regression
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
LR1,LR2=AQI(data)
LR3,LR4=AQI_Feature_importance(data)
    LR5,LR6=AQI_Domain_Knowledge(data)
LR7,LR8=AQI_without_Domain_Knowledge(data)
## Predincting for next day
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
normalize(data)
y=pd.read_csv('AQI_prediction_add.csv')
LR_F1,LR_F2=AQI_Future(data,y.AQI_predicted)
LR_F3,LR_F4=AQI_Feature_importance_Future(data,y.AQI_predicted)
LR_F5,LR_F6=AQI_Domain_Knowledge_Future(data,y.AQI_predicted)
LR_F7,LR_F8=AQI_without_Domain_Knowledge_Future(data,y.AQI_predicted)
##Predicting for Autumn Season
data=pd.read_csv('autumn_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_A1,LR_A2=AQI(data)
LR_A3,LR_A4=AQI_Feature_importance(data)
LR_A5,LR_A6=AQI_Domain_Knowledge(data)
LR_A7,LR_A8=AQI_without_Domain_Knowledge(data)
##Predicting for Summer Season
data=pd.read_csv('summer_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_S1,LR_S2=AQI(data)
LR_S3,LR_S4=AQI_Feature_importance(data)
LR_S5,LR_S6=AQI_Domain_Knowledge(data)
LR_S7,LR_S8=AQI_without_Domain_Knowledge(data)
##Predicting for Winter Season
data=pd.read_csv('winter_data.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
data=pd.get_dummies(data, columns=[' _conds'], prefix = [' _conds'])
data=pd.get_dummies(data, columns=[' _wdire'], prefix = [' _wdire'])
data=pd.get_dummies(data, columns=['Type'], prefix = ['Type'])
LR_W1,LR_W2=AQI(data)
LR_W3,LR_W4=AQI_Feature_importance(data)
LR_W5,LR_W6=AQI_Domain_Knowledge(data)
LR_W7,LR_W8=AQI_without_Domain_Knowledge(data)
##Using Ridge
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
h = BestParams(data)
## Using all features
R1,R2=AQI_Ridge(data,h)
R3,R4=AQI_Feature_importance_Ridge(data,h)
R5,R6=AQI_Domain_Knowledge_Ridge(data,h)
R7,R8=AQI_without_Domain_Knowledge_Ridge(data,h)
##Future
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
h = BestParams(data)
y = pd.read_csv('AQI_prediction_add.csv')
R_F1,R_F2=AQI_Future_Ridge(data, y.AQI_predicted,h)
R_F3,R_F4=AQI_Feature_importance_Future_Ridge(data, y.AQI_predicted,h)
R_F5,R_F6=AQI_Domain_Knowledge_Future_Ridge(data, y.AQI_predicted,h)
R_F7,R_F8=AQI_without_Domain_Knowledge_Future_Ridge(data, y.AQI_predicted,h)
##using Lasso
data=pd.read_csv('Original_with_dummies.csv')
y=data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI']=y
h=BestParams(data)
L1,L2=AQI_Lasso(data,h)
L3,L4=AQI_Feature_importance_Lasso(data,h)
L5,L6=AQI_Domain_Knowledge_Lasso(data,h)
L7,L8=AQI_without_Domain_Knowledge_Lasso(data,h)
## Predincting for nxt day
data=pd.read_csv('Original_with_dummies.csv')
normalize(data)
h=BestParams(data)
y=pd.read_csv('AQI_prediction_add.csv')
L_F1,L_F2=AQI_Future_Lasso(data,y.AQI_predicted,h)
L_F3,L_F4=AQI_Feature_importance_Future_Lasso(data,y.AQI_predicted,h)
L_F5,L_F6=AQI_Domain_Knowledge_Future_Lasso(data,y.AQI_predicted,h)
L_F7,L_F8=AQI_without_Domain_Knowledge_Future_Lasso(data,y.AQI_predicted,h)
##Random forest
#All feautres
data = pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
F1,F2=AQI_RF(data)
F3,F4=AQI_Feature_importance_RF(data)
F5,F6=AQI_Domain_Knowledge_RF(data)
F7,F8=AQI_without_Domain_Knowledge_RF(data)
## Predincting for nxt day
data = pd.read_csv('Original_with_dummies.csv')
normalize(data)
y = pd.read_csv('AQI_prediction_add.csv')
F_F1,F_F2=AQI_Future_RF(data, y.AQI_predicted)
F_F3,F_F4=AQI_Feature_importance_Future_RF(data, y.AQI_predicted)
F_F5,F_F6=AQI_Domain_Knowledge_Future_RF(data, y.AQI_predicted)
F_F7,F_F8=AQI_without_Domain_Knowledge_Future_RF(data, y.AQI_predicted)
##NN
data=pd.read_csv('Original_with_dummies.csv')
y = data.AQI
data = data.drop('AQI', axis=1)
normalize(data)
data['AQI'] = y
layer = [4,4,4]
NN1,NN2=AQI_NN(data, layer)
NN3,NN4=AQI_Feature_importance_NN(data, layer)
NN5,NN6=AQI_Domain_Knowledge_NN(data, layer)
NN7,NN8=AQI_without_Domain_Knowledge_NN(data, layer)
## Predincting for nxt day
data=pd.read_csv('Original_with_dummies.csv')
y=pd.read_csv('AQI_prediction_add.csv')
normalize(data)
NN_F1,NN_F2=AQI_Future_NN(data,y.AQI_predicted, layer)
NN_F3,NN_F4=AQI_Feature_importance_Future_NN(data,y.AQI_predicted,layer)
NN_F5,NN_F6=AQI_Domain_Knowledge_Future_NN(data,y.AQI_predicted,layer)
NN_F7,NN_F8=AQI_without_Domain_Knowledge_Future_NN(data,y.AQI_predicted, layer)
##All features v/s all models
Bar_graph (LR1,LR2,L1,L2,R1,R2,S1,S2,F1,F2,NN1,NN2)
##iMPORTANT FEATURES V/S ALL MODELS
Bar_graph (LR3,LR4,L3,L4,R3,R4,S3,S4,F3,F4,NN3,NN4)
##Future with important features V/S ALL MODELS except svm
Bar_graph_without_svm (LR_F3,LR_F4,L_F3,L_F4,R_F3,R_F4,F_F3,F_F4,NN_F3,NN_F4)
##Autumn winter and summer
Bar_graph_season (LR_A3,LR_A4,LR_S3,LR_S4,LR_W3,LR_W4)
##Best Model Analysis using Data
data = pd.read_csv('Original_with_dummies.csv')
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
train=90
test=18
tips=[]
LABELS=[]
d=[0,1,2,3,4,5,6,7,8,9]
for i in range (10):
train=train+30
test=test+6
LABELS.append(train)
tips.append(train_test_data_prepare(data, train, test, 15))
plt.plot(tips)
plt.xticks(d, LABELS)
plt.xlabel("No of Days")
plt.ylabel("RMSE")
plt.title("Models")
plt.legend()
plt.show()
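# ---------------------------------------------------------------------------
# Model helper functions.
# Every AQI_* helper below follows the same recipe: assemble a feature matrix,
# split it 80/20 with train_test_split(random_state=1), fit a single regressor,
# print and return the train/validation RMSE, and plot predicted vs. expected
# AQI with graph_training()/graph_testing().
# ---------------------------------------------------------------------------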
#Predicting AQI using all features
def AQI(data):
y=data.AQI
data=data.drop('AQI',axis=1)
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
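# The *_Feature_importance_* helpers first fit an ExtraTreesRegressor purely to
# obtain feature_importances_, then reorder the columns from most to least
# important. Note that every column is still kept, so the regressor sees the
# same feature set, only in a different column order.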
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance(data):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area',]]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
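# The *_Future variants predict the next day's AQI: the caller passes in
# y = AQI_predicted (read from AQI_prediction_add.csv in main) instead of the
# same-day AQI column.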
def AQI_Future(data,y):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_Future(data,y):
tree_clf = ExtraTreesRegressor()
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_Future(data,y):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area','month_10','month_11',]]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future(data,y):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
data=data.drop('month_10',axis=1)
data=data.drop('month_11',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = LinearRegression()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def graph_training(y_pred,y_train):
all_samples = [i for i in range(0, 250)]
y_pred=y_pred[0:250]
y_train=y_train[0:250]
plt.plot(all_samples, y_pred,label='Predicted')
plt.plot(all_samples , y_train,label='Expected')
plt.xlabel("No of Samples")
plt.ylabel("AQI")
plt.title("Training")
plt.legend()
plt.show()
def graph_testing(y_pred,y_val):
all_samples = [i for i in range(0, 250)]
y_pred=y_pred[0:250]
y_val=y_val[0:250]
plt.plot(all_samples, y_pred,label='Predicted')
plt.plot(all_samples , y_val,label='Expected')
plt.xlabel("No of Samples")
plt.ylabel("AQI")
plt.title("Validation")
plt.legend()
plt.show()
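# Support-vector-regression helpers: SVR(gamma='scale') with scikit-learn's
# default RBF kernel, otherwise the same protocol as the helpers above.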
## SVM
def AQI_SVM(data):
y=data.AQI
data=data.drop('AQI',axis=1)
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_SVM(data):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_SVM(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
# df[['Name', 'Qualification']]
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area',]]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_SVM(data):
y=data.AQI
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
# data=data.drop('month_10',axis=1)
# data=data.drop('month_11',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = SVR(gamma='scale')
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
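# BestParams() grid-searches the Ridge regularization strength alpha over the
# candidate values below; gsc is assumed to be sklearn's GridSearchCV, imported
# earlier in the file. main() uses the returned alpha for both the Ridge and
# the Lasso helpers.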
def BestParams(data):
y = data.AQI
data = data.drop('AQI', axis=1)
Hyper_params = np.array(
[ 0.011, 0.1, 0.001, 0.01,.3, .2, 0.6, .8, 0.001, 0.0001, 3, 4,1,2.4])
Reg_model = Ridge()
GSCgrid = gsc(estimator=Reg_model, param_grid=dict(alpha=Hyper_params))
GSCgrid.fit(data, y)
# print('Hyper Parameter for Ridge:', GSCgrid.best_estimator_.alpha)
return GSCgrid.best_estimator_.alpha
def normalize(data):
for c in data.columns:
mean = data[c].mean()
max = data[c].max()
min = data[c].min()
data[c] = (data[c] - min) / (max - min)
return data
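# normalize() rescales every column of the frame it is given, in place, using
# min-max scaling: x -> (x - min) / (max - min).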
def AQI_Ridge(data,h):
y=data.AQI
data=data.drop('AQI',axis=1)
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_Ridge(data,h):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_Ridge(data,h):
y=data.AQI
# df[['Name', 'Qualification']]
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Ridge(data,h):
y=data.AQI
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_Ridge(data,y,h):
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_Future_Ridge(data,y,h):
tree_clf = ExtraTreesRegressor()
data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_Future_Ridge(data,y,h):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_Ridge(data,y,h):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Ridge(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
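# Lasso (L1-regularized) counterparts of the Ridge helpers above; same
# train/test protocol, with alpha supplied by BestParams().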
#Predicting AQI using all features
def AQI_Lasso(data,h):
y=data.AQI
data=data.drop('AQI',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_Lasso(data,h):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_Lasso(data,h):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Lasso(data,h):
y=data.AQI
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr =Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_Lasso(data,y,h):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr =Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_Future_Lasso(data,y,h):
tree_clf = ExtraTreesRegressor()
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_Future_Lasso(data,y,h):
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_Lasso(data,y,h):
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = Lasso(alpha=h)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
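# Random-forest counterparts; RandomForestRegressor is used with scikit-learn's
# default hyperparameters.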
def AQI_RF(data):
y=data.AQI
data=data.drop('AQI',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_RF(data):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_RF(data):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_RF(data):
y=data.AQI
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr =RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_RF(data,y):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_Future_RF(data,y):
tree_clf = ExtraTreesRegressor()
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_Future_RF(data,y):
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_RF(data,y):
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = RandomForestRegressor()
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
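# Neural-network counterparts; MLPRegressor with hidden layer sizes taken from
# `layer` (main passes [4, 4, 4]), ReLU activations, the Adam optimizer, an
# adaptive learning rate and at most 1000 iterations.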
#Predicting AQI using all features
def AQI_NN(data,layer):
y=data.AQI
data=data.drop('AQI',axis=1)
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_NN(data, layer):
tree_clf = ExtraTreesRegressor()
y=data['AQI']
data=data.drop('AQI',axis=1)
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_NN(data, layer):
y=data.AQI
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_NN(data,layer):
y=data.AQI
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_Future_NN(data,y, layer):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using features ranked by the feature-importance graph
def AQI_Feature_importance_Future_NN(data,y, layer):
tree_clf = ExtraTreesRegressor()
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
tree_clf.fit(data, y)
importances = tree_clf.feature_importances_
feature_names = data.columns
imp_features=dict(zip(feature_names,importances))
features_up = sorted(imp_features.items(), key=lambda x: x[1], reverse=True)
features_down = sorted(imp_features.items(), key=lambda x: x[1], reverse=False)
#best features
x = [i[0] for i in features_up]
print(x)
x=data[x]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
#Predicting AQI using domain-knowledge features only
def AQI_Domain_Knowledge_Future_NN(data,y,layer):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
x=data[[' _tempm',' _wdird',' _wspdm','year','Type_Industrial Area']]
X_train, X_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
def AQI_without_Domain_Knowledge_Future_NN(data,y, layer):
# data=pd.get_dummies(data, columns=['month'], prefix = ['month'])
data=data.drop('AQI',axis=1)
data=data.drop('NO2',axis=1)
data=data.drop('SO2',axis=1)
data=data.drop('SPM',axis=1)
data=data.drop('RSPM',axis=1)
data=data.drop('ni',axis=1)
data=data.drop('si',axis=1)
data=data.drop('rpi',axis=1)
data=data.drop('spi',axis=1)
data=data.drop(' _tempm',axis=1)
data=data.drop(' _wdird',axis=1)
data=data.drop(' _wspdm',axis=1)
data=data.drop('year',axis=1)
data=data.drop('Type_Industrial Area',axis=1)
X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=0.2, random_state=1)
regr = MLPRegressor(hidden_layer_sizes=(layer),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(X_train, y_train)
print("xxxx")
y_pred = regr.predict(X_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
train= np.sqrt(metrics.mean_squared_error(y_train, y_pred))
graph_training(y_pred,y_train)
y_pred = regr.predict(X_val)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_val, y_pred)))
test=np.sqrt(metrics.mean_squared_error(y_val, y_pred))
graph_testing(y_pred,y_val)
return train,test
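# Plotting helpers: grouped bar charts of train vs. test RMSE, one pair of bars
# per model (or per season for Bar_graph_season).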
def Bar_graph (a1,a2,b1,b2,c1,c2,d1,d2,e1,e2,f1,f2):
barWidth = 0.2
bars2 = [a2,b2,c2,d2,e2,f2]
bars1 = [a1,b1,c1,d1,e1,f1]
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', capsize=7, label='Train')
plt.bar(r2, bars2, width = barWidth, color = 'cyan', edgecolor = 'black', capsize=7, label='Test')
plt.xticks([r + barWidth for r in range(len(bars1))], ['LinearRegression', 'LR with Lasso','LR with Ridge','SVM','random forest', 'Neural Network'])
plt.ylabel('RMSE')
plt.xlabel('Models')
plt.legend()
plt.show()
def Bar_graph_without_svm(a1,a2,b1,b2,c1,c2,d1,d2,e1,e2):
barWidth = 0.2
bars2 = [a2,b2,c2,d2,e2]
bars1 = [a1,b1,c1,d1,e1]
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', capsize=7, label='Train')
plt.bar(r2, bars2, width = barWidth, color = 'cyan', edgecolor = 'black',capsize=7, label='Test')
plt.xticks([r + barWidth for r in range(len(bars1))], ['LinearRegression', 'LR with Lasso','LR with Ridge','random forest', 'Neural Network'])
plt.ylabel('RMSE')
plt.xlabel('Models')
plt.legend()
plt.show()
def Bar_graph_season(a1,a2,b1,b2,c1,c2):
barWidth = 0.2
bars2 = [a2,b2,c2]
bars1 = [a1,b1,c1]
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', capsize=7, label='Train')
plt.bar(r2, bars2, width = barWidth, color = 'cyan', edgecolor = 'black',capsize=7, label='Test')
plt.xticks([r + barWidth for r in range(len(bars1))], ['Autumn', 'Summer','Winter'])
plt.ylabel('RMSE')
plt.xlabel('Seasons')
plt.legend()
plt.show()
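# train_test_data_prepare() runs a blocked forward validation on the
# time-ordered data: each fold fits a small MLP on the next `train` rows and
# evaluates on the following `test` rows; the mean test RMSE over all folds is
# returned.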
def train_test_data_prepare(data, train, test, folds):
d_y = pd.read_csv('AQI_prediction_add.csv')
y = d_y.AQI_predicted
x_data = []
y_data = []
errors = []
for i in range(folds):
x_train = data.loc[i*(train+test):(i*(train+test)+train - 1), :]
x_test = data.loc[(i*(train+test)+train):(i+1)*(train+test)-1, :]
y_train = y.loc[i * (train + test):(i * (train + test) + train - 1)]
y_test = y.loc[(i * (train + test) + train):(i + 1) * (train + test) - 1]
regr = MLPRegressor(hidden_layer_sizes=(4, 4),
activation='relu',
solver='adam',
learning_rate='adaptive',
max_iter=1000,
learning_rate_init=0.01,
alpha=0.01,
# batch_size=500,
# early_stopping=True,
random_state=1)
regr.fit(x_train, y_train)
print("xxxx")
y_pred = regr.predict(x_train)
print('Training Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_train, y_pred)))
y_pred = regr.predict(x_test)
print('Testing Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
errors.append(np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("Cross validation test error = ", sum(errors)/len(errors))
return sum(errors)/len(errors)
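# Entry point: run the full experiment suite defined in main().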
main()
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.Lasso",
"sklearn.linear_model.Ridge",
"matplotlib.pyplot.show",
"pandas.get_dummies",
"matplotlib.pyplot.legend",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.ylabel",
"sklearn.svm.SVR",
"matplotlib.pyplot.plot",
"sklearn.neural_network.MLPRegressor",
"numpy.array",
"sklearn.ensemble.ExtraTreesRegressor",
"matplotlib.pyplot.xlabel"
] |
[((836, 876), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (847, 876), True, 'import pandas as pd\n'), ((1157, 1197), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (1168, 1197), True, 'import pandas as pd\n'), ((1480, 1520), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (1491, 1520), True, 'import pandas as pd\n'), ((1640, 1677), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (1651, 1677), True, 'import pandas as pd\n'), ((1976, 2006), 'pandas.read_csv', 'pd.read_csv', (['"""autumn_data.csv"""'], {}), "('autumn_data.csv')\n", (1987, 2006), True, 'import pandas as pd\n'), ((2109, 2170), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _conds']", 'prefix': "[' _conds']"}), "(data, columns=[' _conds'], prefix=[' _conds'])\n", (2123, 2170), True, 'import pandas as pd\n'), ((2182, 2243), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _wdire']", 'prefix': "[' _wdire']"}), "(data, columns=[' _wdire'], prefix=[' _wdire'])\n", (2196, 2243), True, 'import pandas as pd\n'), ((2255, 2310), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['Type']", 'prefix': "['Type']"}), "(data, columns=['Type'], prefix=['Type'])\n", (2269, 2310), True, 'import pandas as pd\n'), ((2519, 2549), 'pandas.read_csv', 'pd.read_csv', (['"""summer_data.csv"""'], {}), "('summer_data.csv')\n", (2530, 2549), True, 'import pandas as pd\n'), ((2652, 2713), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _conds']", 'prefix': "[' _conds']"}), "(data, columns=[' _conds'], prefix=[' _conds'])\n", (2666, 2713), True, 'import pandas as pd\n'), ((2725, 2786), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _wdire']", 'prefix': "[' _wdire']"}), "(data, columns=[' _wdire'], prefix=[' _wdire'])\n", (2739, 2786), True, 'import pandas as pd\n'), ((2798, 2853), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['Type']", 'prefix': "['Type']"}), "(data, columns=['Type'], prefix=['Type'])\n", (2812, 2853), True, 'import pandas as pd\n'), ((3062, 3092), 'pandas.read_csv', 'pd.read_csv', (['"""winter_data.csv"""'], {}), "('winter_data.csv')\n", (3073, 3092), True, 'import pandas as pd\n'), ((3195, 3256), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _conds']", 'prefix': "[' _conds']"}), "(data, columns=[' _conds'], prefix=[' _conds'])\n", (3209, 3256), True, 'import pandas as pd\n'), ((3268, 3329), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "[' _wdire']", 'prefix': "[' _wdire']"}), "(data, columns=[' _wdire'], prefix=[' _wdire'])\n", (3282, 3329), True, 'import pandas as pd\n'), ((3341, 3396), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['Type']", 'prefix': "['Type']"}), "(data, columns=['Type'], prefix=['Type'])\n", (3355, 3396), True, 'import pandas as pd\n'), ((3591, 3631), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (3602, 3631), True, 'import pandas as pd\n'), ((3974, 4014), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (3985, 4014), True, 'import pandas as pd\n'), ((4141, 4178), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (4152, 
4178), True, 'import pandas as pd\n'), ((4488, 4528), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (4499, 4528), True, 'import pandas as pd\n'), ((4851, 4891), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (4862, 4891), True, 'import pandas as pd\n'), ((4941, 4978), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (4952, 4978), True, 'import pandas as pd\n'), ((5310, 5350), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (5321, 5350), True, 'import pandas as pd\n'), ((5640, 5680), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (5651, 5680), True, 'import pandas as pd\n'), ((5709, 5746), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (5720, 5746), True, 'import pandas as pd\n'), ((6027, 6067), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (6038, 6067), True, 'import pandas as pd\n'), ((6411, 6451), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (6422, 6451), True, 'import pandas as pd\n'), ((6458, 6495), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (6469, 6495), True, 'import pandas as pd\n'), ((7262, 7302), 'pandas.read_csv', 'pd.read_csv', (['"""Original_with_dummies.csv"""'], {}), "('Original_with_dummies.csv')\n", (7273, 7302), True, 'import pandas as pd\n'), ((7312, 7369), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (7326, 7369), True, 'import pandas as pd\n'), ((7620, 7634), 'matplotlib.pyplot.plot', 'plt.plot', (['tips'], {}), '(tips)\n', (7628, 7634), True, 'import matplotlib.pyplot as plt\n'), ((7639, 7660), 'matplotlib.pyplot.xticks', 'plt.xticks', (['d', 'LABELS'], {}), '(d, LABELS)\n', (7649, 7660), True, 'import matplotlib.pyplot as plt\n'), ((7665, 7689), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""No of Days"""'], {}), "('No of Days')\n", (7675, 7689), True, 'import matplotlib.pyplot as plt\n'), ((7694, 7712), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (7704, 7712), True, 'import matplotlib.pyplot as plt\n'), ((7717, 7736), 'matplotlib.pyplot.title', 'plt.title', (['"""Models"""'], {}), "('Models')\n", (7726, 7736), True, 'import matplotlib.pyplot as plt\n'), ((7741, 7753), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7751, 7753), True, 'import matplotlib.pyplot as plt\n'), ((7758, 7768), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7766, 7768), True, 'import matplotlib.pyplot as plt\n'), ((7878, 7935), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (7892, 7935), True, 'import pandas as pd\n'), ((7975, 8031), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (7991, 8031), False, 'from sklearn.model_selection import train_test_split\n'), ((8043, 8061), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), 
'()\n', (8059, 8061), False, 'from sklearn.linear_model import LinearRegression\n'), ((8705, 8726), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (8724, 8726), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((8754, 8811), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (8768, 8811), True, 'import pandas as pd\n'), ((9293, 9346), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (9309, 9346), False, 'from sklearn.model_selection import train_test_split\n'), ((9358, 9376), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (9374, 9376), False, 'from sklearn.linear_model import LinearRegression\n'), ((9998, 10055), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (10012, 10055), True, 'import pandas as pd\n'), ((10170, 10223), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (10186, 10223), False, 'from sklearn.model_selection import train_test_split\n'), ((10235, 10253), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (10251, 10253), False, 'from sklearn.linear_model import LinearRegression\n'), ((10848, 10905), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (10862, 10905), True, 'import pandas as pd\n'), ((11437, 11493), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (11453, 11493), False, 'from sklearn.model_selection import train_test_split\n'), ((11505, 11523), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11521, 11523), False, 'from sklearn.linear_model import LinearRegression\n'), ((12089, 12146), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (12103, 12146), True, 'import pandas as pd\n'), ((12186, 12242), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (12202, 12242), False, 'from sklearn.model_selection import train_test_split\n'), ((12254, 12272), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (12270, 12272), False, 'from sklearn.linear_model import LinearRegression\n'), ((12924, 12945), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (12943, 12945), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((12955, 13012), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (12969, 13012), True, 'import pandas as pd\n'), ((13461, 13514), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (13477, 13514), False, 'from 
sklearn.model_selection import train_test_split\n'), ((13526, 13544), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (13542, 13544), False, 'from sklearn.linear_model import LinearRegression\n'), ((14160, 14217), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (14174, 14217), True, 'import pandas as pd\n'), ((14354, 14407), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (14370, 14407), False, 'from sklearn.model_selection import train_test_split\n'), ((14419, 14437), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (14435, 14437), False, 'from sklearn.linear_model import LinearRegression\n'), ((15026, 15083), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (15040, 15083), True, 'import pandas as pd\n'), ((15692, 15748), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (15708, 15748), False, 'from sklearn.model_selection import train_test_split\n'), ((15760, 15778), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (15776, 15778), False, 'from sklearn.linear_model import LinearRegression\n'), ((16449, 16497), 'matplotlib.pyplot.plot', 'plt.plot', (['all_samples', 'y_pred'], {'label': '"""Predicted"""'}), "(all_samples, y_pred, label='Predicted')\n", (16457, 16497), True, 'import matplotlib.pyplot as plt\n'), ((16501, 16549), 'matplotlib.pyplot.plot', 'plt.plot', (['all_samples', 'y_train'], {'label': '"""Expected"""'}), "(all_samples, y_train, label='Expected')\n", (16509, 16549), True, 'import matplotlib.pyplot as plt\n'), ((16554, 16581), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""No of Samples"""'], {}), "('No of Samples')\n", (16564, 16581), True, 'import matplotlib.pyplot as plt\n'), ((16586, 16603), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""AQI"""'], {}), "('AQI')\n", (16596, 16603), True, 'import matplotlib.pyplot as plt\n'), ((16608, 16629), 'matplotlib.pyplot.title', 'plt.title', (['"""Training"""'], {}), "('Training')\n", (16617, 16629), True, 'import matplotlib.pyplot as plt\n'), ((16634, 16646), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16644, 16646), True, 'import matplotlib.pyplot as plt\n'), ((16651, 16661), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16659, 16661), True, 'import matplotlib.pyplot as plt\n'), ((16794, 16842), 'matplotlib.pyplot.plot', 'plt.plot', (['all_samples', 'y_pred'], {'label': '"""Predicted"""'}), "(all_samples, y_pred, label='Predicted')\n", (16802, 16842), True, 'import matplotlib.pyplot as plt\n'), ((16846, 16892), 'matplotlib.pyplot.plot', 'plt.plot', (['all_samples', 'y_val'], {'label': '"""Expected"""'}), "(all_samples, y_val, label='Expected')\n", (16854, 16892), True, 'import matplotlib.pyplot as plt\n'), ((16897, 16924), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""No of Samples"""'], {}), "('No of Samples')\n", (16907, 16924), True, 'import matplotlib.pyplot as plt\n'), ((16929, 16946), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""AQI"""'], {}), "('AQI')\n", (16939, 16946), True, 'import matplotlib.pyplot as plt\n'), ((16951, 16974), 'matplotlib.pyplot.title', 
'plt.title', (['"""Validation"""'], {}), "('Validation')\n", (16960, 16974), True, 'import matplotlib.pyplot as plt\n'), ((16979, 16991), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16989, 16991), True, 'import matplotlib.pyplot as plt\n'), ((16996, 17006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17004, 17006), True, 'import matplotlib.pyplot as plt\n'), ((17095, 17152), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (17109, 17152), True, 'import pandas as pd\n'), ((17192, 17248), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (17208, 17248), False, 'from sklearn.model_selection import train_test_split\n'), ((17260, 17278), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""scale"""'}), "(gamma='scale')\n", (17263, 17278), False, 'from sklearn.svm import SVR\n'), ((17925, 17946), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (17944, 17946), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((17974, 18031), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (17988, 18031), True, 'import pandas as pd\n'), ((18513, 18566), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (18529, 18566), False, 'from sklearn.model_selection import train_test_split\n'), ((18578, 18596), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""scale"""'}), "(gamma='scale')\n", (18581, 18596), False, 'from sklearn.svm import SVR\n'), ((19222, 19279), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (19236, 19279), True, 'import pandas as pd\n'), ((19430, 19483), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (19446, 19483), False, 'from sklearn.model_selection import train_test_split\n'), ((19495, 19513), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""scale"""'}), "(gamma='scale')\n", (19498, 19513), False, 'from sklearn.svm import SVR\n'), ((20112, 20169), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (20126, 20169), True, 'import pandas as pd\n'), ((20782, 20838), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (20798, 20838), False, 'from sklearn.model_selection import train_test_split\n'), ((20850, 20868), 'sklearn.svm.SVR', 'SVR', ([], {'gamma': '"""scale"""'}), "(gamma='scale')\n", (20853, 20868), False, 'from sklearn.svm import SVR\n'), ((21494, 21582), 'numpy.array', 'np.array', (['[0.011, 0.1, 0.001, 0.01, 0.3, 0.2, 0.6, 0.8, 0.001, 0.0001, 3, 4, 1, 2.4]'], {}), '([0.011, 0.1, 0.001, 0.01, 0.3, 0.2, 0.6, 0.8, 0.001, 0.0001, 3, 4,\n 1, 2.4])\n', (21502, 21582), True, 'import numpy as np\n'), ((21602, 21609), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (21607, 21609), False, 'from sklearn.linear_model import Ridge, Lasso\n'), 
((22107, 22164), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (22121, 22164), True, 'import pandas as pd\n'), ((22204, 22260), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (22220, 22260), False, 'from sklearn.model_selection import train_test_split\n'), ((22272, 22286), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (22277, 22286), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((22937, 22958), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (22956, 22958), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((22986, 23043), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (23000, 23043), True, 'import pandas as pd\n'), ((23525, 23578), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (23541, 23578), False, 'from sklearn.model_selection import train_test_split\n'), ((23590, 23604), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (23595, 23604), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((24372, 24425), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (24388, 24425), False, 'from sklearn.model_selection import train_test_split\n'), ((24437, 24451), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (24442, 24451), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((25576, 25632), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (25592, 25632), False, 'from sklearn.model_selection import train_test_split\n'), ((25644, 25658), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (25649, 25658), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((26230, 26287), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (26244, 26287), True, 'import pandas as pd\n'), ((26327, 26383), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (26343, 26383), False, 'from sklearn.model_selection import train_test_split\n'), ((26395, 26409), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (26400, 26409), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((27069, 27090), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (27088, 27090), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((27100, 27157), 'pandas.get_dummies', 'pd.get_dummies', (['data'], {'columns': "['month']", 'prefix': "['month']"}), "(data, columns=['month'], prefix=['month'])\n", (27114, 27157), True, 'import pandas as pd\n'), ((27606, 27659), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], 
{'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (27622, 27659), False, 'from sklearn.model_selection import train_test_split\n'), ((27671, 27685), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (27676, 27685), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((28482, 28535), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (28498, 28535), False, 'from sklearn.model_selection import train_test_split\n'), ((28547, 28561), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (28552, 28561), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((29751, 29807), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (29767, 29807), False, 'from sklearn.model_selection import train_test_split\n'), ((29819, 29833), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'alpha': 'h'}), '(alpha=h)\n', (29824, 29833), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((30508, 30564), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (30524, 30564), False, 'from sklearn.model_selection import train_test_split\n'), ((30576, 30590), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (30581, 30590), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((31241, 31262), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (31260, 31262), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((31831, 31884), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (31847, 31884), False, 'from sklearn.model_selection import train_test_split\n'), ((31896, 31910), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (31901, 31910), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((32642, 32695), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (32658, 32695), False, 'from sklearn.model_selection import train_test_split\n'), ((32707, 32721), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (32712, 32721), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((33916, 33972), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (33932, 33972), False, 'from sklearn.model_selection import train_test_split\n'), ((33983, 33997), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (33988, 33997), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((34670, 34726), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (34686, 34726), False, 'from sklearn.model_selection import train_test_split\n'), ((34737, 34751), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (34742, 34751), 
False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((35411, 35432), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (35430, 35432), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((35950, 36003), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (35966, 36003), False, 'from sklearn.model_selection import train_test_split\n'), ((36015, 36029), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (36020, 36029), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((36755, 36808), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (36771, 36808), False, 'from sklearn.model_selection import train_test_split\n'), ((36820, 36834), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (36825, 36834), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((37952, 38008), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (37968, 38008), False, 'from sklearn.model_selection import train_test_split\n'), ((38020, 38034), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'h'}), '(alpha=h)\n', (38025, 38034), False, 'from sklearn.linear_model import Ridge, Lasso\n'), ((38669, 38725), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (38685, 38725), False, 'from sklearn.model_selection import train_test_split\n'), ((38737, 38760), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (38758, 38760), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((39406, 39427), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (39425, 39427), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((39996, 40049), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (40012, 40049), False, 'from sklearn.model_selection import train_test_split\n'), ((40061, 40084), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (40082, 40084), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((40811, 40864), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (40827, 40864), False, 'from sklearn.model_selection import train_test_split\n'), ((40876, 40899), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (40897, 40899), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((42018, 42074), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (42034, 42074), False, 'from sklearn.model_selection import train_test_split\n'), ((42085, 42108), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (42106, 42108), False, 'from sklearn.ensemble import 
RandomForestRegressor\n'), ((42776, 42832), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (42792, 42832), False, 'from sklearn.model_selection import train_test_split\n'), ((42844, 42867), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (42865, 42867), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((43522, 43543), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (43541, 43543), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((44061, 44114), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (44077, 44114), False, 'from sklearn.model_selection import train_test_split\n'), ((44126, 44149), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (44147, 44149), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((44870, 44923), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (44886, 44923), False, 'from sklearn.model_selection import train_test_split\n'), ((44935, 44958), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (44956, 44958), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((46072, 46128), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (46088, 46128), False, 'from sklearn.model_selection import train_test_split\n'), ((46140, 46163), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {}), '()\n', (46161, 46163), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((46909, 46965), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (46925, 46965), False, 'from sklearn.model_selection import train_test_split\n'), ((46977, 47168), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (46989, 47168), False, 'from sklearn.neural_network import MLPRegressor\n'), ((48189, 48210), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (48208, 48210), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((48708, 48761), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (48724, 48761), False, 'from sklearn.model_selection import train_test_split\n'), ((48774, 48965), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 
'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (48786, 48965), False, 'from sklearn.neural_network import MLPRegressor\n'), ((50067, 50120), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (50083, 50120), False, 'from sklearn.model_selection import train_test_split\n'), ((50133, 50324), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (50145, 50324), False, 'from sklearn.neural_network import MLPRegressor\n'), ((51889, 51945), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (51905, 51945), False, 'from sklearn.model_selection import train_test_split\n'), ((51957, 52148), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (51969, 52148), False, 'from sklearn.neural_network import MLPRegressor\n'), ((53191, 53247), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (53207, 53247), False, 'from sklearn.model_selection import train_test_split\n'), ((53259, 53450), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (53271, 53450), False, 'from sklearn.neural_network import MLPRegressor\n'), ((54480, 54501), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {}), '()\n', (54499, 54501), False, 'from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor\n'), ((55019, 55072), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (55035, 55072), False, 'from sklearn.model_selection import train_test_split\n'), ((55085, 55276), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 
'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (55097, 55276), False, 'from sklearn.neural_network import MLPRegressor\n'), ((56443, 56496), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(x, y, test_size=0.2, random_state=1)\n', (56459, 56496), False, 'from sklearn.model_selection import train_test_split\n'), ((56508, 56699), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (56520, 56699), False, 'from sklearn.neural_network import MLPRegressor\n'), ((58260, 58316), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(data, y, test_size=0.2, random_state=1)\n', (58276, 58316), False, 'from sklearn.model_selection import train_test_split\n'), ((58329, 58520), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': 'layer', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'batch_size': '(500)', 'random_state': '(1)'}), "(hidden_layer_sizes=layer, activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, batch_size=500, random_state=1)\n", (58341, 58520), False, 'from sklearn.neural_network import MLPRegressor\n'), ((59626, 59724), 'matplotlib.pyplot.bar', 'plt.bar', (['r1', 'bars1'], {'width': 'barWidth', 'color': '"""blue"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Train"""'}), "(r1, bars1, width=barWidth, color='blue', edgecolor='black', capsize\n =7, label='Train')\n", (59633, 59724), True, 'import matplotlib.pyplot as plt\n'), ((59731, 59828), 'matplotlib.pyplot.bar', 'plt.bar', (['r2', 'bars2'], {'width': 'barWidth', 'color': '"""cyan"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Test"""'}), "(r2, bars2, width=barWidth, color='cyan', edgecolor='black', capsize\n =7, label='Test')\n", (59738, 59828), True, 'import matplotlib.pyplot as plt\n'), ((59988, 60006), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (59998, 60006), True, 'import matplotlib.pyplot as plt\n'), ((60011, 60031), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Models"""'], {}), "('Models')\n", (60021, 60031), True, 'import matplotlib.pyplot as plt\n'), ((60036, 60048), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (60046, 60048), True, 'import matplotlib.pyplot as plt\n'), ((60053, 60063), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (60061, 60063), True, 'import matplotlib.pyplot as plt\n'), ((60271, 60369), 'matplotlib.pyplot.bar', 'plt.bar', (['r1', 'bars1'], {'width': 'barWidth', 'color': '"""blue"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': 
'"""Train"""'}), "(r1, bars1, width=barWidth, color='blue', edgecolor='black', capsize\n =7, label='Train')\n", (60278, 60369), True, 'import matplotlib.pyplot as plt\n'), ((60376, 60473), 'matplotlib.pyplot.bar', 'plt.bar', (['r2', 'bars2'], {'width': 'barWidth', 'color': '"""cyan"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Test"""'}), "(r2, bars2, width=barWidth, color='cyan', edgecolor='black', capsize\n =7, label='Test')\n", (60383, 60473), True, 'import matplotlib.pyplot as plt\n'), ((60625, 60643), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (60635, 60643), True, 'import matplotlib.pyplot as plt\n'), ((60648, 60668), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Models"""'], {}), "('Models')\n", (60658, 60668), True, 'import matplotlib.pyplot as plt\n'), ((60673, 60685), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (60683, 60685), True, 'import matplotlib.pyplot as plt\n'), ((60690, 60700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (60698, 60700), True, 'import matplotlib.pyplot as plt\n'), ((60879, 60977), 'matplotlib.pyplot.bar', 'plt.bar', (['r1', 'bars1'], {'width': 'barWidth', 'color': '"""blue"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Train"""'}), "(r1, bars1, width=barWidth, color='blue', edgecolor='black', capsize\n =7, label='Train')\n", (60886, 60977), True, 'import matplotlib.pyplot as plt\n'), ((60984, 61081), 'matplotlib.pyplot.bar', 'plt.bar', (['r2', 'bars2'], {'width': 'barWidth', 'color': '"""cyan"""', 'edgecolor': '"""black"""', 'capsize': '(7)', 'label': '"""Test"""'}), "(r2, bars2, width=barWidth, color='cyan', edgecolor='black', capsize\n =7, label='Test')\n", (60991, 61081), True, 'import matplotlib.pyplot as plt\n'), ((61175, 61193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (61185, 61193), True, 'import matplotlib.pyplot as plt\n'), ((61198, 61219), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Seasons"""'], {}), "('Seasons')\n", (61208, 61219), True, 'import matplotlib.pyplot as plt\n'), ((61224, 61236), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (61234, 61236), True, 'import matplotlib.pyplot as plt\n'), ((61241, 61251), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (61249, 61251), True, 'import matplotlib.pyplot as plt\n'), ((61319, 61356), 'pandas.read_csv', 'pd.read_csv', (['"""AQI_prediction_add.csv"""'], {}), "('AQI_prediction_add.csv')\n", (61330, 61356), True, 'import pandas as pd\n'), ((8266, 8309), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (8292, 8309), False, 'from sklearn import metrics\n'), ((8494, 8535), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (8520, 8535), False, 'from sklearn import metrics\n'), ((9581, 9624), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (9607, 9624), False, 'from sklearn import metrics\n'), ((9809, 9850), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (9835, 9850), False, 'from sklearn import metrics\n'), ((10458, 10501), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (10484, 10501), False, 'from sklearn import metrics\n'), ((10686, 10727), 'sklearn.metrics.mean_squared_error', 
'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (10712, 10727), False, 'from sklearn import metrics\n'), ((11728, 11771), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (11754, 11771), False, 'from sklearn import metrics\n'), ((11956, 11997), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (11982, 11997), False, 'from sklearn import metrics\n'), ((12477, 12520), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (12503, 12520), False, 'from sklearn import metrics\n'), ((12705, 12746), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (12731, 12746), False, 'from sklearn import metrics\n'), ((13749, 13792), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (13775, 13792), False, 'from sklearn import metrics\n'), ((13977, 14018), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (14003, 14018), False, 'from sklearn import metrics\n'), ((14642, 14685), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (14668, 14685), False, 'from sklearn import metrics\n'), ((14870, 14911), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (14896, 14911), False, 'from sklearn import metrics\n'), ((15983, 16026), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (16009, 16026), False, 'from sklearn import metrics\n'), ((16211, 16252), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (16237, 16252), False, 'from sklearn import metrics\n'), ((17483, 17526), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (17509, 17526), False, 'from sklearn import metrics\n'), ((17711, 17752), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (17737, 17752), False, 'from sklearn import metrics\n'), ((18801, 18844), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (18827, 18844), False, 'from sklearn import metrics\n'), ((19029, 19070), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (19055, 19070), False, 'from sklearn import metrics\n'), ((19718, 19761), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (19744, 19761), False, 'from sklearn import metrics\n'), ((19946, 19987), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (19972, 19987), False, 'from sklearn import metrics\n'), ((21073, 21116), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (21099, 21116), False, 'from sklearn import metrics\n'), ((21301, 21342), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (21327, 21342), 
False, 'from sklearn import metrics\n'), ((22491, 22534), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (22517, 22534), False, 'from sklearn import metrics\n'), ((22719, 22760), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (22745, 22760), False, 'from sklearn import metrics\n'), ((23809, 23852), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (23835, 23852), False, 'from sklearn import metrics\n'), ((24037, 24078), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (24063, 24078), False, 'from sklearn import metrics\n'), ((24656, 24699), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (24682, 24699), False, 'from sklearn import metrics\n'), ((24884, 24925), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (24910, 24925), False, 'from sklearn import metrics\n'), ((25863, 25906), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (25889, 25906), False, 'from sklearn import metrics\n'), ((26091, 26132), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (26117, 26132), False, 'from sklearn import metrics\n'), ((26614, 26657), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (26640, 26657), False, 'from sklearn import metrics\n'), ((26842, 26883), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (26868, 26883), False, 'from sklearn import metrics\n'), ((27890, 27933), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (27916, 27933), False, 'from sklearn import metrics\n'), ((28118, 28159), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (28144, 28159), False, 'from sklearn import metrics\n'), ((28766, 28809), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (28792, 28809), False, 'from sklearn import metrics\n'), ((28994, 29035), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (29020, 29035), False, 'from sklearn import metrics\n'), ((30038, 30081), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (30064, 30081), False, 'from sklearn import metrics\n'), ((30266, 30307), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (30292, 30307), False, 'from sklearn import metrics\n'), ((30795, 30838), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (30821, 30838), False, 'from sklearn import metrics\n'), ((31023, 31064), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (31049, 31064), False, 'from sklearn import metrics\n'), ((32115, 32158), 'sklearn.metrics.mean_squared_error', 
'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (32141, 32158), False, 'from sklearn import metrics\n'), ((32343, 32384), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (32369, 32384), False, 'from sklearn import metrics\n'), ((32926, 32969), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (32952, 32969), False, 'from sklearn import metrics\n'), ((33154, 33195), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (33180, 33195), False, 'from sklearn import metrics\n'), ((34202, 34245), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (34228, 34245), False, 'from sklearn import metrics\n'), ((34430, 34471), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (34456, 34471), False, 'from sklearn import metrics\n'), ((34956, 34999), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (34982, 34999), False, 'from sklearn import metrics\n'), ((35184, 35225), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (35210, 35225), False, 'from sklearn import metrics\n'), ((36234, 36277), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (36260, 36277), False, 'from sklearn import metrics\n'), ((36462, 36503), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (36488, 36503), False, 'from sklearn import metrics\n'), ((37039, 37082), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (37065, 37082), False, 'from sklearn import metrics\n'), ((37267, 37308), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (37293, 37308), False, 'from sklearn import metrics\n'), ((38239, 38282), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (38265, 38282), False, 'from sklearn import metrics\n'), ((38467, 38508), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (38493, 38508), False, 'from sklearn import metrics\n'), ((38965, 39008), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (38991, 39008), False, 'from sklearn import metrics\n'), ((39193, 39234), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (39219, 39234), False, 'from sklearn import metrics\n'), ((40289, 40332), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (40315, 40332), False, 'from sklearn import metrics\n'), ((40517, 40558), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (40543, 40558), False, 'from sklearn import metrics\n'), ((41104, 41147), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (41130, 41147), 
False, 'from sklearn import metrics\n'), ((41332, 41373), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (41358, 41373), False, 'from sklearn import metrics\n'), ((42313, 42356), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (42339, 42356), False, 'from sklearn import metrics\n'), ((42541, 42582), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (42567, 42582), False, 'from sklearn import metrics\n'), ((43072, 43115), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (43098, 43115), False, 'from sklearn import metrics\n'), ((43300, 43341), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (43326, 43341), False, 'from sklearn import metrics\n'), ((44354, 44397), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (44380, 44397), False, 'from sklearn import metrics\n'), ((44582, 44623), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (44608, 44623), False, 'from sklearn import metrics\n'), ((45163, 45206), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (45189, 45206), False, 'from sklearn import metrics\n'), ((45391, 45432), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (45417, 45432), False, 'from sklearn import metrics\n'), ((46368, 46411), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (46394, 46411), False, 'from sklearn import metrics\n'), ((46596, 46637), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (46622, 46637), False, 'from sklearn import metrics\n'), ((47741, 47784), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (47767, 47784), False, 'from sklearn import metrics\n'), ((47969, 48010), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (47995, 48010), False, 'from sklearn import metrics\n'), ((49538, 49581), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (49564, 49581), False, 'from sklearn import metrics\n'), ((49766, 49807), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (49792, 49807), False, 'from sklearn import metrics\n'), ((50897, 50940), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (50923, 50940), False, 'from sklearn import metrics\n'), ((51125, 51166), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (51151, 51166), False, 'from sklearn import metrics\n'), ((52721, 52764), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (52747, 52764), False, 'from sklearn import metrics\n'), ((52949, 52990), 'sklearn.metrics.mean_squared_error', 
'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (52975, 52990), False, 'from sklearn import metrics\n'), ((54023, 54066), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (54049, 54066), False, 'from sklearn import metrics\n'), ((54251, 54292), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (54277, 54292), False, 'from sklearn import metrics\n'), ((55849, 55892), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (55875, 55892), False, 'from sklearn import metrics\n'), ((56077, 56118), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (56103, 56118), False, 'from sklearn import metrics\n'), ((57272, 57315), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (57298, 57315), False, 'from sklearn import metrics\n'), ((57500, 57541), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (57526, 57541), False, 'from sklearn import metrics\n'), ((59093, 59136), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (59119, 59136), False, 'from sklearn import metrics\n'), ((59321, 59362), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (59347, 59362), False, 'from sklearn import metrics\n'), ((61780, 61956), 'sklearn.neural_network.MLPRegressor', 'MLPRegressor', ([], {'hidden_layer_sizes': '(4, 4)', 'activation': '"""relu"""', 'solver': '"""adam"""', 'learning_rate': '"""adaptive"""', 'max_iter': '(1000)', 'learning_rate_init': '(0.01)', 'alpha': '(0.01)', 'random_state': '(1)'}), "(hidden_layer_sizes=(4, 4), activation='relu', solver='adam',\n learning_rate='adaptive', max_iter=1000, learning_rate_init=0.01, alpha\n =0.01, random_state=1)\n", (61792, 61956), False, 'from sklearn.neural_network import MLPRegressor\n'), ((8201, 8244), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (8227, 8244), False, 'from sklearn import metrics\n'), ((8433, 8474), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (8459, 8474), False, 'from sklearn import metrics\n'), ((9516, 9559), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (9542, 9559), False, 'from sklearn import metrics\n'), ((9748, 9789), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (9774, 9789), False, 'from sklearn import metrics\n'), ((10393, 10436), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (10419, 10436), False, 'from sklearn import metrics\n'), ((10625, 10666), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (10651, 10666), False, 'from sklearn import metrics\n'), ((11663, 11706), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (11689, 11706), False, 'from sklearn import metrics\n'), ((11895, 
11936), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (11921, 11936), False, 'from sklearn import metrics\n'), ((12412, 12455), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (12438, 12455), False, 'from sklearn import metrics\n'), ((12644, 12685), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (12670, 12685), False, 'from sklearn import metrics\n'), ((13684, 13727), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (13710, 13727), False, 'from sklearn import metrics\n'), ((13916, 13957), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (13942, 13957), False, 'from sklearn import metrics\n'), ((14577, 14620), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (14603, 14620), False, 'from sklearn import metrics\n'), ((14809, 14850), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (14835, 14850), False, 'from sklearn import metrics\n'), ((15918, 15961), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (15944, 15961), False, 'from sklearn import metrics\n'), ((16150, 16191), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (16176, 16191), False, 'from sklearn import metrics\n'), ((17418, 17461), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (17444, 17461), False, 'from sklearn import metrics\n'), ((17650, 17691), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (17676, 17691), False, 'from sklearn import metrics\n'), ((18736, 18779), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (18762, 18779), False, 'from sklearn import metrics\n'), ((18968, 19009), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (18994, 19009), False, 'from sklearn import metrics\n'), ((19653, 19696), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (19679, 19696), False, 'from sklearn import metrics\n'), ((19885, 19926), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (19911, 19926), False, 'from sklearn import metrics\n'), ((21008, 21051), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (21034, 21051), False, 'from sklearn import metrics\n'), ((21240, 21281), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (21266, 21281), False, 'from sklearn import metrics\n'), ((22426, 22469), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (22452, 22469), False, 'from sklearn import metrics\n'), ((22658, 22699), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], 
{}), '(y_val, y_pred)\n', (22684, 22699), False, 'from sklearn import metrics\n'), ((23744, 23787), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (23770, 23787), False, 'from sklearn import metrics\n'), ((23976, 24017), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (24002, 24017), False, 'from sklearn import metrics\n'), ((24591, 24634), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (24617, 24634), False, 'from sklearn import metrics\n'), ((24823, 24864), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (24849, 24864), False, 'from sklearn import metrics\n'), ((25798, 25841), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (25824, 25841), False, 'from sklearn import metrics\n'), ((26030, 26071), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (26056, 26071), False, 'from sklearn import metrics\n'), ((26549, 26592), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (26575, 26592), False, 'from sklearn import metrics\n'), ((26781, 26822), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (26807, 26822), False, 'from sklearn import metrics\n'), ((27825, 27868), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (27851, 27868), False, 'from sklearn import metrics\n'), ((28057, 28098), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (28083, 28098), False, 'from sklearn import metrics\n'), ((28701, 28744), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (28727, 28744), False, 'from sklearn import metrics\n'), ((28933, 28974), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (28959, 28974), False, 'from sklearn import metrics\n'), ((29973, 30016), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (29999, 30016), False, 'from sklearn import metrics\n'), ((30205, 30246), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (30231, 30246), False, 'from sklearn import metrics\n'), ((30730, 30773), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (30756, 30773), False, 'from sklearn import metrics\n'), ((30962, 31003), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (30988, 31003), False, 'from sklearn import metrics\n'), ((32050, 32093), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (32076, 32093), False, 'from sklearn import metrics\n'), ((32282, 32323), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (32308, 32323), False, 'from sklearn import metrics\n'), ((32861, 
32904), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (32887, 32904), False, 'from sklearn import metrics\n'), ((33093, 33134), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (33119, 33134), False, 'from sklearn import metrics\n'), ((34137, 34180), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (34163, 34180), False, 'from sklearn import metrics\n'), ((34369, 34410), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (34395, 34410), False, 'from sklearn import metrics\n'), ((34891, 34934), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (34917, 34934), False, 'from sklearn import metrics\n'), ((35123, 35164), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (35149, 35164), False, 'from sklearn import metrics\n'), ((36169, 36212), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (36195, 36212), False, 'from sklearn import metrics\n'), ((36401, 36442), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (36427, 36442), False, 'from sklearn import metrics\n'), ((36974, 37017), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (37000, 37017), False, 'from sklearn import metrics\n'), ((37206, 37247), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (37232, 37247), False, 'from sklearn import metrics\n'), ((38174, 38217), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (38200, 38217), False, 'from sklearn import metrics\n'), ((38406, 38447), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (38432, 38447), False, 'from sklearn import metrics\n'), ((38900, 38943), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (38926, 38943), False, 'from sklearn import metrics\n'), ((39132, 39173), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (39158, 39173), False, 'from sklearn import metrics\n'), ((40224, 40267), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (40250, 40267), False, 'from sklearn import metrics\n'), ((40456, 40497), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (40482, 40497), False, 'from sklearn import metrics\n'), ((41039, 41082), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (41065, 41082), False, 'from sklearn import metrics\n'), ((41271, 41312), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (41297, 41312), False, 'from sklearn import metrics\n'), ((42248, 42291), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 
'y_pred'], {}), '(y_train, y_pred)\n', (42274, 42291), False, 'from sklearn import metrics\n'), ((42480, 42521), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (42506, 42521), False, 'from sklearn import metrics\n'), ((43007, 43050), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (43033, 43050), False, 'from sklearn import metrics\n'), ((43239, 43280), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (43265, 43280), False, 'from sklearn import metrics\n'), ((44289, 44332), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (44315, 44332), False, 'from sklearn import metrics\n'), ((44521, 44562), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (44547, 44562), False, 'from sklearn import metrics\n'), ((45098, 45141), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (45124, 45141), False, 'from sklearn import metrics\n'), ((45330, 45371), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (45356, 45371), False, 'from sklearn import metrics\n'), ((46303, 46346), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (46329, 46346), False, 'from sklearn import metrics\n'), ((46535, 46576), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (46561, 46576), False, 'from sklearn import metrics\n'), ((47676, 47719), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (47702, 47719), False, 'from sklearn import metrics\n'), ((47908, 47949), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (47934, 47949), False, 'from sklearn import metrics\n'), ((49473, 49516), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (49499, 49516), False, 'from sklearn import metrics\n'), ((49705, 49746), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (49731, 49746), False, 'from sklearn import metrics\n'), ((50832, 50875), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (50858, 50875), False, 'from sklearn import metrics\n'), ((51064, 51105), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (51090, 51105), False, 'from sklearn import metrics\n'), ((52656, 52699), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (52682, 52699), False, 'from sklearn import metrics\n'), ((52888, 52929), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (52914, 52929), False, 'from sklearn import metrics\n'), ((53958, 54001), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (53984, 54001), False, 'from sklearn import metrics\n'), 
((54190, 54231), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (54216, 54231), False, 'from sklearn import metrics\n'), ((55784, 55827), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (55810, 55827), False, 'from sklearn import metrics\n'), ((56016, 56057), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (56042, 56057), False, 'from sklearn import metrics\n'), ((57207, 57250), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (57233, 57250), False, 'from sklearn import metrics\n'), ((57439, 57480), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (57465, 57480), False, 'from sklearn import metrics\n'), ((59028, 59071), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (59054, 59071), False, 'from sklearn import metrics\n'), ((59260, 59301), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_val', 'y_pred'], {}), '(y_val, y_pred)\n', (59286, 59301), False, 'from sklearn import metrics\n'), ((62496, 62539), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (62522, 62539), False, 'from sklearn import metrics\n'), ((62638, 62680), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (62664, 62680), False, 'from sklearn import metrics\n'), ((62713, 62755), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (62739, 62755), False, 'from sklearn import metrics\n')]
|
import random
import numpy as np
import matplotlib.pyplot as plt
from torch import tensor
from torch import cat
from torch import clamp
from torch.distributions import normal
from torch import nn
import torch.nn.functional as F
from torch import optim
from torch.utils.tensorboard import SummaryWriter
import torch
import os
print(os.environ)
import roboschool
import gym
model_name = "SAC-RoboschoolHopper-v1"
num_iterations = 3000000
learning_rate = 0.0003
discount_rate = 0.99
replay_buffer_max_size = 1000000
target_smoothing_coefficient = 0.0005
target_update_interval = 1
num_gradient_steps = 1
num_env_steps = 1
reward_scale = 5
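# note: environment rewards are multiplied by reward_scale before being stored in the replay buffer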
minibatch_size = 256
writer = SummaryWriter(log_dir="./runs/v0-1mil-iter-256-node-hidden-layers-buffer-1mil")
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
cpu_device = torch.device("cpu")
# define actor network
class SACRoboschoolHopperActorNN(nn.Module):
def __init__(self):
super(SACRoboschoolHopperActorNN, self).__init__()
self.fc1 = nn.Linear(15, 256)
self.fc2 = nn.Linear(256, 256)
self.mean = nn.Linear(256, 3)
self.log_stdev = nn.Linear(256, 3)
self.normal_dist = normal.Normal(0, 1)
def forward(self, x_state):
x_state = F.relu(self.fc1(x_state))
x_state = F.relu(self.fc2(x_state))
mean = self.mean(x_state)
log_stdev = self.log_stdev(x_state)
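        # reparameterised sample u = mean + sigma * eps with eps ~ N(0, 1); a tanh-squashed copy is used
        # below for the change-of-variables correction of the log-probability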
action = mean + self.normal_dist.sample(sample_shape=log_stdev.shape) * torch.exp(log_stdev)
squashed_action = torch.tanh(action)
action_dist = normal.Normal(mean, torch.exp(log_stdev))
        # log pi(a|s) = log mu(u|s) - sum_i log(1 - tanh(u_i)^2), summed over action dimensions
        log_prob_squashed_a = torch.sum(action_dist.log_prob(action), dim=1, keepdim=True) - torch.sum(torch.log(clamp(1 - squashed_action ** 2, min=1e-8)), dim=1, keepdim=True)
return action, log_prob_squashed_a
# define critic network
class SACRoboschoolHopperCriticNN(nn.Module):
def __init__(self):
super(SACRoboschoolHopperCriticNN, self).__init__()
self.fc1 = nn.Linear(18, 100)
self.fc2 = nn.Linear(100, 100)
self.fc3 = nn.Linear(100, 3)
def forward(self, x_state, x_action):
x = cat((x_state, x_action), dim=1) # concatenate inputs along 0th dimension
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# define soft state value network
class SACRoboschoolHopperStateValueNN(nn.Module):
def __init__(self):
super(SACRoboschoolHopperStateValueNN, self).__init__()
self.fc1 = nn.Linear(15, 100)
self.fc2 = nn.Linear(100, 100)
self.fc3 = nn.Linear(100, 1)
def forward(self, x_state):
x = F.relu(self.fc1(x_state))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# Initialize parameter vectors ψ, ψ¯, θ, φ.
state_value_net = SACRoboschoolHopperStateValueNN().to(device)
state_value_target_net = SACRoboschoolHopperStateValueNN().to(device)
critic_net_1 = SACRoboschoolHopperCriticNN().to(device)
critic_net_2 = SACRoboschoolHopperCriticNN().to(device)
actor_net = SACRoboschoolHopperActorNN().to(device)
# make the state value target net parameters the same
state_value_target_net.load_state_dict(state_value_net.state_dict())
# initialize replay buffer D
replay_buffer = []
# initialize train and test environments
env = gym.make('RoboschoolHopper-v1')
curr_state = env.reset()
curr_state = tensor(curr_state).float().to(device)
test_env = gym.make('RoboschoolHopper-v1')
curr_test_state = test_env.reset()
greatest_avg_episode_rewards = -np.inf
# initialize optimizers for each network except target (parameters updated manually)
state_value_net_optimizer = optim.Adam(state_value_net.parameters(), lr=learning_rate)
critic_net_1_optimizer = optim.Adam(critic_net_1.parameters(), lr=learning_rate)
critic_net_2_optimizer = optim.Adam(critic_net_2.parameters(), lr=learning_rate)
actor_net_optimizer = optim.Adam(actor_net.parameters(), lr=learning_rate)
# for each iteration do
for t in range(num_iterations):
# for each environment step do
# (in practice, at most one env step per gradient step)
# at ∼ πφ(at|st)
    # the actor returns (action, log_prob); keep the action as a numpy vector for env.step()
    # and keep its log-probability as a tensor so it can be stored in the replay buffer
    action, log_prob = actor_net(curr_state.view(1, -1).float())
    action = action.detach().to(cpu_device).numpy().squeeze()
    log_prob = log_prob.detach()
# st+1 ∼ p(st+1|st, at)
next_state, reward, done, _ = env.step(action)
reward = reward * reward_scale
# D ← D ∪ {(st, at, r(st, at), st+1)}
replay_buffer.append((curr_state.view(1, -1, ), tensor(action).to(device).view(1, -1, ), log_prob.to(device).view(1, -1, ),
tensor(reward).float().to(device).view(1, 1, ), tensor(next_state).to(device).view(1, -1, ),
tensor(done).to(device).view(1, 1, )))
if len(replay_buffer) > replay_buffer_max_size + 10:
replay_buffer = replay_buffer[10:]
# for each gradient step do
for gradient_step in range(num_gradient_steps):
# Sample mini-batch of N transitions (s, a, r, s') from D
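        # each stored transition is a tuple of 1xD tensors; zipping and concatenating along dim 0 rebuilds full minibatch tensors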
transitions_minibatch = random.choices(replay_buffer, k=minibatch_size)
minibatch_states, minibatch_actions, minibatch_action_log_probs, minibatch_rewards, minibatch_next_states, minibatch_dones = [cat(mb, dim=0) for mb in zip(*transitions_minibatch)]
minibatch_states = minibatch_states.float()
# ψ ← ψ − λV ∇ˆψJV (ψ)
state_value_net.zero_grad()
        # value target: E_{a~pi}[ min(Q1(s,a), Q2(s,a)) - log pi(a|s) ], with actions re-sampled from the current policy
        with torch.no_grad():
            value_actions, value_log_probs = actor_net(minibatch_states)
        state_value_net_loss = torch.mean(0.5 * (state_value_net(minibatch_states) - (torch.min(critic_net_1(minibatch_states, value_actions), critic_net_2(minibatch_states, value_actions)) - value_log_probs)) ** 2)
state_value_net_loss.backward()
state_value_net_optimizer.step()
writer.add_scalar('Loss/state_value_net', state_value_net_loss.detach().to(cpu_device).numpy().squeeze(), t)
# θi ← θi − λQ∇ˆθiJQ(θi) for i ∈ {1, 2}
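        # TD target for both critics: r + gamma * V_target(s') on non-terminal transitions; (1 - done) masks terminal states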
critic_net_1.zero_grad()
critic_net_1_loss = torch.mean(0.5 * (critic_net_1(minibatch_states, minibatch_actions) - (minibatch_rewards + discount_rate*state_value_target_net(minibatch_next_states)*(-minibatch_dones.float() + 1))) ** 2)
critic_net_1_loss.backward()
critic_net_1_optimizer.step()
writer.add_scalar('Loss/critic_net_1', critic_net_1_loss.detach().to(cpu_device).numpy().squeeze(), t)
critic_net_2.zero_grad()
critic_net_2_loss = torch.mean(0.5 * (critic_net_2(minibatch_states, minibatch_actions) - (minibatch_rewards + discount_rate * state_value_target_net(minibatch_next_states)*(-minibatch_dones.float() + 1))) ** 2)
critic_net_2_loss.backward()
critic_net_2_optimizer.step()
writer.add_scalar('Loss/critic_net_2', critic_net_2_loss.detach().to(cpu_device).numpy().squeeze(), t)
# φ ← φ − λπ∇ˆφJπ(φ)
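        # policy loss: E[ log pi(a|s) - min(Q1(s,a), Q2(s,a)) ] with actions re-sampled via the reparameterisation trick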
actor_net.zero_grad()
minibatch_actions_new, minibatch_action_log_probs_new = actor_net(minibatch_states)
actor_net_loss = torch.mean(minibatch_action_log_probs_new - torch.min(critic_net_1(minibatch_states, minibatch_actions_new), critic_net_2(minibatch_states, minibatch_actions_new))) # TODO fix?
actor_net_loss.backward()
actor_net_optimizer.step()
writer.add_scalar('Loss/actor_net', actor_net_loss.detach().to(cpu_device).numpy().squeeze(), t)
# print(actor_net_loss.grad_fn())
# ψ¯ ← τψ + (1 − τ )ψ¯
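    # Polyak averaging: the value-target network slowly tracks the value network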
for state_value_target_net_parameter, state_value_net_parameter in zip(state_value_target_net.parameters(), state_value_net.parameters()):
state_value_target_net_parameter.data = target_smoothing_coefficient*state_value_net_parameter + (1 - target_smoothing_coefficient)*state_value_target_net_parameter
# end for
if t % (num_iterations // 1000) == 0 or t == num_iterations - 1:
print("iter", t)
torch.save(state_value_net.state_dict(), 'models/current/' + model_name + '-state_value_net.pkl')
torch.save(state_value_target_net.state_dict(), 'models/current/' + model_name + '-state_value_target_net.pkl')
torch.save(critic_net_1.state_dict(), 'models/current/' + model_name + '-critic_net_1.pkl')
torch.save(critic_net_2.state_dict(), 'models/current/' + model_name + '-critic_net_2.pkl')
torch.save(actor_net.state_dict(), 'models/current/' + model_name + '-actor_net.pkl')
if not done:
curr_state = tensor(next_state).float().to(device)
else:
curr_state = env.reset()
curr_state = tensor(curr_state).float().to(device)
if t % (num_iterations // 25) == 0 or t == num_iterations - 1:
render = False
num_eval_episodes = 10
test_obs = test_env.reset()
episode_rewards = []
episode_reward = 0
while len(episode_rewards) < num_eval_episodes:
            test_action, _ = actor_net(tensor(test_obs).view(1, -1).float().to(device))
            test_action = test_action.detach().to(cpu_device).numpy().squeeze()
test_obs, test_reward, test_done, _ = test_env.step(test_action)
episode_reward += test_reward
if test_done:
episode_rewards.append(episode_reward)
episode_reward = 0
test_obs = test_env.reset()
if render:
test_env.render()
avg_episode_rewards = np.mean(np.asarray(episode_rewards))
writer.add_scalar('Reward/test', avg_episode_rewards, t)
if avg_episode_rewards > greatest_avg_episode_rewards:
torch.save(actor_net.state_dict(), 'models/current/best/best-' + model_name + '-actor_net.pkl')
# end for
render = True
num_eval_episodes = 10
obs = env.reset()
episode_rewards = []
episode_reward = 0
while len(episode_rewards) < num_eval_episodes:
    action, _ = actor_net(tensor(obs).view(1, -1).float().to(device))
    action = action.detach().to(cpu_device).numpy().squeeze()
obs, reward, done, _ = env.step(action)
episode_reward += reward
if done:
episode_rewards.append(episode_reward)
episode_reward = 0
obs = env.reset()
if render:
env.render()
episode_rewards = np.asarray(episode_rewards)
episode_rewards_histogram = plt.hist(episode_rewards)
plt.title("Episode Rewards")
plt.xlabel("Total Reward")
plt.ylabel("Frequency")
plt.savefig("episode_rewards_hist.png")
plt.savefig("models/current/episode_rewards_hist.png")
print("Mean total episode reward:", np.mean(episode_rewards))
|
[
"matplotlib.pyplot.title",
"gym.make",
"matplotlib.pyplot.hist",
"numpy.asarray",
"random.choices",
"torch.cat",
"torch.distributions.normal.Normal",
"torch.nn.Linear",
"torch.exp",
"numpy.mean",
"torch.utils.tensorboard.SummaryWriter",
"torch.device",
"matplotlib.pyplot.ylabel",
"torch.tensor",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"torch.tanh"
] |
[((672, 751), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': '"""./runs/v0-1mil-iter-256-node-hidden-layers-buffer-1mil"""'}), "(log_dir='./runs/v0-1mil-iter-256-node-hidden-layers-buffer-1mil')\n", (685, 751), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((836, 855), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (848, 855), False, 'import torch\n'), ((869, 888), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (881, 888), False, 'import torch\n'), ((3383, 3414), 'gym.make', 'gym.make', (['"""RoboschoolHopper-v1"""'], {}), "('RoboschoolHopper-v1')\n", (3391, 3414), False, 'import gym\n'), ((3503, 3534), 'gym.make', 'gym.make', (['"""RoboschoolHopper-v1"""'], {}), "('RoboschoolHopper-v1')\n", (3511, 3534), False, 'import gym\n'), ((10409, 10436), 'numpy.asarray', 'np.asarray', (['episode_rewards'], {}), '(episode_rewards)\n', (10419, 10436), True, 'import numpy as np\n'), ((10464, 10489), 'matplotlib.pyplot.hist', 'plt.hist', (['episode_rewards'], {}), '(episode_rewards)\n', (10472, 10489), True, 'import matplotlib.pyplot as plt\n'), ((10490, 10518), 'matplotlib.pyplot.title', 'plt.title', (['"""Episode Rewards"""'], {}), "('Episode Rewards')\n", (10499, 10518), True, 'import matplotlib.pyplot as plt\n'), ((10519, 10545), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Total Reward"""'], {}), "('Total Reward')\n", (10529, 10545), True, 'import matplotlib.pyplot as plt\n'), ((10546, 10569), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (10556, 10569), True, 'import matplotlib.pyplot as plt\n'), ((10570, 10609), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""episode_rewards_hist.png"""'], {}), "('episode_rewards_hist.png')\n", (10581, 10609), True, 'import matplotlib.pyplot as plt\n'), ((10610, 10664), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""models/current/episode_rewards_hist.png"""'], {}), "('models/current/episode_rewards_hist.png')\n", (10621, 10664), True, 'import matplotlib.pyplot as plt\n'), ((10701, 10725), 'numpy.mean', 'np.mean', (['episode_rewards'], {}), '(episode_rewards)\n', (10708, 10725), True, 'import numpy as np\n'), ((1060, 1078), 'torch.nn.Linear', 'nn.Linear', (['(15)', '(256)'], {}), '(15, 256)\n', (1069, 1078), False, 'from torch import nn\n'), ((1098, 1117), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(256)'], {}), '(256, 256)\n', (1107, 1117), False, 'from torch import nn\n'), ((1138, 1155), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(3)'], {}), '(256, 3)\n', (1147, 1155), False, 'from torch import nn\n'), ((1181, 1198), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(3)'], {}), '(256, 3)\n', (1190, 1198), False, 'from torch import nn\n'), ((1226, 1245), 'torch.distributions.normal.Normal', 'normal.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (1239, 1245), False, 'from torch.distributions import normal\n'), ((1572, 1590), 'torch.tanh', 'torch.tanh', (['action'], {}), '(action)\n', (1582, 1590), False, 'import torch\n'), ((2058, 2076), 'torch.nn.Linear', 'nn.Linear', (['(18)', '(100)'], {}), '(18, 100)\n', (2067, 2076), False, 'from torch import nn\n'), ((2096, 2115), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(100)'], {}), '(100, 100)\n', (2105, 2115), False, 'from torch import nn\n'), ((2135, 2152), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(3)'], {}), '(100, 3)\n', (2144, 2152), False, 'from torch import nn\n'), ((2208, 2239), 'torch.cat', 'cat', (['(x_state, x_action)'], {'dim': '(1)'}), '((x_state, x_action), dim=1)\n', (2211, 
2239), False, 'from torch import cat\n'), ((2580, 2598), 'torch.nn.Linear', 'nn.Linear', (['(15)', '(100)'], {}), '(15, 100)\n', (2589, 2598), False, 'from torch import nn\n'), ((2618, 2637), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(100)'], {}), '(100, 100)\n', (2627, 2637), False, 'from torch import nn\n'), ((2657, 2674), 'torch.nn.Linear', 'nn.Linear', (['(100)', '(1)'], {}), '(100, 1)\n', (2666, 2674), False, 'from torch import nn\n'), ((5120, 5167), 'random.choices', 'random.choices', (['replay_buffer'], {'k': 'minibatch_size'}), '(replay_buffer, k=minibatch_size)\n', (5134, 5167), False, 'import random\n'), ((1633, 1653), 'torch.exp', 'torch.exp', (['log_stdev'], {}), '(log_stdev)\n', (1642, 1653), False, 'import torch\n'), ((5302, 5316), 'torch.cat', 'cat', (['mb'], {'dim': '(0)'}), '(mb, dim=0)\n', (5305, 5316), False, 'from torch import cat\n'), ((9651, 9678), 'numpy.asarray', 'np.asarray', (['episode_rewards'], {}), '(episode_rewards)\n', (9661, 9678), True, 'import numpy as np\n'), ((1525, 1545), 'torch.exp', 'torch.exp', (['log_stdev'], {}), '(log_stdev)\n', (1534, 1545), False, 'import torch\n'), ((3453, 3471), 'torch.tensor', 'tensor', (['curr_state'], {}), '(curr_state)\n', (3459, 3471), False, 'from torch import tensor\n'), ((8750, 8768), 'torch.tensor', 'tensor', (['next_state'], {}), '(next_state)\n', (8756, 8768), False, 'from torch import tensor\n'), ((8852, 8870), 'torch.tensor', 'tensor', (['curr_state'], {}), '(curr_state)\n', (8858, 8870), False, 'from torch import tensor\n'), ((4577, 4591), 'torch.tensor', 'tensor', (['action'], {}), '(action)\n', (4583, 4591), False, 'from torch import tensor\n'), ((4727, 4745), 'torch.tensor', 'tensor', (['next_state'], {}), '(next_state)\n', (4733, 4745), False, 'from torch import tensor\n'), ((4798, 4810), 'torch.tensor', 'tensor', (['done'], {}), '(done)\n', (4804, 4810), False, 'from torch import tensor\n'), ((1742, 1751), 'torch.tensor', 'tensor', (['(1)'], {}), '(1)\n', (1748, 1751), False, 'from torch import tensor\n'), ((4679, 4693), 'torch.tensor', 'tensor', (['reward'], {}), '(reward)\n', (4685, 4693), False, 'from torch import tensor\n'), ((10094, 10105), 'torch.tensor', 'tensor', (['obs'], {}), '(obs)\n', (10100, 10105), False, 'from torch import tensor\n'), ((9197, 9213), 'torch.tensor', 'tensor', (['test_obs'], {}), '(test_obs)\n', (9203, 9213), False, 'from torch import tensor\n')]
|
import h5py
import os, sys, glob
import numpy as np
import plotly.offline as offline
from preprocessing import analysis_pp
from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils
from scipy.stats.stats import power_divergence
from scipy.stats import ttest_ind_from_stats
import csv
import scipy.signal as ss
import math
import time
from pandas import DataFrame
from scipy import optimize
import pandas as pd
import matplotlib.pyplot as plt
from collections import deque
import powerlaw
import pylab
from matplotlib.font_manager import FontProperties
from matplotlib import rc
from scipy import stats
from scipy.stats import skewnorm
import plotly.graph_objs as go
def generate_astro_single_plots(astro_plotter, astroA, output_folder):
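    """Generate and save the per-astrocyte plots (behaviour heatmaps, borders, activity/size/amplitude bars)."""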
output_experiment_path = astro_plotter.get_output_experiment_path(astroA, output_folder)
print('Plotting behaviours basic...')
#Behaviour basic
figs_basic_plots = astro_plotter.get_behaviour_basic_plots(astroA)
for fig_k in figs_basic_plots.keys():
saving_utils.save_plotly_fig(figs_basic_plots[fig_k], os.path.join(output_experiment_path, 'plots', 'behaviours_basic', '{}'.format(fig_k)), width=1000, height=400)
print('Plotting behaviour heatmaps...')
#Behaviour heatmaps
fig_heatmap_grids, fig_heatmap_dff_grids = astro_plotter.get_behaviour_contour_plots(astroA)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting behaviour heatmaps (saturation)...')
fig_heatmap_grids, fig_heatmap_dff_grids = astro_plotter.get_behaviour_contour_threshold_plots(astroA, threshold=0.5)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps_saturation')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting borders...')
#Borders plot
fig_border = astro_plotter.get_border_plot(astroA)
saving_utils.save_plotly_fig(fig_border, os.path.join(output_experiment_path, 'plots' , 'borders', 'border'))
print('Plotting behaviour activity bar plot...')
behaviour_activity_path = os.path.join(output_experiment_path, 'plots', 'behaviour_activity', 'activity')
fig_behaviour_activity = astro_plotter.get_behaviour_activity_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_activity, behaviour_activity_path, width=1200, height=800)
print('Plotting behaviour event size bar plot...')
behaviour_area_path = os.path.join(output_experiment_path, 'plots', 'behaviour_areas', 'areas')
fig_behaviour_area = astro_plotter.get_behaviour_area_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_area, behaviour_area_path)
print('Plotting behaviour amplitude size bar plot...')
behaviour_amplitude_path = os.path.join(output_experiment_path, 'plots', 'signal_amplitudes', 'amplitudes')
fig_behaviour_amplitude = astro_plotter.get_behaviour_amplitude_bar_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_amplitude, behaviour_amplitude_path)
def generate_astro_comparison_plots(astro_plotter, astroA_l, output_folder, name_tag, astroA_l_pairs=None, astroA_long_l=None, n_chunks=3):
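    """Generate and save cross-astrocyte comparison plots (behaviour distributions, transition plots, split correlations, distribution fits)."""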
output_experiment_path_all_comparison, _, _, astroA_l_s = astro_plotter.setup_comparison_all_vars(astroA_l, os.path.join(output_folder, name_tag))
print('Plotting sizes histogram dataset comparison for each behaviour')
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
astroA_l_filt = []
bh_l_test = ['rest', 'running', 'stick_run_ind_15', 'stick_rest']
for astroA in astroA_l:
include = True
for bh in bh_l_test:
if bh not in astroA.indices_d.keys() or bh not in astroA.activity_ratios.keys():
include = False
print(':(', astroA.print_id, bh)
if include:
astroA_l_filt.append(astroA)
day_0_1_pairs = []
if astroA_l_pairs is not None:
for astroA_l_pair in astroA_l_pairs:
if astroA_l_pair[1].day == 1:
day_0_1_pairs.append(astroA_l_pair)
print('Comparing behaviour distribution plots...')
configs = [
{'measure': 'area', 'range': [None, 60], 'nbins' : 20, 'bh_l' : ['rest', 'stick_rest', 'running', 'stick_run_ind_15'], 'mode' : 'MOE'},
{'measure': 'dffMax2', 'range': [0.6, 5], 'nbins' : 20, 'bh_l' : ['rest', 'stick_rest', 'running', 'stick_run_ind_15'], 'mode' : 'MOE'},
{'measure': 'duration', 'range' : [None, 30], 'nbins' : 10, 'bh_l' : ['rest', 'stick_rest', 'running', 'stick_run_ind_15'], 'mode' : 'MOA'}
]
for config in configs:
behaviour_l = config['bh_l']
measure = config['measure']
min_measure, max_measure = config['range']
mode = config['mode']
n_bins = config['nbins']
confidence = True
try:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_bh_comparison'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}-mode={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, mode))
plot, stats_d = astro_plotter.measure_distribution_bh_compare_plot(astroA_l, behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode=mode)
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
if measure == 'area':
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
else:
saving_utils.save_plotly_fig(plot, path)
#saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']).transpose(), delimiter=",")
except Exception as e:
print('Exception: {}'.format(e))
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
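    # delay windows are expressed in frames: (seconds before, seconds after) * imaging frame rate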
delay_ranges_pairs = [[3*astroA_l[0].fr, 6*astroA_l[0].fr], [2*astroA_l[0].fr, 4*astroA_l[0].fr]]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
before_range_3, after_range_6 = delay_ranges_pairs[0]
before_range_2, after_range_4 = delay_ranges_pairs[1]
print('Alt Proportion plots...')
# Rest to run plots
rest_to_run_setting = {
'before_bh':'rest_semi_exact',
'inds_bh':'running_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': False,
'delay_step_size': 10,
'confidence': True}
# Rest to run - PROPORTIONS
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_proportions'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='measure', measure='dffMax2default',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_amplitudes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='measure', measure='time_s',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_durations'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='measure', measure='area',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_sizes'))
rest_to_run_setting['delay_step_size'] = 5
__save_astro_transition_plot(astro_plotter, astroA_l, setting=rest_to_run_setting, plot_type='behaviour', bh_measure='speed',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_speed'))
# Run to rest plots
run_to_rest_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'rest_start',
'after_bh':'rest_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': False,
'delay_step_size': 10,
'confidence': True}
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_proportions'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='measure', measure='dffMax2default',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_amplitudes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='measure', measure='time_s',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_durations'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='measure', measure='area',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_sizes'))
run_to_rest_setting['delay_step_size'] = 5
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_to_rest_setting, plot_type='behaviour', bh_measure='speed',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_speed'))
# Run-stick-run plots
run_stick_run_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'stick_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_2,
'after_range' : after_range_4,
'fit': False,
'delay_step_size': 10,
'confidence': True}
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_proportions'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='measure', measure='dffMax2default',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_amplitudes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='measure', measure='time_s',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_durations'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='measure', measure='area',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_sizes'))
__save_astro_transition_plot(astro_plotter, astroA_l, setting=run_stick_run_setting, plot_type='behaviour', bh_measure='speed',
path=os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_speed'))
#------------------------------------------------------------------------------------------------------------------
print('--------------------------------------------------------------------------------------------------')
print('Distribution of pixel values real vs fake...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'pixel_distribution')
x_l = []
y_l = []
name_l = [astroA.print_id for astroA in astroA_l]
for astroA in astroA_l:
grid = astroA.event_grids_1min['default']
grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
grid_flat = grid.flatten()
grid_flat_nz = grid_flat[grid_flat != 0]
hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
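        # convert the density histogram into per-bin probabilities by multiplying by the bin width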
hist = hist * (bin_edges[1] - bin_edges[0])
print('HIST SUM', np.sum(hist))
x_l = bin_edges[:-1]
y_l.append(hist)
y_l_fmt = []
for i in range(len(y_l[0])):
y_l_fmt.append([y[i] for y in y_l])
plot_path = os.path.join(path, 'real')
fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=True, with_details=True)
saving_utils.save_plotly_fig(fig, plot_path)
df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l, index=name_l)
df_stats = DataFrame([stats_d['mean'], stats_d['conf_95'], stats_d['fit']], columns=x_l, index=['mean', 'conf_95', 'fit'])
df_data.to_csv(plot_path + '-data.csv')
df_stats.to_csv(plot_path +'-stats.csv')
sample_l_all = []
for astroA in astroA_l:
d = astro_plotter.get_individual_heatmaps_threshold_scaled(astroA, bh='default', threshold=1, num_samples=1, dff_mode=False, with_arr=True)
sample_l_all.append(d['arrs_d']['arr_r'][0])
x_l = []
y_l = []
for grid in sample_l_all:
grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
grid_flat = grid.flatten()
grid_flat_nz = grid_flat[grid_flat != 0]
#Normalize values to 1
grid_flat_nz /= np.max(grid_flat_nz)
hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
hist = hist * (bin_edges[1] - bin_edges[0])
print('HIST SUM', np.sum(hist))
x_l = bin_edges[:-1]
y_l.append(hist)
y_l_fmt = []
for i in range(len(y_l[0])):
y_l_fmt.append([y[i] for y in y_l])
plot_path = os.path.join(path, 'fake')
fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=False, with_details=True)
saving_utils.save_plotly_fig(fig, plot_path)
df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l)
df_stats = DataFrame([stats_d['mean'], stats_d['conf_95']], columns=x_l, index=['mean', 'conf_95'])
df_data.to_csv(plot_path + '-data.csv')
df_stats.to_csv(plot_path +'-stats.csv')
#------------------------------------------------------------------------------------------------------------------
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'power_law_fit_sizes_distribution')
path = path +'/'
saving_utils.generate_directory_path(path)
pylab.rcParams['xtick.major.pad']='8'
pylab.rcParams['ytick.major.pad']='8'
rc('font', family='sans-serif')
rc('font', size=10.0)
rc('text', usetex=False)
panel_label_font = FontProperties().copy()
panel_label_font.set_weight("bold")
panel_label_font.set_size(12.0)
panel_label_font.set_family("sans-serif")
fig, x, y_l, all_events_measure_l = astro_plotter.measure_distribution_plot(astroA_l, 'default', 'area', num_bins=10, min_measure=None, max_measure=None, measure_name='area', mode='MOE', with_measure_values=True)
xmin=5
data_np = np.array(all_events_measure_l)
fit = powerlaw.Fit(data_np, discrete=True, xmin=xmin)
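    # maximum-likelihood power-law fit on discrete event sizes with lower cutoff xmin;
    # the empirical CCDF is then plotted against power-law, lognormal and exponential fits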
####
fig = fit.plot_ccdf(linewidth=3, label='Empirical Data')
fit.power_law.plot_ccdf(ax=fig, color='r', linestyle='--', label='Power law fit')
fit.lognormal.plot_ccdf(ax=fig, color='g', linestyle='--', label='Lognormal fit')
fit.exponential.plot_ccdf(ax=fig, color='b', linestyle='--', label='Exponential fit')
####
fig.set_ylabel(u"p(X≥x)")
fig.set_xlabel("Size µm^2")
handles, labels = fig.get_legend_handles_labels()
fig.legend(handles, labels, loc=3)
figname = 'EmpiricalvsFits'
plt.savefig(os.path.join(path, figname+'.svg'), bbox_inches='tight')
plt.savefig(os.path.join(path, figname+'.png'), bbox_inches='tight')
#print('POWER LAW VS LOG NORMAL', fit.distribution_compare('power_law', 'lognormal'))
#print('POWER LAW VS EXPONENTIAL cutoff at {}µm**2'.format(xmin), fit.distribution_compare('power_law', 'exponential'))
#print('POWERLAW FUNCTION: ~x**(-{})'.format(fit.power_law.alpha))
#------------------------------------------------------------------------------------------------------------------
plt.ioff()
print('Plotting Size vs Time correlation plot...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'size_v_time_corr_ALL')
path = path+'/'
    print('Generating directory path', path + '/')
saving_utils.generate_directory_path(path)
areas_all = []
times_all = []
for astroA in astroA_l:
areas_all.extend(np.log(astroA.res_d['area']))
times_all.extend(astroA.res_d['time_s'])
areas_all = np.array(areas_all)
times_all = np.array(times_all)
r, p = stat_utils.get_pearsonr(times_all, areas_all)
df = pd.DataFrame({'Size': areas_all, 'Time': times_all})
title ='Size vs Time correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Size', 'Time', kind=kind, text=text)
plt.savefig(os.path.join(path, '{}.svg'.format(kind)))
plt.savefig(os.path.join(path, '{}.png'.format(kind)))
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
print('Plotting correlation of splitted plots in 3 parts...')
save_folder = os.path.join(output_experiment_path_all_comparison, 'data', 'split_correlation_all')
plot_folder = os.path.join(output_experiment_path_all_comparison, 'plots', 'split_correlation_all')
save_splits_pkl_path = os.path.join(save_folder, 'between_splits.pkl')
save_day_splits_pkl_path = os.path.join(save_folder, 'between_days.pkl')
save_random_pkl_path = os.path.join(save_folder, 'random.pkl')
save_bh_splits_pkl_path = os.path.join(save_folder, 'between_rest_run.pkl')
#1 random simulations
#2 (correlation between splits days with variable the splits (so not between days) 3 split correlations with each other (only day 0 and day 1). day 0 splitted 3 times and correlated between each other. same with day 1
#3 (correlation between splits days with variable the between days)) the day 0 and day 1 splitted and then compared between each other between days
#'split_correlation_all'
#4 (correlation between split days with variable the rest-run behaviour)
for bh in ['rest']:
#2
fig, res_splits_l = astro_plotter.get_between_split_split_xcorr(astroA_long_l, bh=bh, save_pkl_path=save_splits_pkl_path, n_chunks=n_chunks)
#3
fig_2, res_day_splits_l = astro_plotter.get_between_day_split_xcorr(day_0_1_pairs, bh=bh, save_pkl_path=save_day_splits_pkl_path, n_chunks=n_chunks)
#4
fig_3, res_bh_splits_l = astro_plotter.get_between_bh_split_xcorr(astroA_long_l, bh_pair=['rest','running'], save_pkl_path=save_bh_splits_pkl_path, n_chunks=n_chunks)
#1
if os.path.isfile(save_random_pkl_path):
random_l = saving_utils.load_pickle(save_random_pkl_path)
else:
random_l = []
for astroA in astroA_long_l:
random_l.extend(astro_plotter.get_random_corrs_self(astroA, bh, n_fake_samples=3))
if save_random_pkl_path is not None:
saving_utils.save_pickle(random_l, save_random_pkl_path)
x = ['Random', 'Self splits', 'Rest-Run splits', 'Day 0-1 Splits']
y = [random_l, res_splits_l, res_bh_splits_l, res_day_splits_l]
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Split correlations (between splits)- {}'.format(bh), x_title='', y_title='Xcorr value', with_stats=True)
saving_utils.save_plotly_fig(fig, os.path.join(plot_folder, 'splits'))
saving_utils.dict_to_csv(stats_d, os.path.join(plot_folder, 'splits' + '.csv'))
#saving_utils.save_csv_dict(stats_d, os.path.join(plot_folder, 'splits' + '.csv'), key_order=['x', 'mean', 'conf_95'])
results_dict = {x[i] : y[i] for i in range(len(x))}
saving_utils.dict_to_csv(results_dict, os.path.join(plot_folder, 'splits-data' + '.csv'))
#results_dict['x'] = x
#key_order = ['x']
#key_order.extend(x)
#saving_utils.save_csv_dict(results_dict, os.path.join(plot_folder, 'splits_data' + '.csv'), key_order=key_order)
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
print('Plotting correlation of self splitted plots...')
#STEP 1
#Take only long duration astrocytes
#Set maximum length of astrocyte duration to be 70min
#Then apply splits with xcorr
data_save_path = os.path.join(output_experiment_path_all_comparison, 'data', 'splits_self_all')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'splits_self_all', 'self_all')
y_l_l = []
x_l = []
minute_frame_splits_l = [35, 30, 25, 20, 15, 10, 5, 2]
cut_duration = 70
param_str = 'cut_{}-'.format(cut_duration) + 'splits_{}-'.format('_'.join([str(m) for m in minute_frame_splits_l]))
name_l = []
for i, astroA in enumerate(astroA_long_l):
curr_save_path = os.path.join(data_save_path, 'id_{}-{}.pkl'.format(astroA.print_id, param_str))
res_d = astro_plotter.get_compare_full_self_results_alt(astroA, cut_duration_min=cut_duration, minute_frame_splits_l=minute_frame_splits_l, save_pkl_path=curr_save_path)
y_l_l.append(res_d['y'])
x_l.append(res_d['x'])
name_l.append(astroA.print_id)
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l[0], y_l_l, None, name_l, mode='lines', title='Splits self', x_title='Splits (minutes)', y_title='Correlation',
xrange=None, yrange=None, confidence=True, with_stats=True, point_box=True, exclude_non_avg_conf=True)
print(path)
saving_utils.save_plotly_fig(fig, path)
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
names_l = ['amplitude', 'size', 'duration']
measure_l = ['dffMax2', 'area', 'time_s' ]
names_l = ['Event number (per minute)', 'amplitude', 'size', 'duration']
measure_l = [None, 'dffMax2', 'area', 'time_s']
bh_list_pairs = [['rest', 'running'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]
bh_list_pairs_names = ['rest_run', 'rest_rest_stick', 'run_run_stick']
for j, bh_list_pair in enumerate(bh_list_pairs):
for i, measure in enumerate(measure_l):
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'transition_dots_{}'.format(bh_list_pairs_names[j]), '{}'.format('dots_'+names_l[i]))
if 'stick_rest' in bh_list_pair:
plot, stats_d = astro_plotter.get_measure_all_dot_plot(astroA_l_filt, measure, bh_list=bh_list_pair)
else:
plot, stats_d = astro_plotter.get_measure_all_dot_plot(astroA_l, measure, bh_list=bh_list_pair)
saving_utils.save_plotly_fig(plot, plot_path)
with open(os.path.join(plot_path + '-data.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
stats_d['names']
l = ['']
l.extend(stats_d['x'])
l.extend(['conf_0', 'conf_1'])
writer.writerow(l)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean_l_l'][i])
if 'conf_95' in stats_d:
l.extend(stats_d['conf_95'][i])
writer.writerow(l)
with open(os.path.join(plot_path + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow('')
writer.writerow(['mean_0', 'mean_1', 'mean_conf_0', 'mean_conf_1'])
l = []
l.extend(stats_d['mean'])
l.extend(stats_d['mean_conf'])
writer.writerow(l)
#------------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'pdf_norm_fit')
estimates_d = {}
all_event_values = {}
for measure in ['dffMax2' , 'time_s']:
if measure == 'dffMax2':
num_bins = 200
max_filter_val = 3
elif measure == 'time_s':
num_bins = 30
max_filter_val = 2.91
estimates_d[measure] = {}
all_event_values[measure] = {}
for bh in ['rest', 'running']:
fig, x, y_l, all_events_measure_l = astro_plotter.measure_distribution_plot(astroA_l, bh, measure, num_bins=10, min_measure=None, max_measure=None, measure_name=aqua_utils.get_measure_names([measure]), mode='MOE', with_measure_values=True)
all_events_measure_l = np.array(all_events_measure_l)
all_events_measure_l = all_events_measure_l[all_events_measure_l < max_filter_val]
a_estimate, loc_estimate, scale_estimate = skewnorm.fit(all_events_measure_l)
x = np.linspace(np.min(all_events_measure_l), np.max(all_events_measure_l), 100)
p = skewnorm.pdf(x, a_estimate, loc_estimate, scale_estimate)
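            # the fitted skew-normal pdf is overlaid on the empirical histogram below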
estimates_d[measure][bh] = [a_estimate, loc_estimate, scale_estimate, np.min(x), np.max(x)]
all_event_values[measure][bh] = np.copy(np.array(all_events_measure_l))
fig = plotly_utils.plot_scatter_histogram(x=x, y_hist=all_events_measure_l, y_scatter=p, num_bins=num_bins)
mean, var, skew, kurt = skewnorm.stats(a=a_estimate, loc=loc_estimate, scale=scale_estimate, moments='mvsk')
a, b = np.histogram(all_events_measure_l, bins=num_bins, range=(0, np.max(x)), density=True)
id_ = measure + '_' + bh
temp_d = {}
temp_d['Parameters'] = ["a={}".format(a_estimate), "loc={}".format(loc_estimate), "scale={}".format(scale_estimate)]
temp_d['Properties'] = ["MEAN={}".format(mean), "VAR={}".format(var), "SKEW={}".format(skew),"KURT={}".format(kurt)]
#print(temp_d)
saving_utils.save_csv_dict(temp_d, os.path.join(path, id_ + '.csv'), key_order=['Parameters', 'Properties'])
saving_utils.save_plotly_fig(fig, os.path.join(path, id_))
#print('skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)')
#print('skewnorm.pdf(x, a, loc, scale) is identically equivalent to skewnorm.pdf(y, a) / scale with y = (x - loc) / scale')
with_values = True
for measure in ['dffMax2', 'time_s']:
est_rest = estimates_d[measure]['rest']
est_running = estimates_d[measure]['running']
if measure == 'dffMax2':
x_min = 0.6
x_max = 3
nbins = 100
elif measure == 'time_s':
x_min = 0
x_max = 2.91
else:
raise NotImplementedError()
x = np.linspace(x_min, x_max, 500)
if measure == 'duration' or measure == 'time_s':
tempset = set(list(all_event_values[measure]['rest'])).union(set(list(all_event_values[measure]['running'])))
tempset.add(0)
x_val_bins = np.sort(np.array(list(tempset)))
x_val_bins = x_val_bins[x_val_bins <= x_max]
x_val_bins = x_val_bins[x_val_bins >= x_min]
else:
x_val_bins = np.linspace(x_min, x_max, nbins)
#Add bin size / 2 to align better
x_val_diff = 0
if measure == 'duration' or measure == 'time_s':
x_val_diff = (x_val_bins[1] - x_val_bins[0]) / 2
p_rest = skewnorm.pdf(x, est_rest[0], est_rest[1], est_rest[2])
p_running = skewnorm.pdf(x, est_running[0], est_running[1], est_running[2])
if with_values:
vals_running, vals_x_running = np.histogram(all_event_values[measure]['running'][all_event_values[measure]['running'] < x_max], bins=x_val_bins, density=True)
vals_rest, vals_x_rest = np.histogram(all_event_values[measure]['rest'][all_event_values[measure]['rest'] < x_max], bins=x_val_bins, density=True)
#Shift by 1 so they look more aligned(due to large bin sizes)
#e.g. value at 0 is values between 0-(0+bin_size)
#We are essentially moving the point of values lets say [0, 1] to 0 and then with diff to 0.5
vals_running = vals_running[1:]
vals_rest = vals_rest[1:]
measure_name = aqua_utils.get_measure_names([measure])
fig = plotly_utils.plot_scatter_mult(x_l=[x, x, vals_x_rest + x_val_diff, vals_x_running + x_val_diff], y_l_l=[p_rest, p_running, vals_rest, vals_running], mode_l=['lines','lines', 'markers','markers'], name_l=['rest','running', 'rest-true', 'running-true'], confidence=False, with_stats=False, title='Skewed distribution: {}'.format(measure_name), x_title=measure_name, y_title='p(X)')
else:
measure_name = aqua_utils.get_measure_names([measure])
fig = plotly_utils.plot_scatter_mult(x_l=[x, x], y_l_l=[p_rest, p_running], name_l=['rest','running'], confidence=False, with_stats=False, title='Skewed distribution: {}'.format(measure_name), x_title=measure_name, y_title='p(X)')
id_ = 'measure={}-withvalues={}'.format(measure_name, with_values)
saving_utils.save_plotly_fig(fig, os.path.join(path, id_))
def generate_axon_plots(axon_plotter, AA_l, output_folder):
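    """Generate and save the axon transition plots (rest-to-run, run-to-rest, run-stick-run)."""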
print('---TRANSITION PROPORTION DELAYS PLOT ALL---')
output_experiment_path_all_comparison = os.path.join(output_folder, 'axon_all')
delay_ranges_pairs = [[3*AA_l[0].fr, 6*AA_l[0].fr], [2*AA_l[0].fr, 4*AA_l[0].fr]]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
before_range_3, after_range_6 = delay_ranges_pairs[0]
before_range_2, after_range_4 = delay_ranges_pairs[1]
print('Alt Proportion plots...')
rest_to_run_setting = {
'before_bh':'rest_semi_exact',
'inds_bh':'running_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': True,
'delay_step_size': 10,
'confidence': True}
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=rest_to_run_setting,
plot_type='behaviour',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'rest_to_run_speed'),
bh_measure='speed')
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=rest_to_run_setting,
plot_type='proportions_stick_filter',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'rest_to_run_vibrisastimtiming'),
bh_measure=None)
for aa_setting in ['axon']:
rest_to_run_setting['aa_setting'] = aa_setting
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=rest_to_run_setting,
plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'rest_to_run_{aa_setting}_proportions'),
bh_measure=None)
run_to_rest_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'rest_start',
'after_bh':'rest_semi_exact',
'before_range' : before_range_3,
'after_range' : after_range_6,
'fit': True,
'delay_step_size': 10,
'confidence': True
}
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_to_rest_setting,
plot_type='behaviour',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_to_rest_speed'),
bh_measure='speed')
for aa_setting in ['axon']:
run_to_rest_setting['aa_setting'] = aa_setting
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_to_rest_setting,
plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_to_rest_{aa_setting}_proportions'),
bh_measure=None)
run_stick_run_setting = {
'before_bh':'running_semi_exact',
'inds_bh':'stick_exact_start',
'after_bh':'running_semi_exact',
'before_range' : before_range_2,
'after_range' : after_range_4,
'fit': True,
'delay_step_size': 10,
'confidence': True
}
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_stick_run_setting,
plot_type='behaviour',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_stick_run_speed'),
bh_measure='speed')
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_stick_run_setting,
plot_type='proportions_stick_filter',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_stick_run_vibrisastimtiming'),
bh_measure=None)
for aa_setting in ['axon', 'astro']:
run_stick_run_setting['aa_setting'] = aa_setting
__save_axon_transition_plot(axon_plotter=axon_plotter,
AA_l=AA_l,
setting=run_stick_run_setting,
plot_type='proportions',
path=os.path.join(output_experiment_path_all_comparison, 'plots', f'run_stick_run_{aa_setting}_proportions'),
bh_measure=None)
def __save_astro_transition_plot(astro_plotter, astroA_l, setting, plot_type, path, measure=None, bh_measure=None):
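    """Plot and save event behaviour around a behaviour transition for the given astrocytes.

    Depending on plot_type this saves event proportions ('proportions'), an event measure
    such as amplitude/duration/size ('measure'), or a behaviour trace such as speed
    ('behaviour') aligned to the transition, together with per-bin CSV statistics.
    """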
measure_y_titles = {'dffMax2default' : 'Amplitude',
'time_s' : 'Duration (s)',
'area' : 'Size'}
bh_measure_y_titles = {'speed' : 'Speed (cm/s)'}
before_bh=setting['before_bh']
inds_bh = setting['inds_bh']
after_bh = setting['after_bh']
before_range = setting['before_range']
after_range = setting['after_range']
fit = setting['fit']
delay_step_size = setting['delay_step_size']
confidence = setting['confidence']
p = {'fit' : fit, 'delay_step_size' : delay_step_size, 'confidence' : confidence}
if plot_type == 'proportions':
fig_d, bin_stats = astro_plotter.get_transition_proportion_delays_plot_all_alt(astroA_l,
before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
**p)
elif plot_type == 'measure':
assert measure is not None
fig_d, bin_stats = astro_plotter.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[measure],
**p)
elif plot_type == 'behaviour':
assert bh_measure is not None
fig_d, bin_stats = astro_plotter.get_transition_bh_values_plot_all_alt(astroA_l,
before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[bh_measure],
**p)
else:
        raise ValueError('Plot type must be "proportions", "measure" or "behaviour"')
fig_v = fig_d['event_avg_no_mult']
fig_id = os.path.join(path, 'range_{}_{}-step_{}'.format(before_range, after_range, delay_step_size))
saving_utils.save_plotly_fig(fig_v, fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
all_data_dict = {bin_stats['x'][i]:bin_stats['y_all'][:, i] for i in range(len(bin_stats['x']))}
saving_utils.dict_to_csv(all_data_dict, name=fig_id + 'range_{}_{}-step_{}-data.csv'.format(before_range, after_range, delay_step_size), base_folder=path)
#DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
def __save_axon_transition_plot(axon_plotter, AA_l, setting, plot_type, path, bh_measure=None):
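    """Axon counterpart of __save_astro_transition_plot.

    Supports the 'proportions', 'behaviour' and 'proportions_stick_filter' plot types and
    saves the averaged transition figure plus per-bin CSV statistics.
    """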
bh_measure_y_titles = {'speed' : 'Speed (cm/s)'}
before_bh = setting['before_bh']
inds_bh = setting['inds_bh']
after_bh = setting['after_bh']
before_range = setting['before_range']
after_range = setting['after_range']
fit = setting['fit']
delay_step_size = setting['delay_step_size']
confidence = setting['confidence']
if 'aa_setting' in setting:
aa_setting = setting['aa_setting']
p = {'fit' : fit, 'delay_step_size' : delay_step_size, 'confidence' : confidence, 'setting' : aa_setting}
else:
p = {'fit' : fit, 'delay_step_size' : delay_step_size, 'confidence' : confidence}
if plot_type == 'proportions':
fig_d, bin_stats = axon_plotter.get_axon_transition_proportion_delays_plot_all(AA_l, before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
**p)
elif plot_type == 'behaviour':
assert bh_measure is not None
fig_d, bin_stats = axon_plotter.get_transition_bh_values_plot_all_alt(AA_l,
before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[bh_measure],
**p)
elif plot_type == 'proportions_stick_filter':
fig_d, bin_stats = axon_plotter.get_axon_transition_proportion_delays_STICK_FILTER_plot_all(AA_l, before_bh=before_bh, inds_bh=inds_bh, after_bh=after_bh,
before_range=before_range, after_range=after_range,
**p)
else:
raise ValueError('Invalid plot type')
fig_v = fig_d['event_avg_no_mult']
fig_id = os.path.join(path, 'range_{}_{}-step_{}'.format(before_range, after_range, delay_step_size))
saving_utils.save_plotly_fig(fig_v, fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
|
[
"matplotlib.rc",
"scipy.stats.skewnorm.pdf",
"numpy.sum",
"powerlaw.Fit",
"scipy.stats.skewnorm.fit",
"analysis.general_utils.plotly_utils.plot_scatter_mult_with_avg",
"numpy.histogram",
"os.path.isfile",
"scipy.stats.skewnorm.stats",
"analysis.general_utils.plotly_utils.apply_fun_axis_fig",
"analysis.general_utils.saving_utils.save_pth_plt_l_log",
"os.path.join",
"pandas.DataFrame",
"matplotlib.font_manager.FontProperties",
"analysis.general_utils.saving_utils.load_pickle",
"analysis.general_utils.plotly_utils.plot_scatter_error",
"analysis.general_utils.stat_utils.get_pearsonr",
"numpy.max",
"analysis.general_utils.plotly_utils.seaborn_joint_grid",
"analysis.general_utils.plotly_utils.plot_scatter_histogram",
"analysis.general_utils.saving_utils.save_plotly_fig",
"numpy.linspace",
"analysis.general_utils.aqua_utils.get_measure_names",
"analysis.general_utils.general_utils.truncate",
"csv.writer",
"numpy.min",
"analysis.general_utils.saving_utils.save_csv_dict",
"analysis.general_utils.saving_utils.save_pickle",
"numpy.log",
"matplotlib.pyplot.ioff",
"os.makedirs",
"analysis.general_utils.saving_utils.generate_directory_path",
"numpy.array"
] |
[((1460, 1527), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_heatmaps"""'], {}), "(output_experiment_path, 'plots', 'behaviour_heatmaps')\n", (1472, 1527), False, 'import os, sys, glob\n'), ((1988, 2066), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_heatmaps_saturation"""'], {}), "(output_experiment_path, 'plots', 'behaviour_heatmaps_saturation')\n", (2000, 2066), False, 'import os, sys, glob\n'), ((2623, 2702), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_activity"""', '"""activity"""'], {}), "(output_experiment_path, 'plots', 'behaviour_activity', 'activity')\n", (2635, 2702), False, 'import os, sys, glob\n'), ((2786, 2891), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_behaviour_activity', 'behaviour_activity_path'], {'width': '(1200)', 'height': '(800)'}), '(fig_behaviour_activity,\n behaviour_activity_path, width=1200, height=800)\n', (2814, 2891), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((2970, 3043), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""behaviour_areas"""', '"""areas"""'], {}), "(output_experiment_path, 'plots', 'behaviour_areas', 'areas')\n", (2982, 3043), False, 'import os, sys, glob\n'), ((3119, 3188), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_behaviour_area', 'behaviour_area_path'], {}), '(fig_behaviour_area, behaviour_area_path)\n', (3147, 3188), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((3280, 3365), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""signal_amplitudes"""', '"""amplitudes"""'], {}), "(output_experiment_path, 'plots', 'signal_amplitudes', 'amplitudes'\n )\n", (3292, 3365), False, 'import os, sys, glob\n'), ((3450, 3529), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_behaviour_amplitude', 'behaviour_amplitude_path'], {}), '(fig_behaviour_amplitude, behaviour_amplitude_path)\n', (3478, 3529), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((12665, 12751), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""pixel_distribution"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'pixel_distribution')\n", (12677, 12751), False, 'import os, sys, glob\n'), ((13405, 13431), 'os.path.join', 'os.path.join', (['path', '"""real"""'], {}), "(path, 'real')\n", (13417, 13431), False, 'import os, sys, glob\n'), ((13451, 13608), 'analysis.general_utils.plotly_utils.plot_scatter_error', 'plotly_utils.plot_scatter_error', (['x_l', 'y_l_fmt'], {'x_title': '"""Pixel intensity percentile"""', 'y_title': '"""Frequency (Density)"""', 'exp_fit': '(True)', 'with_details': '(True)'}), "(x_l, y_l_fmt, x_title=\n 'Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=\n True, with_details=True)\n", (13482, 13608), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((13603, 13647), 'analysis.general_utils.saving_utils.save_plotly_fig', 
'saving_utils.save_plotly_fig', (['fig', 'plot_path'], {}), '(fig, plot_path)\n', (13631, 13647), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((13744, 13860), 'pandas.DataFrame', 'DataFrame', (["[stats_d['mean'], stats_d['conf_95'], stats_d['fit']]"], {'columns': 'x_l', 'index': "['mean', 'conf_95', 'fit']"}), "([stats_d['mean'], stats_d['conf_95'], stats_d['fit']], columns=\n x_l, index=['mean', 'conf_95', 'fit'])\n", (13753, 13860), False, 'from pandas import DataFrame\n'), ((14830, 14856), 'os.path.join', 'os.path.join', (['path', '"""fake"""'], {}), "(path, 'fake')\n", (14842, 14856), False, 'import os, sys, glob\n'), ((14876, 15034), 'analysis.general_utils.plotly_utils.plot_scatter_error', 'plotly_utils.plot_scatter_error', (['x_l', 'y_l_fmt'], {'x_title': '"""Pixel intensity percentile"""', 'y_title': '"""Frequency (Density)"""', 'exp_fit': '(False)', 'with_details': '(True)'}), "(x_l, y_l_fmt, x_title=\n 'Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=\n False, with_details=True)\n", (14907, 15034), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((15030, 15074), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig', 'plot_path'], {}), '(fig, plot_path)\n', (15058, 15074), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((15157, 15249), 'pandas.DataFrame', 'DataFrame', (["[stats_d['mean'], stats_d['conf_95']]"], {'columns': 'x_l', 'index': "['mean', 'conf_95']"}), "([stats_d['mean'], stats_d['conf_95']], columns=x_l, index=['mean',\n 'conf_95'])\n", (15166, 15249), False, 'from pandas import DataFrame\n'), ((15470, 15570), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""power_law_fit_sizes_distribution"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'power_law_fit_sizes_distribution')\n", (15482, 15570), False, 'import os, sys, glob\n'), ((15592, 15634), 'analysis.general_utils.saving_utils.generate_directory_path', 'saving_utils.generate_directory_path', (['path'], {}), '(path)\n', (15628, 15634), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((15724, 15755), 'matplotlib.rc', 'rc', (['"""font"""'], {'family': '"""sans-serif"""'}), "('font', family='sans-serif')\n", (15726, 15755), False, 'from matplotlib import rc\n'), ((15760, 15781), 'matplotlib.rc', 'rc', (['"""font"""'], {'size': '(10.0)'}), "('font', size=10.0)\n", (15762, 15781), False, 'from matplotlib import rc\n'), ((15786, 15810), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (15788, 15810), False, 'from matplotlib import rc\n'), ((16225, 16255), 'numpy.array', 'np.array', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (16233, 16255), True, 'import numpy as np\n'), ((16266, 16313), 'powerlaw.Fit', 'powerlaw.Fit', (['data_np'], {'discrete': '(True)', 'xmin': 'xmin'}), '(data_np, discrete=True, xmin=xmin)\n', (16278, 16313), False, 'import powerlaw\n'), ((17405, 17415), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (17413, 17415), True, 'import matplotlib.pyplot as plt\n'), ((17482, 17570), 'os.path.join', 
'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""size_v_time_corr_ALL"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'size_v_time_corr_ALL')\n", (17494, 17570), False, 'import os, sys, glob\n'), ((17642, 17684), 'analysis.general_utils.saving_utils.generate_directory_path', 'saving_utils.generate_directory_path', (['path'], {}), '(path)\n', (17678, 17684), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((17872, 17891), 'numpy.array', 'np.array', (['areas_all'], {}), '(areas_all)\n', (17880, 17891), True, 'import numpy as np\n'), ((17908, 17927), 'numpy.array', 'np.array', (['times_all'], {}), '(times_all)\n', (17916, 17927), True, 'import numpy as np\n'), ((17939, 17984), 'analysis.general_utils.stat_utils.get_pearsonr', 'stat_utils.get_pearsonr', (['times_all', 'areas_all'], {}), '(times_all, areas_all)\n', (17962, 17984), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((17995, 18047), 'pandas.DataFrame', 'pd.DataFrame', (["{'Size': areas_all, 'Time': times_all}"], {}), "({'Size': areas_all, 'Time': times_all})\n", (18007, 18047), True, 'import pandas as pd\n'), ((18738, 18826), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""data"""', '"""split_correlation_all"""'], {}), "(output_experiment_path_all_comparison, 'data',\n 'split_correlation_all')\n", (18750, 18826), False, 'import os, sys, glob\n'), ((18841, 18930), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""split_correlation_all"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'split_correlation_all')\n", (18853, 18930), False, 'import os, sys, glob\n'), ((18954, 19001), 'os.path.join', 'os.path.join', (['save_folder', '"""between_splits.pkl"""'], {}), "(save_folder, 'between_splits.pkl')\n", (18966, 19001), False, 'import os, sys, glob\n'), ((19033, 19078), 'os.path.join', 'os.path.join', (['save_folder', '"""between_days.pkl"""'], {}), "(save_folder, 'between_days.pkl')\n", (19045, 19078), False, 'import os, sys, glob\n'), ((19106, 19145), 'os.path.join', 'os.path.join', (['save_folder', '"""random.pkl"""'], {}), "(save_folder, 'random.pkl')\n", (19118, 19145), False, 'import os, sys, glob\n'), ((19176, 19225), 'os.path.join', 'os.path.join', (['save_folder', '"""between_rest_run.pkl"""'], {}), "(save_folder, 'between_rest_run.pkl')\n", (19188, 19225), False, 'import os, sys, glob\n'), ((22168, 22246), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""data"""', '"""splits_self_all"""'], {}), "(output_experiment_path_all_comparison, 'data', 'splits_self_all')\n", (22180, 22246), False, 'import os, sys, glob\n'), ((22258, 22353), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""splits_self_all"""', '"""self_all"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'splits_self_all', 'self_all')\n", (22270, 22353), False, 'import os, sys, glob\n'), ((23049, 23321), 'analysis.general_utils.plotly_utils.plot_scatter_mult_with_avg', 'plotly_utils.plot_scatter_mult_with_avg', (['x_l[0]', 'y_l_l', 'None', 'name_l'], {'mode': '"""lines"""', 'title': '"""Splits self"""', 'x_title': '"""Splits (minutes)"""', 'y_title': '"""Correlation"""', 'xrange': 'None', 'yrange': 'None', 'confidence': '(True)', 'with_stats': 
'(True)', 'point_box': '(True)', 'exclude_non_avg_conf': '(True)'}), "(x_l[0], y_l_l, None, name_l, mode=\n 'lines', title='Splits self', x_title='Splits (minutes)', y_title=\n 'Correlation', xrange=None, yrange=None, confidence=True, with_stats=\n True, point_box=True, exclude_non_avg_conf=True)\n", (23088, 23321), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((23348, 23387), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig', 'path'], {}), '(fig, path)\n', (23376, 23387), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((23404, 23480), 'pandas.DataFrame', 'DataFrame', (["stats_d['mean_l_l']"], {'columns': "stats_d['x']", 'index': "stats_d['names']"}), "(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])\n", (23413, 23480), False, 'from pandas import DataFrame\n'), ((23493, 23568), 'pandas.DataFrame', 'DataFrame', (["stats_d['conf_95']"], {'columns': "stats_d['x']", 'index': "stats_d['names']"}), "(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])\n", (23502, 23568), False, 'from pandas import DataFrame\n'), ((23583, 23686), 'pandas.DataFrame', 'DataFrame', (["[stats_d['mean'], stats_d['mean_conf']]"], {'columns': "stats_d['x']", 'index': "['mean', 'conf_95']"}), "([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'],\n index=['mean', 'conf_95'])\n", (23592, 23686), False, 'from pandas import DataFrame\n'), ((26454, 26530), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""pdf_norm_fit"""'], {}), "(output_experiment_path_all_comparison, 'plots', 'pdf_norm_fit')\n", (26466, 26530), False, 'import os, sys, glob\n'), ((32022, 32061), 'os.path.join', 'os.path.join', (['output_folder', '"""axon_all"""'], {}), "(output_folder, 'axon_all')\n", (32034, 32061), False, 'import os, sys, glob\n'), ((39300, 39343), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_v', 'fig_id'], {}), '(fig_v, fig_id)\n', (39328, 39343), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((39348, 39460), 'analysis.general_utils.saving_utils.save_csv_dict', 'saving_utils.save_csv_dict', (['bin_stats'], {'path': "(fig_id + '.csv')", 'key_order': "['x', 'mean', 'std', 'confidence_95']"}), "(bin_stats, path=fig_id + '.csv', key_order=['x',\n 'mean', 'std', 'confidence_95'])\n", (39374, 39460), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((42268, 42311), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['fig_v', 'fig_id'], {}), '(fig_v, fig_id)\n', (42296, 42311), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((42316, 42428), 'analysis.general_utils.saving_utils.save_csv_dict', 'saving_utils.save_csv_dict', (['bin_stats'], {'path': "(fig_id + '.csv')", 'key_order': "['x', 'mean', 'std', 'confidence_95']"}), "(bin_stats, path=fig_id + '.csv', key_order=['x',\n 'mean', 'std', 'confidence_95'])\n", (42342, 42428), False, 'from analysis.general_utils import aqua_utils, saving_utils, 
plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((2470, 2536), 'os.path.join', 'os.path.join', (['output_experiment_path', '"""plots"""', '"""borders"""', '"""border"""'], {}), "(output_experiment_path, 'plots', 'borders', 'border')\n", (2482, 2536), False, 'import os, sys, glob\n'), ((3784, 3821), 'os.path.join', 'os.path.join', (['output_folder', 'name_tag'], {}), '(output_folder, name_tag)\n', (3796, 3821), False, 'import os, sys, glob\n'), ((13083, 13146), 'numpy.histogram', 'np.histogram', (['grid_flat_nz'], {'bins': '(20)', 'range': '(0, 1)', 'density': '(True)'}), '(grid_flat_nz, bins=20, range=(0, 1), density=True)\n', (13095, 13146), True, 'import numpy as np\n'), ((14461, 14481), 'numpy.max', 'np.max', (['grid_flat_nz'], {}), '(grid_flat_nz)\n', (14467, 14481), True, 'import numpy as np\n'), ((14508, 14571), 'numpy.histogram', 'np.histogram', (['grid_flat_nz'], {'bins': '(20)', 'range': '(0, 1)', 'density': '(True)'}), '(grid_flat_nz, bins=20, range=(0, 1), density=True)\n', (14520, 14571), True, 'import numpy as np\n'), ((16861, 16897), 'os.path.join', 'os.path.join', (['path', "(figname + '.svg')"], {}), "(path, figname + '.svg')\n", (16873, 16897), False, 'import os, sys, glob\n'), ((16934, 16970), 'os.path.join', 'os.path.join', (['path', "(figname + '.png')"], {}), "(path, figname + '.png')\n", (16946, 16970), False, 'import os, sys, glob\n'), ((18127, 18155), 'analysis.general_utils.general_utils.truncate', 'general_utils.truncate', (['r', '(2)'], {}), '(r, 2)\n', (18149, 18155), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((18207, 18280), 'analysis.general_utils.plotly_utils.seaborn_joint_grid', 'plotly_utils.seaborn_joint_grid', (['df', '"""Size"""', '"""Time"""'], {'kind': 'kind', 'text': 'text'}), "(df, 'Size', 'Time', kind=kind, text=text)\n", (18238, 18280), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((20292, 20328), 'os.path.isfile', 'os.path.isfile', (['save_random_pkl_path'], {}), '(save_random_pkl_path)\n', (20306, 20328), False, 'import os, sys, glob\n'), ((29356, 29386), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', '(500)'], {}), '(x_min, x_max, 500)\n', (29367, 29386), True, 'import numpy as np\n'), ((30065, 30119), 'scipy.stats.skewnorm.pdf', 'skewnorm.pdf', (['x', 'est_rest[0]', 'est_rest[1]', 'est_rest[2]'], {}), '(x, est_rest[0], est_rest[1], est_rest[2])\n', (30077, 30119), False, 'from scipy.stats import skewnorm\n'), ((30140, 30203), 'scipy.stats.skewnorm.pdf', 'skewnorm.pdf', (['x', 'est_running[0]', 'est_running[1]', 'est_running[2]'], {}), '(x, est_running[0], est_running[1], est_running[2])\n', (30152, 30203), False, 'from scipy.stats import skewnorm\n'), ((1626, 1665), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', 'k'], {}), '(heatmap_grid_base_path, k)\n', (1638, 1665), False, 'import os, sys, glob\n'), ((1730, 1777), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', "(k + 'dff')"], {}), "(heatmap_grid_base_path, k + 'dff')\n", (1742, 1777), False, 'import os, sys, glob\n'), ((2165, 2204), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', 'k'], {}), '(heatmap_grid_base_path, k)\n', (2177, 2204), False, 'import os, sys, glob\n'), ((2269, 2316), 'os.path.join', 'os.path.join', (['heatmap_grid_base_path', "(k + 'dff')"], {}), "(heatmap_grid_base_path, k + 
'dff')\n", (2281, 2316), False, 'import os, sys, glob\n'), ((5371, 5408), 'analysis.general_utils.aqua_utils.get_measure_names', 'aqua_utils.get_measure_names', (['measure'], {}), '(measure)\n', (5399, 5408), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((8193, 8284), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_proportions')\n", (8205, 8284), False, 'import os, sys, glob\n'), ((8434, 8524), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_amplitudes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_amplitudes')\n", (8446, 8524), False, 'import os, sys, glob\n'), ((8661, 8750), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_durations"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_durations')\n", (8673, 8750), False, 'import os, sys, glob\n'), ((8885, 8970), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_sizes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_sizes')\n", (8897, 8970), False, 'import os, sys, glob\n'), ((9159, 9244), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""rest_to_run_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'rest_to_run_speed')\n", (9171, 9244), False, 'import os, sys, glob\n'), ((9763, 9854), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_proportions')\n", (9775, 9854), False, 'import os, sys, glob\n'), ((9999, 10089), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_amplitudes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_amplitudes')\n", (10011, 10089), False, 'import os, sys, glob\n'), ((10226, 10315), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_durations"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_durations')\n", (10238, 10315), False, 'import os, sys, glob\n'), ((10450, 10535), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_sizes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_sizes')\n", (10462, 10535), False, 'import os, sys, glob\n'), ((10724, 10809), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_to_rest_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_to_rest_speed')\n", (10736, 10809), False, 'import os, sys, glob\n'), ((11343, 11436), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_stick_run_proportions')\n", (11355, 11436), False, 'import os, sys, glob\n'), ((11583, 11675), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_amplitudes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 
'run_stick_run_amplitudes')\n", (11595, 11675), False, 'import os, sys, glob\n'), ((11814, 11905), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_durations"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_stick_run_durations')\n", (11826, 11905), False, 'import os, sys, glob\n'), ((12042, 12129), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_sizes"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_stick_run_sizes')\n", (12054, 12129), False, 'import os, sys, glob\n'), ((12273, 12360), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', '"""run_stick_run_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n 'run_stick_run_speed')\n", (12285, 12360), False, 'import os, sys, glob\n'), ((13224, 13236), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (13230, 13236), True, 'import numpy as np\n'), ((13673, 13698), 'numpy.array', 'np.array', (["stats_d['data']"], {}), "(stats_d['data'])\n", (13681, 13698), True, 'import numpy as np\n'), ((14649, 14661), 'numpy.sum', 'np.sum', (['hist'], {}), '(hist)\n', (14655, 14661), True, 'import numpy as np\n'), ((15100, 15125), 'numpy.array', 'np.array', (["stats_d['data']"], {}), "(stats_d['data'])\n", (15108, 15125), True, 'import numpy as np\n'), ((15835, 15851), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (15849, 15851), False, 'from matplotlib.font_manager import FontProperties\n'), ((17777, 17805), 'numpy.log', 'np.log', (["astroA.res_d['area']"], {}), "(astroA.res_d['area'])\n", (17783, 17805), True, 'import numpy as np\n'), ((20353, 20399), 'analysis.general_utils.saving_utils.load_pickle', 'saving_utils.load_pickle', (['save_random_pkl_path'], {}), '(save_random_pkl_path)\n', (20377, 20399), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((20637, 20693), 'analysis.general_utils.saving_utils.save_pickle', 'saving_utils.save_pickle', (['random_l', 'save_random_pkl_path'], {}), '(random_l, save_random_pkl_path)\n', (20661, 20693), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((21062, 21097), 'os.path.join', 'os.path.join', (['plot_folder', '"""splits"""'], {}), "(plot_folder, 'splits')\n", (21074, 21097), False, 'import os, sys, glob\n'), ((21150, 21194), 'os.path.join', 'os.path.join', (['plot_folder', "('splits' + '.csv')"], {}), "(plot_folder, 'splits' + '.csv')\n", (21162, 21194), False, 'import os, sys, glob\n'), ((21432, 21481), 'os.path.join', 'os.path.join', (['plot_folder', "('splits-data' + '.csv')"], {}), "(plot_folder, 'splits-data' + '.csv')\n", (21444, 21481), False, 'import os, sys, glob\n'), ((25036, 25081), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['plot', 'plot_path'], {}), '(plot, plot_path)\n', (25064, 25081), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((27219, 27249), 'numpy.array', 'np.array', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (27227, 27249), True, 'import numpy as np\n'), ((27400, 27434), 'scipy.stats.skewnorm.fit', 'skewnorm.fit', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', 
(27412, 27434), False, 'from scipy.stats import skewnorm\n'), ((27557, 27614), 'scipy.stats.skewnorm.pdf', 'skewnorm.pdf', (['x', 'a_estimate', 'loc_estimate', 'scale_estimate'], {}), '(x, a_estimate, loc_estimate, scale_estimate)\n', (27569, 27614), False, 'from scipy.stats import skewnorm\n'), ((27821, 27926), 'analysis.general_utils.plotly_utils.plot_scatter_histogram', 'plotly_utils.plot_scatter_histogram', ([], {'x': 'x', 'y_hist': 'all_events_measure_l', 'y_scatter': 'p', 'num_bins': 'num_bins'}), '(x=x, y_hist=all_events_measure_l,\n y_scatter=p, num_bins=num_bins)\n', (27856, 27926), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((27959, 28047), 'scipy.stats.skewnorm.stats', 'skewnorm.stats', ([], {'a': 'a_estimate', 'loc': 'loc_estimate', 'scale': 'scale_estimate', 'moments': '"""mvsk"""'}), "(a=a_estimate, loc=loc_estimate, scale=scale_estimate,\n moments='mvsk')\n", (27973, 28047), False, 'from scipy.stats import skewnorm\n'), ((29814, 29846), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'nbins'], {}), '(x_min, x_max, nbins)\n', (29825, 29846), True, 'import numpy as np\n'), ((30280, 30412), 'numpy.histogram', 'np.histogram', (["all_event_values[measure]['running'][all_event_values[measure]['running'] <\n x_max]"], {'bins': 'x_val_bins', 'density': '(True)'}), "(all_event_values[measure]['running'][all_event_values[measure]\n ['running'] < x_max], bins=x_val_bins, density=True)\n", (30292, 30412), True, 'import numpy as np\n'), ((30445, 30571), 'numpy.histogram', 'np.histogram', (["all_event_values[measure]['rest'][all_event_values[measure]['rest'] < x_max]"], {'bins': 'x_val_bins', 'density': '(True)'}), "(all_event_values[measure]['rest'][all_event_values[measure][\n 'rest'] < x_max], bins=x_val_bins, density=True)\n", (30457, 30571), True, 'import numpy as np\n'), ((30931, 30970), 'analysis.general_utils.aqua_utils.get_measure_names', 'aqua_utils.get_measure_names', (['[measure]'], {}), '([measure])\n', (30959, 30970), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((31420, 31459), 'analysis.general_utils.aqua_utils.get_measure_names', 'aqua_utils.get_measure_names', (['[measure]'], {}), '([measure])\n', (31448, 31459), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((31829, 31852), 'os.path.join', 'os.path.join', (['path', 'id_'], {}), '(path, id_)\n', (31841, 31852), False, 'import os, sys, glob\n'), ((32921, 33007), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""rest_to_run_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'rest_to_run_speed')\n", (32933, 33007), False, 'import os, sys, glob\n'), ((33257, 33355), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""rest_to_run_vibrisastimtiming"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'rest_to_run_vibrisastimtiming')\n", (33269, 33355), False, 'import os, sys, glob\n'), ((34426, 34512), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_to_rest_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'run_to_rest_speed')\n", (34438, 34512), False, 'import os, sys, glob\n'), ((35600, 35688), 'os.path.join', 
'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_stick_run_speed"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'run_stick_run_speed')\n", (35612, 35688), False, 'import os, sys, glob\n'), ((35936, 36036), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_stick_run_vibrisastimtiming"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'run_stick_run_vibrisastimtiming')\n", (35948, 36036), False, 'import os, sys, glob\n'), ((5997, 6074), 'analysis.general_utils.plotly_utils.apply_fun_axis_fig', 'plotly_utils.apply_fun_axis_fig', (['plot', '(lambda x: x / astroA_l[0].fr)'], {'axis': '"""x"""'}), "(plot, lambda x: x / astroA_l[0].fr, axis='x')\n", (6028, 6074), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((6127, 6184), 'analysis.general_utils.saving_utils.save_pth_plt_l_log', 'saving_utils.save_pth_plt_l_log', (['[plot]', '[path]'], {'axis': '"""x"""'}), "([plot], [path], axis='x')\n", (6158, 6184), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((6219, 6259), 'analysis.general_utils.saving_utils.save_plotly_fig', 'saving_utils.save_plotly_fig', (['plot', 'path'], {}), '(plot, path)\n', (6247, 6259), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((25192, 25269), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csv_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (25202, 25269), False, 'import csv\n'), ((25850, 25927), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csv_file, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (25860, 25927), False, 'import csv\n'), ((27476, 27504), 'numpy.min', 'np.min', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (27482, 27504), True, 'import numpy as np\n'), ((27506, 27534), 'numpy.max', 'np.max', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (27512, 27534), True, 'import numpy as np\n'), ((27697, 27706), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (27703, 27706), True, 'import numpy as np\n'), ((27708, 27717), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (27714, 27717), True, 'import numpy as np\n'), ((27771, 27801), 'numpy.array', 'np.array', (['all_events_measure_l'], {}), '(all_events_measure_l)\n', (27779, 27801), True, 'import numpy as np\n'), ((28572, 28604), 'os.path.join', 'os.path.join', (['path', "(id_ + '.csv')"], {}), "(path, id_ + '.csv')\n", (28584, 28604), False, 'import os, sys, glob\n'), ((28692, 28715), 'os.path.join', 'os.path.join', (['path', 'id_'], {}), '(path, id_)\n', (28704, 28715), False, 'import os, sys, glob\n'), ((33757, 33862), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""rest_to_run_{aa_setting}_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'rest_to_run_{aa_setting}_proportions')\n", (33769, 33862), False, 'import os, sys, glob\n'), ((34917, 35022), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_to_rest_{aa_setting}_proportions"""'], {}), 
"(output_experiment_path_all_comparison, 'plots',\n f'run_to_rest_{aa_setting}_proportions')\n", (34929, 35022), False, 'import os, sys, glob\n'), ((36456, 36563), 'os.path.join', 'os.path.join', (['output_experiment_path_all_comparison', '"""plots"""', 'f"""run_stick_run_{aa_setting}_proportions"""'], {}), "(output_experiment_path_all_comparison, 'plots',\n f'run_stick_run_{aa_setting}_proportions')\n", (36468, 36563), False, 'import os, sys, glob\n'), ((6539, 6556), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6550, 6556), False, 'import os, sys, glob\n'), ((25105, 25142), 'os.path.join', 'os.path.join', (["(plot_path + '-data.csv')"], {}), "(plot_path + '-data.csv')\n", (25117, 25142), False, 'import os, sys, glob\n'), ((25768, 25800), 'os.path.join', 'os.path.join', (["(plot_path + '.csv')"], {}), "(plot_path + '.csv')\n", (25780, 25800), False, 'import os, sys, glob\n'), ((27105, 27144), 'analysis.general_utils.aqua_utils.get_measure_names', 'aqua_utils.get_measure_names', (['[measure]'], {}), '([measure])\n', (27133, 27144), False, 'from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils\n'), ((28140, 28149), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (28146, 28149), True, 'import numpy as np\n'), ((6920, 6944), 'numpy.array', 'np.array', (["temp_d['data']"], {}), "(temp_d['data'])\n", (6928, 6944), True, 'import numpy as np\n')]
|
# 2018/11/01~2018/07/12
# <NAME>, <EMAIL>.
"""
graphML.py Module for basic GSP and graph machine learning functions.
Functionals
LSIGF: Applies a linear shift-invariant graph filter
spectralGF: Applies a linear shift-invariant graph filter in spectral form
NVGF: Applies a node-variant graph filter
EVGF: Applies an edge-variant graph filter
learnAttentionGSO: Computes the GSO following the attention mechanism
graphAttention: Applies a graph attention layer
Filtering Layers (nn.Module)
GraphFilter: Creates a graph convolutional layer using LSI graph filters
SpectralGF: Creates a graph convolutional layer using LSI graph filters in
spectral form
NodeVariantGF: Creates a graph filtering layer using node-variant graph filters
EdgeVariantGF: Creates a graph filtering layer using edge-variant graph filters
GraphAttentional: Creates a layer using graph attention mechanisms
Activation Functions - Nonlinearities (nn.Module)
MaxLocalActivation: Creates a localized max activation function layer
MedianLocalActivation: Creates a localized median activation function layer
NoActivation: Creates a layer for no activation function
Summarizing Functions - Pooling (nn.Module)
NoPool: No summarizing function.
MaxPoolLocal: Max-summarizing function
"""
import math
import numpy as np
import torch
import torch.nn as nn
import utils.graphUtils.graphTools as graphTools
zeroTolerance = 1e-9 # Values below this number are considered zero.
infiniteNumber = 1e12 # infinity equals this number
# WARNING: Only scalar bias.
def LSIGF(h, S, x, b=None):
"""
LSIGF(filter_taps, GSO, input, bias=None) Computes the output of a linear
shift-invariant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of filter taps, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f.
Then, the LSI-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
[h_{f,g,e}]_{k} S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# The basic idea of what follows is to start reshaping the input and the
# GSO so the filter coefficients go just as a very plain and simple
# linear operation, so that all the derivatives and stuff on them can be
# easily computed.
# h is output_features x edge_weights x filter_taps x input_features
    # S is edge_weights x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
assert S.shape[0] == E
N = S.shape[1]
assert S.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation we've been using:
# h in F x E x K x G
# S in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
x = x.reshape([B, 1, G, N])
S = S.reshape([1, E, N, N])
z = x.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
x = torch.matmul(x, S) # B x E x G x N
xS = x.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
# We multiply z on the left, and h on the right, the output is to be
# B x N x F (the multiplication is not along the N dimension), so we reshape
# z to be B x N x E x K x G and reshape it to B x N x EKG (remember we
# always reshape the last dimensions), and then make h be E x K x G x F and
# reshape it to EKG x F, and then multiply
y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E*K*G]),
h.reshape([F, E*K*G]).permute(1, 0)).permute(0, 2, 1)
    # And permute again to bring it from B x N x F to B x F x N.
# Finally, add the bias
if b is not None:
y = y + b
return y
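# Illustrative usage sketch (not part of the original module): the shapes
# follow the docstring of LSIGF above, and all values are random placeholders.
#   F, E, K, G, N, B = 4, 1, 3, 2, 10, 5
#   h = torch.randn(F, E, K, G)     # filter taps
#   S = torch.randn(E, N, N)        # graph shift operator
#   x = torch.randn(B, G, N)        # batch of graph signals
#   b = torch.zeros(F, 1)           # one scalar bias per feature, broadcast over nodes
#   y = LSIGF(h, S, x, b)           # output of shape B x F x N = 5 x 4 x 10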
def spectralGF(h, V, VH, x, b=None):
"""
spectralGF(filter_coeff, eigenbasis, eigenbasis_hermitian, input, bias=None)
Computes the output of a linear shift-invariant graph filter in spectral
form applying filter_coefficients on the graph fourier transform of the
input .
Denote as G the number of input features, F the number of output features,
E the number of edge features, N the number of nodes, S_{e} in R^{N x N}
the GSO for edge feature e with S_{e} = V_{e} Lambda_{e} V_{e}^{H} as
eigendecomposition, x in R^{G x N} the input data where x_{g} in R^{N} is
the graph signal representing feature g, and b in R^{F x N} the bias vector,
with b_{f} in R^{N} representing the bias for feature f.
Then, the LSI-GF in spectral form is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{g=1}^{G}
V_{e} diag(h_{f,g,e}) V_{e}^{H} x_{g}
+ b_{f}
for f = 1, ..., F, with h_{f,g,e} in R^{N} the filter coefficients for
output feature f, input feature g and edge feature e.
Inputs:
filter_coeff (torch.tensor): array of filter coefficients; shape:
output_features x edge_features x input_features x number_nodes
eigenbasis (torch.tensor): eigenbasis of the graph shift operator;shape:
edge_features x number_nodes x number_nodes
eigenbasis_hermitian (torch.tensor): hermitian of the eigenbasis; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
Obs.: While we consider most GSOs to be normal (so that the eigenbasis is
an orthonormal basis), this function would also work if V^{-1} is used as
input instead of V^{H}
"""
# The decision to input both V and V_H is to avoid any time spent in
# permuting/inverting the matrix. Because this depends on the graph and not
# the data, it can be done faster if we just input it.
# h is output_features x edge_weights x input_features x number_nodes
    # V is edge_weights x number_nodes x number_nodes
    # VH is edge_weights x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
G = h.shape[2]
N = h.shape[3]
assert V.shape[0] == VH.shape[0] == E
assert V.shape[1] == VH.shape[1] == V.shape[2] == VH.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# h in F x E x G x N
# V in E x N x N
# VH in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# We will do proper matrix multiplication in this case (algebraic
# multiplication using column vectors instead of CS notation using row
# vectors).
# We will multiply separate VH with x, and V with diag(h).
# First, to multiply VH with x, we need to add one dimension for each one
# of them (dimension E for x and dimension B for VH)
x = x.reshape([B, 1, G, N]).permute(0, 1, 3, 2) # B x 1 x N x G
VH = VH.reshape([1, E, N, N]) # 1 x E x N x N
# Now we multiply. Note that we also permute to make it B x E x G x N
# instead of B x E x N x G because we want to multiply for a specific e and
# g, there we do not want to sum (yet) over G.
VHx = torch.matmul(VH, x).permute(0, 1, 3, 2) # B x E x G x N
# Now we want to multiply V * diag(h), both are matrices. So first, we
# add the necessary dimensions (B and G for V and an extra N for h to make
# it a matrix from a vector)
V = V.reshape([1, E, 1, N, N]) # 1 x E x 1 x N x N
# We note that multiplying by a diagonal matrix to the right is equivalent
# to an elementwise multiplication in which each column is multiplied by
# a different number, so we will do this to make it faster (elementwise
# multiplication is faster than matrix multiplication). We need to repeat
# the vector we have columnwise.
diagh = h.reshape([F, E, G, 1, N]).repeat(1, 1, 1, N, 1) # F x E x G x N x N
# And now we do elementwise multiplication
Vdiagh = V * diagh # F x E x G x N x N
# Finally, we make the multiplication of these two matrices. First, we add
# the corresponding dimensions
Vdiagh = Vdiagh.reshape([1, F, E, G, N, N]) # 1 x F x E x G x N x N
VHx = VHx.reshape([B, 1, E, G, N, 1]) # B x 1 x E x G x N x 1
# And do matrix multiplication to get all the corresponding B,F,E,G vectors
VdiaghVHx = torch.matmul(Vdiagh, VHx) # B x F x E x G x N x 1
# Get rid of the last dimension which we do not need anymore
y = VdiaghVHx.squeeze(5) # B x F x E x G x N
# Sum over G
y = torch.sum(y, dim = 3) # B x F x E x N
# Sum over E
y = torch.sum(y, dim = 2) # B x F x N
# Finally, add the bias
if b is not None:
y = y + b
return y
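# Illustrative usage sketch (not part of the original module): build an
# orthonormal eigenbasis from a symmetric GSO and apply the spectral filter.
# torch.linalg.eigh is assumed to be available (PyTorch >= 1.8); all tensors
# below are random placeholders with the shapes from the docstring above.
#   F, E, G, N, B = 4, 1, 2, 10, 5
#   Se = torch.randn(N, N); Se = 0.5 * (Se + Se.T)   # symmetric GSO
#   _, V = torch.linalg.eigh(Se)                     # eigenvectors, N x N
#   V = V.reshape(E, N, N)                           # E x N x N eigenbasis
#   VH = V.transpose(1, 2)                           # Hermitian = transpose in the real case
#   h = torch.randn(F, E, G, N)                      # spectral filter coefficients
#   x = torch.randn(B, G, N)
#   y = spectralGF(h, V, VH, x)                      # B x F x N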
def NVGF(h, S, x, b=None):
"""
NVGF(filter_taps, GSO, input, bias=None) Computes the output of a
node-variant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of shifts, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f. Denote as h_{k}^{efg} in R^{N} the vector with the N
filter taps corresponding to the efg filter for shift k.
Then, the NV-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
diag(h_{k}^{efg}) S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
x number_nodes
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# h is output_features x edge_weights x filter_taps x input_features
# x number_nodes
    # S is edge_weights x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
N = h.shape[4]
assert S.shape[0] == E
assert S.shape[1] == S.shape[2] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# h in F x E x K x G x N
# S in E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
xr = x.reshape([B, 1, G, N])
Sr = S.reshape([1, E, N, N])
z = xr.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
xr = torch.matmul(xr, Sr) # B x E x G x N
xS = xr.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
    # This multiplication with filter taps is "element-wise" on N since for
# each node we have a different element
# First, add the extra dimension (F for z, and B for h)
z = z.reshape([B, 1, E, K, G, N])
h = h.reshape([1, F, E, K, G, N])
# Now let's do elementwise multiplication
zh = z * h
# And sum over the dimensions E, K, G to get B x F x N
y = torch.sum(zh, dim = 4) # Sum over G
y = torch.sum(y, dim = 3) # Sum over K
y = torch.sum(y, dim = 2) # Sum over E
# Finally, add the bias
if b is not None:
y = y + b
return y
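# Illustrative usage sketch (not part of the original module): same call
# pattern as LSIGF, except that each filter tap now carries one value per node.
#   F, E, K, G, N, B = 4, 1, 3, 2, 10, 5
#   h = torch.randn(F, E, K, G, N)   # node-variant filter taps
#   S = torch.randn(E, N, N)
#   x = torch.randn(B, G, N)
#   y = NVGF(h, S, x)                # B x F x N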
def EVGF(S, x, b=None):
"""
EVGF(filter_matrices, input, bias=None) Computes the output of an
edge-variant graph filter on input and then adds bias.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of shifts, N the number of
nodes, Phi_{efg} in R^{N x N} the filter matrix for edge feature e, output
feature f and input feature g (recall that Phi_{efg}^{k} has the same
sparsity pattern as the graph, except for Phi_{efg}^{0} which is expected to
be a diagonal matrix), x in R^{G x N} the input data where x_{g} in R^{N} is
the graph signal representing feature g, and b in R^{F x N} the bias vector,
with b_{f} in R^{N} representing the bias for feature f.
Then, the EV-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
Phi_{efg}^{k:0} x_{g}
+ b_{f}
for f = 1, ..., F, with Phi_{efg}^{k:0} = Phi_{efg}^{k} Phi_{efg}^{k-1} ...
Phi_{efg}^{0}.
Inputs:
filter_matrices (torch.tensor): array of filter matrices; shape:
output_features x edge_features x filter_taps x input_features
x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# We just need to multiply by the filter_matrix recursively, and then
# add for all E, G, and K features.
# S is output_features x edge_features x filter_taps x input_features
# x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = S.shape[0]
E = S.shape[1]
K = S.shape[2]
G = S.shape[3]
N = S.shape[4]
assert S.shape[5] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# S in F x E x K x G x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# We will be doing matrix multiplications in the algebraic way, trying to
# multiply the N x N matrix corresponding to the appropriate e, f, k and g
# dimensions, with the respective x vector (N x 1 column vector)
# For this, we first add the corresponding dimensions (for x we add
# dimensions F, E and the last dimension for column vector)
x = x.reshape([B, 1, 1, G, N, 1])
# When we do index_select along dimension K we get rid of this dimension
Sk = torch.index_select(S, 2, torch.tensor(0).to(S.device)).squeeze(2)
# Sk in F x E x G x N x N
# And we add one further dimension for the batch size B
Sk = Sk.unsqueeze(0) # 1 x F x E x G x N x N
# Matrix multiplication
x = torch.matmul(Sk, x) # B x F x E x G x N x 1
# And we collect this for every k in a vector z, along the K dimension
z = x.reshape([B, F, E, 1, G, N, 1]).squeeze(6) # B x F x E x 1 x G x N
# Now we do all the matrix multiplication
for k in range(1,K):
# Extract the following k
Sk = torch.index_select(S, 2, torch.tensor(k).to(S.device)).squeeze(2)
# Sk in F x E x G x N x N
# Give space for the batch dimension B
Sk = Sk.unsqueeze(0) # 1 x F x E x G x N x N
# Multiply with the previously cumulative Sk * x
x = torch.matmul(Sk, x) # B x F x E x G x N x 1
# Get rid of the last dimension (of a column vector)
Sx = x.reshape([B, F, E, 1, G, N, 1]).squeeze(6) # B x F x E x 1 x G x N
# Add to the z
z = torch.cat((z, Sx), dim = 2) # B x F x E x k x G x N
# Sum over G
z = torch.sum(z, dim = 4)
# Sum over K
z = torch.sum(z, dim = 3)
# Sum over E
y = torch.sum(z, dim = 2)
if b is not None:
y = y + b
return y
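# Illustrative usage sketch (not part of the original module): random filter
# matrices are used only to show the expected shapes; in practice Phi^{0}
# should be diagonal and Phi^{k}, k > 0, should share the sparsity of the graph.
#   F, E, K, G, N, B = 4, 1, 3, 2, 10, 5
#   Phi = torch.randn(F, E, K, G, N, N)   # edge-variant filter matrices
#   x = torch.randn(B, G, N)
#   y = EVGF(Phi, x)                      # B x F x N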
def learnAttentionGSO(x, a, W, S, negative_slope=0.2):
"""
learnAttentionGSO(x, a, W, S) Computes the GSO following the attention
mechanism
Denote as G the number of input features, F the number of output features,
E the number of edge features, P the number of attention heads, Ji the
number of nodes in N_{i}, the neighborhood of node i, and N the number of
nodes. Let x_{i} in R^{G} be the feature associated to node i,
    W^{ep} in R^{F x G} the weight matrix associated to edge feature e and
attention head p, and a^{ep} in R^{2F} the mixing vector. Let
alpha_{ij}^{ep} in R the attention coefficient between nodes i and j, for
edge feature e and attention head p, and let s_{ij}^{e} be the value of
feature e of the edge connecting nodes i and j.
    Each element of the new GSO, alpha_{ij}^{ep}, is computed as
alpha_{ij}^{ep} = softmax_{j} ( LeakyReLU_{beta} (
(a^{ep})^T [cat(W^{ep}x_{i}, W^{ep} x_{j})]
))
for all j in N_{i}, and where beta is the negative slope of the leaky ReLU.
Inputs:
x (torch.tensor): input;
shape: batch_size x input_features x number_nodes
a (torch.tensor): mixing parameter; shape:
number_heads x edge_features x 2 * output_features
W (torch.tensor): linear parameter; shape:
number_heads x edge_features x output_features x input_features
S (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
negative_slope (float): negative slope of the leaky relu (default: 0.2)
Outputs:
aij: output GSO; shape:
batch_size x number_heads x edge_features x number_nodes x number_nodes
"""
B = x.shape[0] # batch_size
G = x.shape[1] # input_features
N = x.shape[2] # number_nodes
P = a.shape[0] # number_heads
E = a.shape[1] # edge_features
assert W.shape[0] == P
assert W.shape[1] == E
F = W.shape[2] # output_features
assert a.shape[2] == int(2*F)
G = W.shape[3] # input_features
assert S.shape[0] == E
assert S.shape[1] == S.shape[2] == N
    # Add ones to the diagonal of the GSO at all edge feature levels so that
    # each node always
# has access to itself. The fact that it's one is not so relevant, because
# the attention coefficient that is learned would compensate for this
S = S + torch.eye(N).reshape([1,N,N]).repeat(E,1,1).to(S.device)
# WARNING:
# (If the GSOs already have self-connections, then these will be added a 1,
# which might be a problem if the self-connection is a -1. I will have to
# think of this more carefully)
# W is of size P x E x F x G
# a is of size P x E x 2F
# Compute Wx for all nodes
x = x.reshape([B, 1, 1, G, N])
W = W.reshape([1, P, E, F, G])
Wx = torch.matmul(W, x) # B x P x E x F x N
# Now, do a_1^T Wx, and a_2^T Wx to get a tensor of shape B x P x E x 1 x N
# because we're applying the inner product on the F dimension.
    a1 = torch.index_select(a, 2, torch.arange(F).to(x.device)) # P x E x F
    a2 = torch.index_select(a, 2, torch.arange(F, 2*F).to(x.device)) # P x E x F
a1Wx = torch.matmul(a1.reshape([1, P, E, 1, F]), Wx) # B x P x E x 1 x N
a2Wx = torch.matmul(a2.reshape([1, P, E, 1, F]), Wx) # B x P x E x 1 x N
# And then, use this to sum them accordingly and create a B x P x E x N x N
# matrix.
aWx = a1Wx + a2Wx.permute(0, 1, 2, 4, 3) # B x P x E x N x N
# Obs.: In this case, we have one column vector and one row vector; then,
# what the sum does, is to repeat the column and the row, respectively,
# until both matrices are of the same size, and then adds up, which is
# precisely what we want to do
# Apply the LeakyRelu
eij = nn.functional.leaky_relu(aWx, negative_slope = negative_slope)
# B x P x E x N x N
# Each element of this N x N matrix is, precisely, e_ij (eq. 1) in the GAT
# paper.
# And apply the softmax. For the softmax, we do not want to consider
# the places where there are no neighbors, so we need to set them to -infty
# so that they will be assigned a zero.
# First, get places where we have edges
maskEdges = torch.sum(torch.abs(S.data), dim = 0)
# Make it a binary matrix
maskEdges = (maskEdges > zeroTolerance).type(x.dtype)
# Make it -infinity where there are zeros
infinityMask = (1-maskEdges) * infiniteNumber
# Compute the softmax plus the -infinity (we first force the places where
# there is no edge to be zero, and then we add -infinity to them)
aij = nn.functional.softmax(eij*maskEdges - infinityMask, dim = 4)
# B x P x E x N x N
# This will give me a matrix of all the alpha_ij coefficients.
# Re-inforce the zeros just to be sure
return aij * maskEdges # B x P x E x N x N
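# Illustrative usage sketch (not part of the original module): P attention
# heads over a random binary adjacency; all tensors are placeholders with the
# shapes given in the docstring above.
#   B, G, N, P, E, F = 5, 2, 10, 3, 1, 4
#   x = torch.randn(B, G, N)
#   a = torch.randn(P, E, 2 * F)              # mixing vectors
#   W = torch.randn(P, E, F, G)               # per-head linear weights
#   S = (torch.rand(E, N, N) > 0.5).float()   # binary GSO
#   aij = learnAttentionGSO(x, a, W, S)       # B x P x E x N x N attention GSO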
def graphAttention(x, a, W, S, negative_slope=0.2):
"""
graphAttention(x, a, W, S) Computes attention following GAT layer taking
into account multiple edge features.
Denote as G the number of input features, F the number of output features,
E the number of edge features, P the number of attention heads, Ji the
number of nodes in N_{i}, the neighborhood of node i, and N the number of
nodes. Let x_{i} in R^{G} be the feature associated to node i,
    W^{ep} in R^{F x G} the weight matrix associated to edge feature e and
attention head p, and a^{ep} in R^{2F} the mixing vector. Let
alpha_{ij}^{ep} in R the attention coefficient between nodes i and j, for
edge feature e and attention head p, and let s_{ij}^{e} be the value of
feature e of the edge connecting nodes i and j.
Let y_{i}^{p} in R^{F} be the output of the graph attention at node i for
attention head p. It is computed as
y_{i}^{p} = \sum_{e=1}^{E}
\sum_{j in N_{i}}
s_{ij}^{e} alpha_{ij}^{ep} W^{ep} x_{j}
with
alpha_{ij}^{ep} = softmax_{j} ( LeakyReLU_{beta} (
(a^{ep})^T [cat(W^{ep}x_{i}, W^{ep} x_{j})]
))
for all j in N_{i}, and where beta is the negative slope of the leaky ReLU.
Inputs:
x (torch.tensor): input;
shape: batch_size x input_features x number_nodes
a (torch.tensor): mixing parameter; shape:
number_heads x edge_features x 2 * output_features
W (torch.tensor): linear parameter; shape:
number_heads x edge_features x output_features x input_features
S (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
negative_slope (float): negative slope of the leaky relu (default: 0.2)
Outputs:
y: output; shape:
batch_size x number_heads x output_features x number_nodes
"""
B = x.shape[0] # batch_size
G = x.shape[1] # input_features
N = x.shape[2] # number_nodes
P = a.shape[0] # number_heads
E = a.shape[1] # edge_features
assert W.shape[0] == P
assert W.shape[1] == E
F = W.shape[2] # output_features
assert a.shape[2] == int(2*F)
G = W.shape[3] # input_features
assert S.shape[0] == E
assert S.shape[1] == S.shape[2] == N
# First, we need to learn the attention GSO
aij = learnAttentionGSO(x, a, W, S, negative_slope = negative_slope)
# B x P x E x N x N
# Then, we need to compute the high-level features
# W is of size P x E x F x G
# a is of size P x E x 2F
# Compute Wx for all nodes
x = x.reshape([B, 1, 1, G, N])
W = W.reshape([1, P, E, F, G])
Wx = torch.matmul(W, x) # B x P x E x F x N
# Finally, we just need to apply this matrix to the Wx which we have already
# computed, and done.
y = torch.matmul(Wx, S.reshape([1, 1, E, N, N]) * aij) # B x P x E x F x N
# And sum over all edges
return torch.sum(y, dim = 2) # B x P x F x N
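# Illustrative usage sketch (not part of the library API, never called here):
# calls graphAttention directly on random data; the GraphAttentional layer
# further below wraps this same computation. Dimensions are arbitrary
# assumptions.
def _example_graphAttention():
    B, G, F, N, P, E = 4, 3, 5, 10, 2, 1
    x = torch.rand(B, G, N)
    a = torch.rand(P, E, 2 * F)
    W = torch.rand(P, E, F, G)
    A = (torch.rand(N, N) > 0.5).float()
    S = ((A + A.t()) > 0).float().reshape(1, N, N)
    y = graphAttention(x, a, W, S)            # one output per attention head
    assert y.shape == (B, P, F, N)
    return y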
class MaxLocalActivation(nn.Module):
# <NAME>, <EMAIL>, 2019/03/15
"""
MaxLocalActivation creates a localized activation function layer on graphs
Initialization:
MaxLocalActivation(K)
Inputs:
K (int): number of hops (>0)
Output:
torch.nn.Module for a localized max activation function layer
Add graph shift operator:
MaxLocalActivation.addGSO(GSO) Before applying the filter, we need to
define the GSO that we are going to use. This allows to change the GSO
while using the same filtering coefficients (as long as the number of
edge features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = MaxLocalActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self, K):
super().__init__()
assert K > 0 # range has to be greater than 0
self.K = K
self.S = None # no GSO assigned yet
self.N = None # no GSO assigned yet (N learned from the GSO)
self.neighborhood = 'None' # no neighborhoods calculated yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(1,self.K+1))
# Initialize parameters
self.reset_parameters()
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
neighborhood = []
maxNeighborhoodSizes = []
for k in range(1,self.K+1):
# For each hop (0,1,...) in the range K
thisNeighborhood = graphTools.computeNeighborhood(
np.array(self.S), k, outputType='matrix')
# compute the k-hop neighborhood
neighborhood.append(torch.tensor(thisNeighborhood))
maxNeighborhoodSizes.append(thisNeighborhood.shape[1])
self.maxNeighborhoodSizes = maxNeighborhoodSizes
self.neighborhood = neighborhood
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x N
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.N
# And given that the self.neighborhood is already a torch.tensor matrix
# we can just go ahead and get it.
# So, x is of shape B x F x N. But we need it to be of shape
# B x F x N x maxNeighbor. Why? Well, because we need to compute the
# maximum between the value of each node and those of its neighbors.
# And we do this by applying a torch.max across the rows (dim = 3) so
# that we end up again with a B x F x N, but having computed the max.
# How to fill those extra dimensions? Well, what we have is neighborhood
# matrix, and we are going to use torch.gather to bring the right
# values (torch.index_select, while more straightforward, only works
# along a single dimension).
# Each row of the matrix neighborhood determines all the neighbors of
# each node: the first row contains all the neighbors of the first node,
# etc.
# The values of the signal at those nodes are contained in the dim = 2
# of x. So, just for now, let's ignore the batch and feature dimensions
# and imagine we have a column vector: N x 1. We have to pick some of
# the elements of this vector and line them up alongside each row
# so that then we can compute the maximum along these rows.
# When we torch.gather along dimension 0, we are selecting which row to
# pick according to each column. Thus, if we have that the first row
# of the neighborhood matrix is [1, 2, 0] means that we want to pick
# the value at row 1 of x, at row 2 of x in the next column, and at row
# 0 of the last column. For these values to be the appropriate ones, we
# have to repeat x as columns to build our b x F x N x maxNeighbor
# matrix.
xK = x # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
# max's it is initialized with the 0-hop neigh. (x itself)
xK = xK.unsqueeze(3) # extra dimension added for concatenation ahead
x = x.unsqueeze(3) # B x F x N x 1
# And the neighbors that we need to gather are the same across the batch
# and feature dimensions, so we need to repeat the matrix along those
# dimensions
for k in range(1,self.K+1):
x_aux = x.repeat([1, 1, 1, self.maxNeighborhoodSizes[k-1]])
gatherNeighbor = self.neighborhood[k-1].reshape(
[1,
1,
self.N,
self.maxNeighborhoodSizes[k-1]]
)
gatherNeighbor = gatherNeighbor.repeat([batchSize,
dimNodeSignals,
1,
1])
# And finally we're in position of getting all the neighbors in line
xNeighbors = torch.gather(x_aux, 2, gatherNeighbor.long())
# B x F x nOutput x maxNeighbor
# Note that this gather function already reduces the dimension to
# nOutputNodes.
# And proceed to compute the maximum along this dimension
v, _ = torch.max(xNeighbors, dim = 3)
v = v.unsqueeze(3) # to concatenate with xK
xK = torch.cat((xK,v),3)
out = torch.matmul(xK,self.weight.unsqueeze(2))
# multiply each k-hop max by corresponding weight
out = out.reshape([batchSize,dimNodeSignals,self.N])
return out
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.K)
self.weight.data.uniform_(-stdv, stdv)
def extra_repr(self):
if self.neighborhood is not None:
reprString = "neighborhood stored"
else:
reprString = "NO neighborhood stored"
return reprString
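# Illustrative usage sketch (not part of the library API, never called here):
# MaxLocalActivation on random graph signals. The random GSO and all dimensions
# are arbitrary assumptions; graphTools.computeNeighborhood is assumed available
# exactly as used in addGSO above.
def _example_MaxLocalActivation():
    N, B, F, K = 10, 4, 3, 2
    A = (np.random.rand(N, N) > 0.5).astype(np.float64)
    A = ((A + A.T) > 0).astype(np.float64)
    np.fill_diagonal(A, 0.)                   # no self-loops in the GSO
    S = torch.tensor(A).unsqueeze(0)          # E x N x N with E = 1
    act = MaxLocalActivation(K)
    act.addGSO(S)                             # precomputes the k-hop neighborhoods
    y = act(torch.rand(B, F, N))              # B x F x N: weighted k-hop maxima
    assert y.shape == (B, F, N)
    return y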
class MedianLocalActivation(nn.Module):
# <NAME>, <EMAIL>, 2019/03/27
"""
MedianLocalActivation creates a localized activation function layer on
graphs
Initialization:
MedianLocalActivation(K)
Inputs:
K (int): number of hops (>0)
Output:
torch.nn.Module for a localized median activation function layer
Add graph shift operator:
MedianLocalActivation.addGSO(GSO) Before applying the filter, we need
to define the GSO that we are going to use. This allows to change the
GSO while using the same filtering coefficients (as long as the number
of edge features is the same; but the number of nodes can change).
This function also calculates the 0-,1-,...,K-hop neighborhoods of every
node
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = MedianLocalActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self, K):
super().__init__()
assert K > 0 # range has to be greater than 0
self.K = K
self.S = None # no GSO assigned yet
self.N = None # no GSO assigned yet (N learned from the GSO)
self.neighborhood = 'None' # no neighborhoods calculated yet
self.masks = 'None' # no mask yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(1,self.K+1))
# Initialize parameters
self.reset_parameters()
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
neighborhood = []
for k in range(1,self.K+1):
# For each hop (0,1,...) in the range K
thisNeighborhood = graphTools.computeNeighborhood(
np.array(self.S), k, outputType='list')
# compute the k-hop neighborhood
neighborhood.append(thisNeighborhood)
self.neighborhood = neighborhood
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x N
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.N
xK = x # xK is a tensor aggregating the 0-hop (x), 1-hop, ..., K-hop
# max's
# It is initialized with the 0-hop neigh. (x itself)
xK = xK.unsqueeze(3) # extra dimension added for concatenation ahead
#x = x.unsqueeze(3) # B x F x N x 1
for k in range(1,self.K+1):
kHopNeighborhood = self.neighborhood[k-1]
# Fetching k-hop neighborhoods of all nodes
kHopMedian = torch.empty(0)
# Initializing the vector that will contain the k-hop median for
# every node
for n in range(self.N):
# Iterating over the nodes
# This step is necessary because here the neighborhoods are
# lists of lists. It is impossible to pad them and feed them as
# a matrix, as this would impact the outcome of the median
# operation
nodeNeighborhood = torch.tensor(np.array(kHopNeighborhood[n]))
neighborhoodLen = len(nodeNeighborhood)
gatherNode = nodeNeighborhood.reshape([1, 1, neighborhoodLen])
gatherNode = gatherNode.repeat([batchSize, dimNodeSignals, 1])
# Reshaping the node neighborhood for the gather operation
xNodeNeighbors = torch.gather(x, 2, gatherNode.long())
# Gathering signal values in the node neighborhood
nodeMedian,_ = torch.median(xNodeNeighbors, dim = 2,
keepdim=True)
# Computing the median in the neighborhood
kHopMedian = torch.cat([kHopMedian,nodeMedian],2)
# Concatenating k-hop medians node by node
kHopMedian = kHopMedian.unsqueeze(3) # Extra dimension for
# concatenation with the previous (k-1)-hop median tensor
xK = torch.cat([xK,kHopMedian],3)
out = torch.matmul(xK,self.weight.unsqueeze(2))
# Multiplying each k-hop median by corresponding trainable weight
out = out.reshape([batchSize,dimNodeSignals,self.N])
return out
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.K)
self.weight.data.uniform_(-stdv, stdv)
def extra_repr(self):
if self.neighborhood is not None:
reprString = "neighborhood stored"
else:
reprString = "NO neighborhood stored"
return reprString
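# Illustrative usage sketch (not part of the library API, never called here):
# MedianLocalActivation is used exactly like MaxLocalActivation above, but each
# k-hop neighborhood is aggregated with a median instead of a max. Dimensions
# are arbitrary assumptions.
def _example_MedianLocalActivation():
    N, B, F, K = 10, 4, 3, 2
    A = (np.random.rand(N, N) > 0.5).astype(np.float64)
    A = ((A + A.T) > 0).astype(np.float64)
    np.fill_diagonal(A, 0.)
    S = torch.tensor(A).unsqueeze(0)          # E x N x N with E = 1
    act = MedianLocalActivation(K)
    act.addGSO(S)                             # precomputes the k-hop neighborhoods
    y = act(torch.rand(B, F, N))              # B x F x N
    assert y.shape == (B, F, N)
    return y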
class NoActivation(nn.Module):
"""
NoActivation creates an activation layer that does nothing
It is for completeness, to be able to switch between linear models
and nonlinear models, without altering the entire architecture model
Initialization:
NoActivation()
Output:
torch.nn.Module for an empty activation layer
Forward call:
y = NoActivation(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x number_nodes
Outputs:
y (torch.tensor): activated data; shape:
batch_size x dim_features x number_nodes
"""
def __init__(self):
super().__init__()
def forward(self, x):
return x
def extra_repr(self):
reprString = "No Activation Function"
return reprString
class NoPool(nn.Module):
"""
This is a pooling layer that actually does no pooling. It has the same input
structure and methods of MaxPoolLocal() for consistency. Basically, this
allows us to change from pooling to no pooling without necessarily creating
a new architecture.
    In any case, this layer is probably not needed in practice, since the
    no-pooling case can be handled directly when defining the architecture.
"""
def __init__(self, nInputNodes, nOutputNodes, nHops):
super().__init__()
self.nInputNodes = nInputNodes
self.nOutputNodes = nOutputNodes
self.nHops = nHops
self.neighborhood = None
def addGSO(self, GSO):
# This is necessary to keep the form of the other pooling strategies
# within the SelectionGNN framework. But we do not care about any GSO.
pass
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x nInputNodes
assert x.shape[2] == self.nInputNodes
# Check that there are at least the same number of nodes that
# we will keep (otherwise, it would be unpooling, instead of
# pooling)
assert x.shape[2] >= self.nOutputNodes
# And do not do anything
return x
def extra_repr(self):
reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % (
self.nInputNodes, self.nOutputNodes, self.nHops)
reprString += "no neighborhood needed"
return reprString
class MaxPoolLocal(nn.Module):
"""
MaxPoolLocal Creates a pooling layer on graphs by selecting nodes
Initialization:
MaxPoolLocal(in_dim, out_dim, number_hops)
Inputs:
in_dim (int): number of nodes at the input
out_dim (int): number of nodes at the output
number_hops (int): number of hops to pool information
Output:
torch.nn.Module for a local max-pooling layer.
Observation: The selected nodes for the output are always the top ones.
Add a neighborhood set:
Add graph shift operator:
GraphFilter.addGSO(GSO) Before being used, we need to define the GSO
that will determine the neighborhood that we are going to pool.
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
        y = MaxPoolLocal(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x dim_features x in_dim
Outputs:
y (torch.tensor): pooled data; shape:
batch_size x dim_features x out_dim
"""
def __init__(self, nInputNodes, nOutputNodes, nHops):
super().__init__()
self.nInputNodes = nInputNodes
self.nOutputNodes = nOutputNodes
self.nHops = nHops
self.neighborhood = None
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N (And I don't care about E, because the
# computeNeighborhood function takes care of it)
self.N = S.shape[1]
assert S.shape[2] == self.N
# Get the device (before operating with S and losing it, it's cheaper
# to store the device now, than to duplicate S -i.e. keep a numpy and a
# tensor copy of S)
device = S.device
# Move the GSO to cpu and to np.array so it can be handled by the
# computeNeighborhood function
S = np.array(S.cpu())
# Compute neighborhood
neighborhood = graphTools.computeNeighborhood(S, self.nHops,
self.nOutputNodes,
self.nInputNodes,'matrix')
# And move the neighborhood back to a tensor
neighborhood = torch.tensor(neighborhood).to(device)
# The neighborhood matrix has to be a tensor of shape
# nOutputNodes x maxNeighborhoodSize
assert neighborhood.shape[0] == self.nOutputNodes
assert neighborhood.max() <= self.nInputNodes
# Store all the relevant information
self.maxNeighborhoodSize = neighborhood.shape[1]
self.neighborhood = neighborhood
def forward(self, x):
# x should be of shape batchSize x dimNodeSignals x nInputNodes
batchSize = x.shape[0]
dimNodeSignals = x.shape[1]
assert x.shape[2] == self.nInputNodes
# Check that there are at least the same number of nodes that
# we will keep (otherwise, it would be unpooling, instead of
# pooling)
assert x.shape[2] >= self.nOutputNodes
# And given that the self.neighborhood is already a torch.tensor matrix
# we can just go ahead and get it.
# So, x is of shape B x F x N. But we need it to be of shape
# B x F x N x maxNeighbor. Why? Well, because we need to compute the
# maximum between the value of each node and those of its neighbors.
# And we do this by applying a torch.max across the rows (dim = 3) so
# that we end up again with a B x F x N, but having computed the max.
# How to fill those extra dimensions? Well, what we have is neighborhood
# matrix, and we are going to use torch.gather to bring the right
# values (torch.index_select, while more straightforward, only works
# along a single dimension).
# Each row of the matrix neighborhood determines all the neighbors of
# each node: the first row contains all the neighbors of the first node,
# etc.
# The values of the signal at those nodes are contained in the dim = 2
# of x. So, just for now, let's ignore the batch and feature dimensions
# and imagine we have a column vector: N x 1. We have to pick some of
# the elements of this vector and line them up alongside each row
# so that then we can compute the maximum along these rows.
# When we torch.gather along dimension 0, we are selecting which row to
# pick according to each column. Thus, if we have that the first row
# of the neighborhood matrix is [1, 2, 0] means that we want to pick
# the value at row 1 of x, at row 2 of x in the next column, and at row
# 0 of the last column. For these values to be the appropriate ones, we
# have to repeat x as columns to build our b x F x N x maxNeighbor
# matrix.
x = x.unsqueeze(3) # B x F x N x 1
x = x.repeat([1, 1, 1, self.maxNeighborhoodSize]) # BxFxNxmaxNeighbor
# And the neighbors that we need to gather are the same across the batch
# and feature dimensions, so we need to repeat the matrix along those
# dimensions
gatherNeighbor = self.neighborhood.reshape([1, 1,
self.nOutputNodes,
self.maxNeighborhoodSize])
gatherNeighbor = gatherNeighbor.repeat([batchSize, dimNodeSignals, 1,1])
# And finally we're in position of getting all the neighbors in line
xNeighbors = torch.gather(x, 2, gatherNeighbor)
# B x F x nOutput x maxNeighbor
# Note that this gather function already reduces the dimension to
# nOutputNodes.
# And proceed to compute the maximum along this dimension
v, _ = torch.max(xNeighbors, dim = 3)
return v
def extra_repr(self):
reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % (
self.nInputNodes, self.nOutputNodes, self.nHops)
if self.neighborhood is not None:
reprString += "neighborhood stored"
else:
reprString += "NO neighborhood stored"
return reprString
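# Illustrative usage sketch (not part of the library API, never called here):
# MaxPoolLocal reducing a 10-node signal to its top 5 nodes with 1-hop
# max-pooling. The GSO and all dimensions are arbitrary assumptions; nodes are
# assumed already ordered by importance, since the layer keeps the top ones.
def _example_MaxPoolLocal():
    nIn, nOut, nHops, B, F = 10, 5, 1, 4, 3
    A = (np.random.rand(nIn, nIn) > 0.5).astype(np.float64)
    A = ((A + A.T) > 0).astype(np.float64)
    np.fill_diagonal(A, 0.)
    S = torch.tensor(A).unsqueeze(0)          # E x N x N with E = 1
    pool = MaxPoolLocal(nIn, nOut, nHops)
    pool.addGSO(S)
    y = pool(torch.rand(B, F, nIn))           # B x F x nOut
    assert y.shape == (B, F, nOut)
    return y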
class GraphFilter(nn.Module):
"""
GraphFilter Creates a (linear) layer that applies a graph filter
Initialization:
GraphFilter(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E = 1, bias = True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u = LSIGF(self.weight, self.S, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
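# Illustrative usage sketch (not part of the library API, never called here):
# a GraphFilter (graph convolutional layer) applied to a batch of graph
# signals. All dimensions and the random GSO are arbitrary assumptions; the
# actual filtering is carried out by LSIGF, defined earlier in this module.
def _example_GraphFilter():
    G, F, K, N, B = 3, 5, 4, 10, 8            # in/out features, taps, nodes, batch
    A = (torch.rand(N, N) > 0.5).float()
    S = ((A + A.t()) > 0).float().reshape(1, N, N)   # E x N x N with E = 1
    gfl = GraphFilter(G, F, K, E=1, bias=True)
    gfl.addGSO(S)
    y = gfl(torch.rand(B, G, N))              # B x F x N
    assert y.shape == (B, F, N)
    return y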
class GraphFilterRNN(nn.Module):
"""
GraphFilterRNN Creates a (linear) layer that applies a graph filter
with Hidden Markov Model
Initialization:
GraphFilterRNN(in_features, out_features, hidden_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
hidden_features (int): number of hidden features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
    Forward call:
        y = GraphFilterRNN(x, h)
        Inputs:
            x (torch.tensor): input data; shape:
                batch_size x in_features x number_nodes
            h (torch.tensor): hidden state; shape:
                batch_size x hidden_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G # in_features
self.F = F # out_features
self.H = H # hidden_features
self.K = K # filter_taps
self.E = E # edge_features
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, E, K, H))
self.weight_U = nn.parameter.Parameter(torch.Tensor(F, E, K, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_U = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            self.register_parameter('bias_A', None)
            self.register_parameter('bias_B', None)
            self.register_parameter('bias_U', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H * self.K)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_u = 1. / math.sqrt(self.H * self.K)
self.weight_U.data.uniform_(-stdv_u, stdv_u)
if self.bias_U is not None:
self.bias_U.data.uniform_(-stdv_u, stdv_u)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
def forward(self, x, h):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N - Nin) \
.type(x.dtype).to(x.device)
), dim=2)
# Compute the filter output
u_a = LSIGF(self.weight_A, self.S, x, self.bias_A)
u_b = LSIGF(self.weight_B, self.S, h, self.bias_B)
h = u_a + u_b
u = LSIGF(self.weight_U, self.S, h, self.bias_U)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d" % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) + \
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
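# Illustrative usage sketch (not part of the library API, never called here):
# one recurrence step of GraphFilterRNN, which filters both the input and the
# hidden state and combines them. Dimensions are arbitrary assumptions; the
# hidden state carries hidden_features per node.
def _example_GraphFilterRNN():
    G, H, F, K, N, B = 3, 6, 5, 4, 10, 8      # in, hidden, out features, taps, nodes, batch
    A = (torch.rand(N, N) > 0.5).float()
    S = ((A + A.t()) > 0).float().reshape(1, N, N)
    rnn = GraphFilterRNN(G, H, F, K, E=1, bias=True)
    rnn.addGSO(S)
    x = torch.rand(B, G, N)                   # input graph signal
    h0 = torch.zeros(B, H, N)                 # initial hidden state
    y = rnn(x, h0)                            # B x F x N
    assert y.shape == (B, F, N)
    return y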
class SpectralGF(nn.Module):
"""
SpectralGF Creates a (linear) layer that applies a LSI graph filter in the
spectral domain using a cubic spline if needed.
Initialization:
        SpectralGF(in_features, out_features, filter_coeff,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_coeff (int): number of filter spectral coefficients
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer) implemented in the spectral domain.
Observation: Filter taps have shape
out_features x edge_features x in_features x filter_coeff
Add graph shift operator:
SpectralGF.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = SpectralGF(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, M, E = 1, bias = True):
# GSOs will be added later.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.M = M
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, G, M))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.M)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has to have 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S # Save S
# Now we need to compute the eigendecomposition and save it
# To compute the eigendecomposition, we use numpy.
# So, first, get S in numpy format.
Snp = np.array(S.data.cpu())
# We will compute the eigendecomposition for each edge feature, so we
# create the E x N x N space for V, VH and Lambda (we need lambda for
# the spline kernel)
V = np.zeros([self.E, self.N, self.N])
VH = np.zeros([self.E, self.N, self.N])
Lambda = np.zeros([self.E, self.N])
# Here we save the resulting spline kernel matrix
splineKernel = np.zeros([self.E, self.N, self.M])
for e in range(self.E):
# Compute the eigendecomposition
Lambda[e,:], V[e,:,:] = np.linalg.eig(Snp[e,:,:])
# Compute the hermitian
VH[e,:,:] = V[e,:,:].conj().T
# Compute the splineKernel basis matrix
splineKernel[e,:,:] = graphTools.splineBasis(self.M, Lambda[e,:])
# Transform everything to tensors of appropriate type on appropriate
# device, and store them.
self.V = torch.tensor(V).type(S.dtype).to(S.device) # E x N x N
self.VH = torch.tensor(VH).type(S.dtype).to(S.device) # E x N x N
self.splineKernel = torch.tensor(splineKernel)\
.type(S.dtype).to(S.device)
# E x N x M
# Once we have computed the splineKernel, we do not need to save the
# eigenvalues.
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# Check if we have enough spectral filter coefficients as needed, or if
# we need to fill out the rest using the spline kernel.
if self.M == self.N:
self.h = self.weight # F x E x G x N (because N = M)
else:
# Adjust dimensions for proper algebraic matrix multiplication
splineKernel = self.splineKernel.reshape([1,self.E,self.N,self.M])
# We will multiply a 1 x E x N x M matrix with a F x E x M x G
# matrix to get the proper F x E x N x G coefficients
self.h = torch.matmul(splineKernel, self.weight.permute(0,1,3,2))
# And now we rearrange it to the same shape that the function takes
self.h = self.h.permute(0,1,3,2) # F x E x G x N
# And now we add the zero padding (if this comes from a pooling
# operation)
if Nin < self.N:
zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device)
x = torch.cat((x, zeroPad), dim = 2)
# Compute the filter output
u = spectralGF(self.h, self.V, self.VH, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
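# Illustrative usage sketch (not part of the library API, never called here):
# a SpectralGF with M < N spectral coefficients, so the spline kernel
# interpolates the remaining frequency responses. A symmetric GSO is assumed so
# the eigendecomposition stays real; all dimensions are arbitrary assumptions.
def _example_SpectralGF():
    G, F, M, N, B = 3, 5, 4, 10, 8            # in/out features, spectral coeffs, nodes, batch
    A = (torch.rand(N, N) > 0.5).float()
    S = ((A + A.t()) > 0).float().reshape(1, N, N)   # symmetric GSO, E = 1
    sgf = SpectralGF(G, F, M, E=1, bias=True)
    sgf.addGSO(S)                             # computes V, V^H and the spline kernel
    y = sgf(torch.rand(B, G, N))              # B x F x N
    assert y.shape == (B, F, N)
    return y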
class NodeVariantGF(nn.Module):
"""
NodeVariantGF Creates a filtering layer that applies a node-variant graph
filter
Initialization:
NodeVariantGF(in_features, out_features, shift_taps, node_taps
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
shift_taps (int): number of filter taps for shifts
node_taps (int): number of filter taps for nodes
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer using node-variant graph
filters.
Observation: Filter taps have shape
out_features x edge_features x shift_taps x in_features x node_taps
Add graph shift operator:
NodeVariantGF.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = NodeVariantGF(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, M, E = 1, bias = True):
# G: Number of input features
# F: Number of output features
# K: Number of filter shift taps
# M: Number of filter node taps
# GSOs will be added later.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.M = M
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G, M))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K * self.M)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
npS = np.array(S.data.cpu()) # Save the GSO as a numpy array because we
# are going to compute the neighbors.
# And now we have to fill up the parameter vector, from M to N
if self.M < self.N:
# The first elements of M (ordered with whatever order we want)
# are the ones associated to independent node taps.
copyNodes = [m for m in range(self.M)]
            # The rest of the nodes will copy one of these M node taps.
            # The way we do this is: if they are connected to one of the M
            # independent nodes, just copy it. If they are not connected,
            # look at the neighbors' neighbors, and so on, until we reach one
            # of the independent nodes.
# Ties are broken by selecting the node with the smallest index
# (which, due to the ordering, is the most important node of all
# the available ones)
neighborList = graphTools.computeNeighborhood(npS, 1,
nb = self.M)
# This gets the list of 1-hop neighbors for all nodes.
# Find the nodes that have no neighbors
nodesWithNoNeighbors = [n for n in range(self.N) \
if len(neighborList[n]) == 0]
# If there are still nodes that didn't find a neighbor
K = 1 # K-hop neighbor we have looked so far
while len(nodesWithNoNeighbors) > 0:
# Looks for the next hop
K += 1
                # Get the neighbors one further hop away
thisNeighborList = graphTools.computeNeighborhood(npS,
K,
nb = self.M)
# Check if we now have neighbors for those that didn't have
# before
for n in nodesWithNoNeighbors:
# Get the neighbors of the node
thisNodeList = thisNeighborList[n]
# If there are neighbors
if len(thisNodeList) > 0:
# Add them to the list
neighborList[n] = thisNodeList
# Recheck if all nodes have non-empty neighbors
nodesWithNoNeighbors = [n for n in range(self.N) \
if len(neighborList[n]) == 0]
# Now we have obtained the list of independent nodes connected to
# all nodes, we keep the one with highest score. And since the
# matrix is already properly ordered, this means keeping the
# smallest index in the neighborList.
for m in range(self.M, self.N):
copyNodes.append(min(neighborList[m]))
# And, finally create the indices of nodes to copy
self.copyNodes = torch.tensor(copyNodes).to(S.device)
elif self.M == self.N:
# In this case, all parameters go into the vector h
self.copyNodes = torch.arange(self.M).to(S.device)
else:
            # This is the rare case in which self.M > self.N, for example, if
# we train in a larger network and deploy in a smaller one. Since
# the matrix is ordered by score, we just keep the first N
# weights
self.copyNodes = torch.arange(self.N).to(S.device)
# OBS.: self.weight is updated on each training step, so we cannot
# define the self.h vector (i.e. the vector with N elements) here,
# because otherwise it wouldn't be updated every time. So we need, in
# the for, to use index_select on the actual weights, to create the
# vector h that is later feed into the NVGF computation.
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# If we have less filter coefficients than the required ones, we need
# to use the copying scheme
if self.M == self.N:
self.h = self.weight
else:
self.h = torch.index_select(self.weight, 4, self.copyNodes)
# And now we add the zero padding
if Nin < self.N:
zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device)
x = torch.cat((x, zeroPad), dim = 2)
# Compute the filter output
u = NVGF(self.h, self.S, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "shift_taps=%d, node_taps=%d, " % (
self.K, self.M) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
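# Illustrative usage sketch (not part of the library API, never called here):
# a NodeVariantGF where only the first M nodes get independent node taps and
# every other node copies the tap of its closest independent neighbor. Nodes
# are assumed ordered by importance; all dimensions are arbitrary assumptions.
def _example_NodeVariantGF():
    G, F, K, M, N, B = 3, 5, 4, 5, 10, 8
    A = (torch.rand(N, N) > 0.5).float()
    S = ((A + A.t()) > 0).float().reshape(1, N, N)
    nvgf = NodeVariantGF(G, F, K, M, E=1, bias=True)
    nvgf.addGSO(S)                            # builds the tap-copying scheme
    y = nvgf(torch.rand(B, G, N))             # B x F x N
    assert y.shape == (B, F, N)
    return y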
class EdgeVariantGF(nn.Module):
"""
EdgeVariantGF Creates a (linear) layer that applies an edge-variant graph
    filter using the masking approach. If fewer nodes than the total number
    of nodes are selected, then the remaining nodes adopt an LSI filter
    (i.e. it becomes a hybrid edge-variant graph filter)
Initialization:
EdgeVariantGF(in_features, out_features, shift_taps,
selected_nodes, number_nodes,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
shift_taps (int): number of shifts to consider
selected_nodes (int): number of selected nodes to implement the EV
part of the filter
number_nodes (int): number of nodes
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer using hybrid
edge-variant graph filters.
Observation: Filter taps have shape
out_features x edge_features x shift_taps x in_features
x number_nodes x number_nodes
These weights are masked by the corresponding sparsity pattern of
the graph and the desired number of selected nodes, so only weights
in the nonzero edges of these nodes will be trained, the
rest of the parameters contain trash. Therefore, the number of
parameters will not reflect the actual number of parameters being
trained.
Add graph shift operator:
EdgeVariantGF.addGSO(GSO) Before applying the filter, we need to define
the GSO that we are going to use. This allows to change the GSO while
using the same filtering coefficients (as long as the number of edge
features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = EdgeVariantGF(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, M, N, E=1, bias = True):
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.M = M # Number of selected nodes
self.N = N # Total number of nodes
self.S = None
# Create parameters for the Edge-Variant part:
self.weightEV = nn.parameter.Parameter(torch.Tensor(F, E, K, G, N, N))
# If we want a hybrid, create parameters
if self.M < self.N:
self.weightLSI = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
else:
self.register_parameter('weightLSI', None)
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K * self.N)
self.weightEV.data.uniform_(-stdv, stdv)
if self.weightLSI is not None:
self.weightLSI.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S # Save the GSO
# Get the identity matrix across all edge features
multipleIdentity = torch.eye(self.N).reshape([1, self.N, self.N])\
.repeat(self.E, 1, 1).to(S.device)
# Compute the nonzero elements of S+I_{N}
sparsityPattern = ((torch.abs(S) + multipleIdentity) > zeroTolerance)
# Change from byte tensors to float tensors (or the same type of data as
# the GSO)
sparsityPattern = sparsityPattern.type(S.dtype)
# But now we need to kill everything that is between elements M and N
# (only if M < N)
if self.M < self.N:
            # Create the ones in the rows
            hybridMaskOnesRows = torch.ones([self.M, self.N])
            # Create the ones in the columns
            hybridMaskOnesCols = torch.ones([self.N - self.M, self.M])
# Create the zeros
hybridMaskZeros = torch.zeros([self.N - self.M, self.N - self.M])
# Concatenate the columns
hybridMask = torch.cat((hybridMaskOnesCols,hybridMaskZeros), dim=1)
# Concatenate the rows
hybridMask = torch.cat((hybridMaskOnesRows,hybridMask), dim=0)
else:
hybridMask = torch.ones([self.N, self.N])
# Now that we have the hybrid mask, we need to mask the sparsityPattern
# we got so far
hybridMask = hybridMask.reshape([1, self.N, self.N]).to(S.device)
# 1 x N x N
sparsityPattern = sparsityPattern * hybridMask
self.sparsityPattern = sparsityPattern.to(S.device)
# E x N x N
# This gives the sparsity pattern for each edge feature
# Now, let's create it of the right shape, so we do not have to go
# around wasting time with reshapes when called in the forward
# The weights have shape F x E x K x G x N x N
# The sparsity pattern has shape E x N x N. And we want to make it
# 1 x E x K x 1 x N x N. The K dimension is to guarantee that for k=0
# we have the identity
multipleIdentity = (multipleIdentity * hybridMask)\
.reshape([1, self.E, 1, 1, self.N, self.N])
# This gives a 1 x E x 1 x 1 x N x N identity matrix
sparsityPattern = sparsityPattern\
.reshape([1, self.E, 1, 1, self.N, self.N])
# This gives a 1 x E x 1 x 1 x N x N sparsity pattern matrix
sparsityPattern = sparsityPattern.repeat(1, 1, self.K-1, 1, 1, 1)
# This repeats the sparsity pattern K-1 times giving a matrix of shape
# 1 x E x (K-1) x 1 x N x N
sparsityPattern = torch.cat((multipleIdentity,sparsityPattern), dim = 2)
        # This should give a 1 x E x K x 1 x N x N matrix with the identity
# in the first element
self.sparsityPatternFull = sparsityPattern.type(S.dtype).to(S.device)
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# Mask the parameters
self.Phi = self.weightEV * self.sparsityPatternFull
# And now we add the zero padding
if Nin < self.N:
zeroPad = torch.zeros(B, F, self.N-Nin).type(x.dtype).to(x.device)
x = torch.cat((x, zeroPad), dim = 2)
# Compute the filter output for the EV part
uEV = EVGF(self.Phi, x, self.bias)
# Check if we need an LSI part
if self.M < self.N:
# Compute the filter output for the LSI part
uLSI = LSIGF(self.weightLSI, self.S, x, self.bias)
else:
# If we don't, just add zero
uLSI = torch.tensor(0., dtype = uEV.dtype).to(uEV.device)
# Add both
u = uEV + uLSI
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "shift_taps=%d, " % (
self.K) + \
"selected_nodes=%d, " % (self.M) +\
"number_nodes=%d, " % (self.N) +\
"edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
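# Illustrative usage sketch (not part of the library API, never called here):
# a hybrid EdgeVariantGF where the first M nodes get edge-variant weights and
# the remaining nodes share an LSI filter. All dimensions are arbitrary
# assumptions.
def _example_EdgeVariantGF():
    G, F, K, M, N, B = 3, 5, 4, 5, 10, 8
    A = (torch.rand(N, N) > 0.5).float()
    S = ((A + A.t()) > 0).float().reshape(1, N, N)
    evgf = EdgeVariantGF(G, F, K, M, N, E=1, bias=True)
    evgf.addGSO(S)                            # builds the masked sparsity pattern
    y = evgf(torch.rand(B, G, N))             # B x F x N
    assert y.shape == (B, F, N)
    return y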
class GraphAttentional(nn.Module):
"""
GraphAttentional Creates a graph attentional layer
Initialization:
GraphAttentional(in_features, out_features, attention_heads,
edge_features=1, nonlinearity=nn.functional.relu,
concatenate=True)
Inputs:
in_features (int): number of input features on top of each node
out_features (int): number of output features on top of each node
attention_heads (int): number of attention_heads
edge_features (int): number of features on top of each edge
(default: 1)
nonlinearity (nn.functional): nonlinearity applied after features
have been updated through attention (default:nn.functional.relu)
concatenate (bool): If True, the output of the attention_heads
attention heads are concatenated to form the output features, if
False, they are averaged (default: True)
Output:
torch.nn.Module for a graph attentional layer.
Add graph shift operator:
GraphAttentional.addGSO(GSO) Before applying the filter, we need to
define the GSO that we are going to use. This allows to change the GSO
while using the same filtering coefficients (as long as the number of
edge features is the same; but the number of nodes can change).
Inputs:
GSO (torch.tensor): graph shift operator; shape:
edge_features x number_nodes x number_nodes
Forward call:
y = GraphAttentional(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E = 1,
nonlinearity = nn.functional.relu, concatenate = True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.S = None # No GSO assigned yet
self.nonlinearity = nonlinearity
self.concatenate = concatenate
# Create parameters:
self.mixer = nn.parameter.Parameter(torch.Tensor(K, E, 2*F))
self.weight = nn.parameter.Parameter(torch.Tensor(K, E, F, G))
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K)
self.weight.data.uniform_(-stdv, stdv)
self.mixer.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 3 dimensions.
assert len(S.shape) == 3
# S is of shape E x N x N
assert S.shape[0] == self.E
self.N = S.shape[1]
assert S.shape[2] == self.N
self.S = S
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# And get the graph attention output
y = graphAttention(x, self.mixer, self.weight, self.S)
# This output is of size B x K x F x N. Now, we can either concatenate
# them (inner layers) or average them (outer layer)
if self.concatenate:
# When we concatenate we first apply the nonlinearity
y = self.nonlinearity(y)
# Concatenate: Make it B x KF x N such that first iterates over f
# and then over k: (k=0,f=0), (k=0,f=1), ..., (k=0,f=F-1), (k=1,f=0),
# (k=1,f=1), ..., etc.
y = y.permute(0, 3, 1, 2)\
.reshape([B, self.N, self.K*self.F])\
.permute(0, 2, 1)
else:
# When we don't, we first average
y = torch.mean(y, dim = 1) # B x F x N
# And then we apply the nonlinearity
y = self.nonlinearity(y)
if Nin < self.N:
y = torch.index_select(y, 2, torch.arange(Nin).to(y.device))
return y
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "attention_heads=%d, " % (
self.K) + "edge_features=%d, " % (self.E)
if self.S is not None:
reprString += "GSO stored: number_nodes=%d" % (self.N)
else:
reprString += "no GSO stored"
return reprString
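# Illustrative usage sketch (not part of the library API, never called here):
# a GraphAttentional layer with two heads whose outputs are concatenated, so
# the per-node output dimension is heads * out_features. Dimensions are
# arbitrary assumptions.
def _example_GraphAttentional():
    G, F, K, N, B = 3, 5, 2, 10, 8            # in feats, out feats, heads, nodes, batch
    A = (torch.rand(N, N) > 0.5).float()
    S = ((A + A.t()) > 0).float().reshape(1, N, N)
    gat = GraphAttentional(G, F, K, E=1, concatenate=True)
    gat.addGSO(S)
    y = gat(torch.rand(B, G, N))              # B x (K*F) x N when concatenating
    assert y.shape == (B, K * F, N)
    return y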
def matrixPowersBatch(S, K):
"""
    matrixPowersBatch(S, K) Computes the matrix powers S_b^k for k = 0, ..., K-1
    for each S_b in the batch, b = 1, ..., B.
    Inputs:
        S (tensor): matrices to compute powers of. It can be either a single
            matrix per batch element: shape batch_size x number_nodes x number_nodes
            or contain edge features: shape
            batch_size x edge_features x number_nodes x number_nodes
        K (int): maximum power to be computed (up to K-1)
    Outputs:
        SK: either a collection of K matrices B x K x N x N (if the input was a
            single matrix) or a collection B x E x K x N x N (if the input was a
            collection of E matrices).
"""
# S can be either a single GSO (N x N) or a collection of GSOs (E x N x N)
if len(S.shape) == 3:
B = S.shape[0]
N = S.shape[1]
assert S.shape[2] == N
E = 1
S = S.unsqueeze(1)
scalarWeights = True
elif len(S.shape) == 4:
B = S.shape[0]
E = S.shape[1]
N = S.shape[2]
assert S.shape[3] == N
scalarWeights = False
# Now, let's build the powers of S:
thisSK = torch.eye(N).repeat([B, E, 1, 1]).to(S.device)
SK = thisSK.unsqueeze(2)
for k in range(1, K):
thisSK = torch.matmul(thisSK, S)
SK = torch.cat((SK, thisSK.unsqueeze(2)), dim=2)
# Take out the first dimension if it was a single GSO
if scalarWeights:
SK = SK.squeeze(1)
return SK
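# Illustrative usage sketch (not part of the library API, never called here):
# precomputing the first K powers of a batch of GSOs, as needed by batchLSIGF
# and GraphFilterBatchGSO below. Dimensions are arbitrary assumptions.
def _example_matrixPowersBatch():
    B, N, K = 8, 10, 4
    S = torch.rand(B, N, N)                   # one GSO per batch element
    SK = matrixPowersBatch(S, K)              # B x K x N x N (no edge features)
    assert SK.shape == (B, K, N, N)
    # SK[:, 0] is the identity and SK[:, k] equals S^k for each batch element
    return SK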
def batchLSIGF(h, SK, x, bias=None):
"""
batchLSIGF(filter_taps, GSO_K, input, bias=None) Computes the output of a
linear shift-invariant graph filter on input and then adds bias.
In this case, we consider that there is a separate GSO to be used for each
of the signals in the batch. In other words, SK[b] is applied when filtering
x[b] as opposed to applying the same SK to all the graph signals in the
batch.
Inputs:
filter_taps: vector of filter taps; size:
output_features x edge_features x filter_taps x input_features
GSO_K: collection of matrices; size:
batch_size x edge_features x filter_taps x number_nodes x number_nodes
input: input signal; size:
batch_size x input_features x number_nodes
bias: size: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; size:
batch_size x output_features x number_nodes
"""
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
B = SK.shape[0]
assert SK.shape[1] == E
assert SK.shape[2] == K
N = SK.shape[3]
assert SK.shape[4] == N
assert x.shape[0] == B
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation I've been using:
# h in F x E x K x G
# SK in B x E x K x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
SK = SK.permute(1, 2, 0, 3, 4)
# Now, SK is of shape E x K x B x N x N so that we can multiply by x of
# size B x G x N to get
z = torch.matmul(x, SK)
# which is of size E x K x B x G x N.
# Now, we have already carried out the multiplication across the dimension
# of the nodes. Now we need to focus on the K, F, G.
# Let's start by putting B and N in the front
z = z.permute(2, 4, 0, 1, 3).reshape([B, N, E * K * G])
# so that we get z in B x N x EKG.
# Now adjust the filter taps so they are of the form EKG x F
h = h.reshape([F, G * E * K]).permute(1, 0)
# Multiply
y = torch.matmul(z, h)
# to get a result of size B x N x F. And permute
y = y.permute(0, 2, 1)
# to get it back in the right order: B x F x N.
# Now, in this case, each element x[b,:,:] has adequately been filtered by
# the GSO S[b,:,:,:]
if bias is not None:
y = y + bias
return y
class GraphFilterBatchGSO(GraphFilter):
"""
GraphFilterBatchGSO Creates a (linear) layer that applies a graph filter
with a different GSO for each signal in the batch.
This function is typically useful when not only the graph signal is changed
during training, but also the GSO. That is, each data point in the batch is
of the form (x_b,S_b) for b = 1,...,B instead of just x_b. The filter
coefficients are still the same being applied to all graph filters, but both
the GSO and the graph signal are different for each datapoint in the batch.
Initialization:
GraphFilterBatchGSO(in_features, out_features, filter_taps,
edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilterBatchGSO.addGSO(GSO) Before applying the filter, we need to
define the GSOs that we are going to use for each element of the batch.
Each GSO has to have the same number of edges, but the number of nodes
can change.
Inputs:
GSO (tensor): collection of graph shift operators; size can be
batch_size x number_nodes x number_nodes, or
batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilterBatchGSO(x)
Inputs:
x (tensor): input data; size: batch_size x in_features x number_nodes
Outputs:
y (tensor): output; size: batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__(G, F, K, E, bias)
def addGSO(self, S):
# So, we have to take into account the situation where S is either
# B x N x N or B x E x N x N. No matter what, we're always handling,
# internally the dimension E. So if the input is B x N x N, we have to
# unsqueeze it so it becomes B x 1 x N x N.
if len(S.shape) == 3 and S.shape[1] == S.shape[2]:
self.S = S.unsqueeze(1)
elif len(S.shape) == 4 and S.shape[1] == self.E \
and S.shape[2] == S.shape[3]:
self.S = S
else:
# TODO: print error
pass
self.N = self.S.shape[2]
self.B = self.S.shape[0]
self.SK = matrixPowersBatch(self.S, self.K)
def forward(self, x):
# TODO: If S (and consequently SK) hasn't been defined, print an error.
return batchLSIGF(self.weight, self.SK, x, self.bias)
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) + \
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored: number_nodes=%d, batch_size=%d" % (
self.N, self.B)
else:
reprString += "no GSO stored"
return reprString
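# Illustrative usage sketch (not part of the library API, never called here):
# GraphFilterBatchGSO with a different GSO per data point, e.g. when the graph
# itself changes across the batch. Dimensions are arbitrary assumptions.
def _example_GraphFilterBatchGSO():
    G, F, K, N, B = 3, 5, 4, 10, 8
    S = torch.rand(B, N, N)                   # one GSO per batch element
    gfl = GraphFilterBatchGSO(G, F, K, E=1, bias=True)
    gfl.addGSO(S)                             # also precomputes the GSO powers
    y = gfl(torch.rand(B, G, N))              # B x F x N
    assert y.shape == (B, F, N)
    return y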
def BatchLSIGF(h, S, x, b=None):
"""
    BatchLSIGF(filter_taps, GSO, input, bias=None) Computes the output of a
    linear shift-invariant graph filter on input and then adds bias, applying a
    different GSO to each graph signal in the batch.
Denote as G the number of input features, F the number of output features,
E the number of edge features, K the number of filter taps, N the number of
nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the
input data where x_{g} in R^{N} is the graph signal representing feature
g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the
bias for feature f.
Then, the LSI-GF is computed as
y_{f} = \sum_{e=1}^{E}
\sum_{k=0}^{K-1}
\sum_{g=1}^{G}
[h_{f,g,e}]_{k} S_{e}^{k} x_{g}
+ b_{f}
for f = 1, ..., F.
Inputs:
filter_taps (torch.tensor): array of filter taps; shape:
output_features x edge_features x filter_taps x input_features
        GSO (torch.tensor): collection of graph shift operators; shape:
            batch_size x edge_features x number_nodes x number_nodes
input (torch.tensor): input signal; shape:
batch_size x input_features x number_nodes
bias (torch.tensor): shape: output_features x number_nodes
if the same bias is to be applied to all nodes, set number_nodes = 1
so that b_{f} vector becomes b_{f} \mathbf{1}_{N}
Outputs:
output: filtered signals; shape:
batch_size x output_features x number_nodes
"""
# The basic idea of what follows is to start reshaping the input and the
# GSO so the filter coefficients go just as a very plain and simple
# linear operation, so that all the derivatives and stuff on them can be
# easily computed.
# h is output_features x edge_weights x filter_taps x input_features
# S is edge_weighs x number_nodes x number_nodes
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
F = h.shape[0]
E = h.shape[1]
K = h.shape[2]
G = h.shape[3]
assert S.shape[1] == E
N = S.shape[2]
assert S.shape[3] == N
B = x.shape[0]
assert x.shape[1] == G
assert x.shape[2] == N
# Or, in the notation we've been using:
# h in F x E x K x G
# S in B x E x N x N
# x in B x G x N
# b in F x N
# y in B x F x N
# Now, we have x in B x G x N and S in B x E x N x N, and we want to come up
# with matrix multiplication that yields z = x * S with shape
# B x E x K x G x N.
# For this, we first add the corresponding dimensions
x = x.reshape([B, 1, G, N])
S = S.reshape([B, E, N, N])
z = x.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0
# We need to repeat along the E dimension, because for k=0, S_{e} = I for
# all e, and therefore, the same signal values have to be used along all
# edge feature dimensions.
for k in range(1,K):
x = torch.matmul(x, S.float()) # B x E x G x N
xS = x.reshape([B, E, 1, G, N]) # B x E x 1 x G x N
z = torch.cat((z, xS), dim = 2) # B x E x k x G x N
# This output z is of size B x E x K x G x N
# Now we have the x*S_{e}^{k} product, and we need to multiply with the
# filter taps.
# We multiply z on the left, and h on the right, the output is to be
# B x N x F (the multiplication is not along the N dimension), so we reshape
# z to be B x N x E x K x G and reshape it to B x N x EKG (remember we
# always reshape the last dimensions), and then make h be E x K x G x F and
# reshape it to EKG x F, and then multiply
y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E*K*G]),
h.reshape([F, E*K*G]).permute(1, 0)).permute(0, 2, 1)
    # And permute again to bring it from B x N x F to B x F x N.
# Finally, add the bias
if b is not None:
y = y + b
return y
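# Illustrative shape summary for BatchLSIGF (a sketch, not part of the original
# module): with h of shape F x E x K x G, S of shape B x E x N x N, x of shape
# B x G x N and (optionally) b of shape F x N or F x 1,
#     y = BatchLSIGF(h, S, x, b)    # y has shape B x F x N
# i.e. each sample in the batch is filtered with its own GSO S[b].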
class GraphFilterBatch(nn.Module):
"""
    GraphFilterBatch Creates a (linear) layer that applies a graph filter
    with a batched GSO (one GSO per sample in the batch)
    Initialization:
        GraphFilterBatch(in_features, out_features, filter_taps,
                         edge_features=1, bias=True)
Inputs:
in_features (int): number of input features (each feature is a graph
signal)
out_features (int): number of output features (each feature is a
graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows changing the GSO while
        using the same filtering coefficients (as long as the number of edge
        features is the same; but the number of nodes can change).
        Inputs:
            GSO (torch.tensor): graph shift operator; shape:
                batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, F, K, E = 1, bias = True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G))
if bias:
self.bias = nn.parameter.Parameter(torch.Tensor(F, 1))
else:
self.register_parameter('bias', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv = 1. / math.sqrt(self.G * self.K)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u = BatchLSIGF(self.weight, self.S, x, self.bias)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, " % (
self.G, self.F) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
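# Usage sketch for GraphFilterBatch (illustrative only; the tensor names and
# sizes below are hypothetical, assuming the module-level `import torch`):
#     gfl = GraphFilterBatch(G=3, F=5, K=3, E=1)   # 3 -> 5 features, 3 taps
#     gfl.addGSO(S)     # S: B x E x N x N, one GSO per sample in the batch
#     y = gfl(x)        # x: B x 3 x N  ->  y: B x 5 x N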
class GraphFilterRNNBatch(nn.Module):
"""
    GraphFilterRNNBatch Creates a (linear) layer that applies a graph filter
    with a graph-filtered recurrent hidden state
    Initialization:
        GraphFilterRNNBatch(in_features, hidden_features, out_features,
                            filter_taps, edge_features=1, bias=True)
    Inputs:
        in_features (int): number of input features (each feature is a graph
            signal)
        hidden_features (int): number of hidden state features (each feature
            is a graph signal)
        out_features (int): number of output features (each feature is a
            graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows changing the GSO while
        using the same filtering coefficients (as long as the number of edge
        features is the same; but the number of nodes can change).
        Inputs:
            GSO (torch.tensor): graph shift operator; shape:
                batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.H = H # hidden_features
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, E, K, H))
self.weight_D = nn.parameter.Parameter(torch.Tensor(F, E, K, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_D = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            # Register the (absent) biases so that reset_parameters() and
            # forward() can safely check `self.bias_A is not None`, etc.
            self.register_parameter('bias_A', None)
            self.register_parameter('bias_B', None)
            self.register_parameter('bias_D', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H * self.K)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_d = 1. / math.sqrt(self.H * self.K)
self.weight_D.data.uniform_(-stdv_d, stdv_d)
if self.bias_D is not None:
self.bias_D.data.uniform_(-stdv_d, stdv_d)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
# def initialize_hidden(self):
# # the weights are of the form (nb_layers, batch_size, nb_lstm_units)
# hidden = torch.zeros(self.config.batch_size, self.F, self.numAgents)
#
# self.hiddenstateGPU = hidden.to(self.config.device)
def updateHiddenState(self, hiddenState):
self.hiddenState = hiddenState
def detachHiddenState(self):
# tensor.detach() creates a tensor that shares storage with tensor that does not require grad.
# You should use detach() when attempting to remove a tensor from a computation graph
#https://discuss.pytorch.org/t/clone-and-detach-in-v0-4-0/16861/4
self.hiddenState.detach_()
self.hiddenStateNext.detach_()
pass
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A)
u_b = BatchLSIGF(self.weight_B, self.S, self.hiddenState, self.bias_B)
sigma = nn.ReLU(inplace=True)
# sigma = nn.Tanh()
self.hiddenStateNext = sigma(u_a + u_b)
u = BatchLSIGF(self.weight_D, self.S, self.hiddenStateNext, self.bias_D)
self.updateHiddenState(self.hiddenStateNext)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias_D is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
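# Usage sketch for GraphFilterRNNBatch (illustrative only). The hidden state
# must be set externally before the first forward call:
#     rnn = GraphFilterRNNBatch(G=3, H=16, F=5, K=3, E=1)
#     rnn.addGSO(S)                                   # S: B x E x N x N
#     rnn.updateHiddenState(torch.zeros(B, 16, N))    # hidden state: B x H x N
#     y = rnn(x)                                      # x: B x 3 x N -> y: B x 5 x N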
def torchpermul(h, x, b=None):
    # h is output_features x input_features (a plain node-wise linear map)
# x is batch_size x input_features x number_nodes
# b is output_features x number_nodes
# Output:
# y is batch_size x output_features x number_nodes
# Get the parameter numbers:
# in the notation we've been using:
# h in G x H
# x in B x H x N
# b in G x N
# y in B x G x N
# Now, we have x in B x H x N and h in G x H
# B x N x H with H x G -> B x N x G -> B x G x N
    y = torch.matmul(x.permute(0, 2, 1), h.permute(1, 0)).permute(0, 2, 1) # B x G x N
# Finally, add the bias
if b is not None:
y = y + b
return y
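# Shape summary for torchpermul (illustrative only): with h of shape G x H,
# x of shape B x H x N and b of shape G x 1 (or G x N),
#     y = torchpermul(h, x, b)    # y has shape B x G x N
# i.e. the same feature-space linear map h is applied at every node, with no
# graph shift involved.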
class GraphFilterMoRNNBatch(nn.Module):
"""
    GraphFilterMoRNNBatch Creates a (linear) layer that applies a graph filter
    with a recurrent hidden state
    Initialization:
        GraphFilterMoRNNBatch(in_features, hidden_features, out_features,
                              filter_taps, edge_features=1, bias=True)
    Inputs:
        in_features (int): number of input features (each feature is a graph
            signal)
        hidden_features (int): number of hidden state features (each feature
            is a graph signal)
        out_features (int): number of output features (each feature is a
            graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows changing the GSO while
        using the same filtering coefficients (as long as the number of edge
        features is the same; but the number of nodes can change).
        Inputs:
            GSO (torch.tensor): graph shift operator; shape:
                batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.H = H # hidden_features
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, H))
self.weight_D = nn.parameter.Parameter(torch.Tensor(F, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_D = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            # Register the (absent) biases so that reset_parameters() and
            # forward() can safely check `self.bias_A is not None`, etc.
            self.register_parameter('bias_A', None)
            self.register_parameter('bias_B', None)
            self.register_parameter('bias_D', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# todo: check initialize weight
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_d = 1. / math.sqrt(self.H )
self.weight_D.data.uniform_(-stdv_d, stdv_d)
if self.bias_D is not None:
self.bias_D.data.uniform_(-stdv_d, stdv_d)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
def updateHiddenState(self, hiddenState):
self.hiddenState = hiddenState
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A) # B x H x n
# u_b = torch.mul(self.hiddenState.permute(0,2,1), self.weight_B.permute(1, 0)).permute(0,2,1) + self.bias_B # B x H x n
u_b = torchpermul(self.weight_B,self.hiddenState,self.bias_B)
sigma = nn.ReLU(inplace=True)
# sigma = nn.Tanh()
self.hiddenStateNext = sigma(u_a + u_b)
# v1
# u = torch.mul(self.weight_D, self.hiddenState) + self.bias_D
# v2
# u = torch.mul(u_a.permute(0,2,1), self.weight_D.permute(1, 0)).permute(0,2,1) + self.bias_D
u = torchpermul(self.weight_D, self.hiddenStateNext, self.bias_D)
self.updateHiddenState(self.hiddenStateNext)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias_D is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
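# Usage sketch for GraphFilterMoRNNBatch (illustrative only). Only weight_A is
# a graph filter; the hidden-to-hidden (weight_B) and hidden-to-output
# (weight_D) maps are node-wise linear maps applied via torchpermul:
#     cell = GraphFilterMoRNNBatch(G=3, H=16, F=5, K=3, E=1)
#     cell.addGSO(S)                                   # S: B x E x N x N
#     cell.updateHiddenState(torch.zeros(B, 16, N))    # hidden state: B x H x N
#     y = cell(x)                                      # x: B x 3 x N -> y: B x 5 x N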
class GraphFilterL2ShareBatch(nn.Module):
"""
    GraphFilterL2ShareBatch Creates a (linear) layer that applies a graph
    filter with a recurrent hidden state
    Initialization:
        GraphFilterL2ShareBatch(in_features, hidden_features, out_features,
                                filter_taps, edge_features=1, bias=True)
    Inputs:
        in_features (int): number of input features (each feature is a graph
            signal)
        hidden_features (int): number of hidden state features (each feature
            is a graph signal)
        out_features (int): number of output features (each feature is a
            graph signal)
filter_taps (int): number of filter taps
edge_features (int): number of features over each edge
bias (bool): add bias vector (one bias per feature) after graph
filtering
Output:
torch.nn.Module for a graph filtering layer (also known as graph
convolutional layer).
Observation: Filter taps have shape
out_features x edge_features x filter_taps x in_features
Add graph shift operator:
GraphFilter.addGSO(GSO) Before applying the filter, we need to define
        the GSO that we are going to use. This allows changing the GSO while
        using the same filtering coefficients (as long as the number of edge
        features is the same; but the number of nodes can change).
        Inputs:
            GSO (torch.tensor): graph shift operator; shape:
                batch_size x edge_features x number_nodes x number_nodes
Forward call:
y = GraphFilter(x)
Inputs:
x (torch.tensor): input data; shape:
batch_size x in_features x number_nodes
Outputs:
y (torch.tensor): output; shape:
batch_size x out_features x number_nodes
"""
def __init__(self, G, H, F, K, E=1, bias=True):
# K: Number of filter taps
# GSOs will be added later.
# This combines both weight scalars and weight vectors.
# Bias will always be shared and scalar.
# Initialize parent
super().__init__()
# Save parameters:
self.G = G
self.F = F
self.H = H # hidden_features
self.K = K
self.E = E
self.S = None # No GSO assigned yet
# Create parameters:
self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G))
self.weight_B = nn.parameter.Parameter(torch.Tensor(H, H))
self.weight_D = nn.parameter.Parameter(torch.Tensor(F, H))
if bias:
self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1))
self.bias_D = nn.parameter.Parameter(torch.Tensor(F, 1))
        else:
            # Register the (absent) biases so that reset_parameters() and
            # forward() can safely check `self.bias_A is not None`, etc.
            self.register_parameter('bias_A', None)
            self.register_parameter('bias_B', None)
            self.register_parameter('bias_D', None)
# Initialize parameters
self.reset_parameters()
def reset_parameters(self):
# todo: check initialize weight
# Taken from _ConvNd initialization of parameters:
stdv_a = 1. / math.sqrt(self.G * self.K)
self.weight_A.data.uniform_(-stdv_a, stdv_a)
if self.bias_A is not None:
self.bias_A.data.uniform_(-stdv_a, stdv_a)
stdv_b = 1. / math.sqrt(self.H)
self.weight_B.data.uniform_(-stdv_b, stdv_b)
if self.bias_B is not None:
self.bias_B.data.uniform_(-stdv_b, stdv_b)
stdv_d = 1. / math.sqrt(self.H )
self.weight_D.data.uniform_(-stdv_d, stdv_d)
if self.bias_D is not None:
self.bias_D.data.uniform_(-stdv_d, stdv_d)
def addGSO(self, S):
# Every S has 4 dimensions.
assert len(S.shape) == 4
# S is of shape B x E x N x N
assert S.shape[1] == self.E
self.N = S.shape[2]
assert S.shape[3] == self.N
self.S = S
def updateHiddenState(self, hiddenState):
self.hiddenState = hiddenState
def forward(self, x):
# x is of shape: batchSize x dimInFeatures x numberNodesIn
B = x.shape[0]
F = x.shape[1]
Nin = x.shape[2]
# And now we add the zero padding
if Nin < self.N:
x = torch.cat((x,
torch.zeros(B, F, self.N-Nin)\
.type(x.dtype).to(x.device)
), dim = 2)
# Compute the filter output
u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A) # B x H x n
u_b = torchpermul(self.weight_B, self.hiddenState, self.bias_B)
sigma = nn.ReLU(inplace=True)
# sigma = nn.Tanh()
self.hiddenStateNext = sigma(u_a + u_b)
# u = torch.mul(u_a.permute(0,2,1), self.weight_D.permute(1, 0)).permute(0,2,1) + self.bias_D
u = torchpermul(self.weight_D, self.hiddenStateNext, self.bias_D)
self.updateHiddenState(self.hiddenStateNext)
# So far, u is of shape batchSize x dimOutFeatures x numberNodes
# And we want to return a tensor of shape
# batchSize x dimOutFeatures x numberNodesIn
# since the nodes between numberNodesIn and numberNodes are not required
if Nin < self.N:
u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
return u
def extra_repr(self):
reprString = "in_features=%d, out_features=%d, hidden_features=%d, " % (
self.G, self.F, self.H) + "filter_taps=%d, " % (
self.K) + "edge_features=%d, " % (self.E) +\
"bias=%s, " % (self.bias_D is not None)
if self.S is not None:
reprString += "GSO stored"
else:
reprString += "no GSO stored"
return reprString
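if __name__ == "__main__":
    # Minimal smoke test / usage sketch for the batched graph-filter layers
    # defined above (illustrative only, not part of the original module). It
    # relies on the module-level `import torch` used throughout this file;
    # all sizes below are arbitrary.
    B, E, N = 4, 1, 10         # batch size, edge features, number of nodes
    G, H, F, K = 3, 16, 5, 3   # in, hidden, out features and filter taps
    x = torch.rand(B, G, N)    # batch of input graph signals
    S = torch.rand(B, E, N, N)  # one (random) GSO per sample in the batch
    gfl = GraphFilterBatch(G, F, K, E)
    gfl.addGSO(S)
    print(gfl(x).shape)        # expected: torch.Size([4, 5, 10])
    rnn = GraphFilterRNNBatch(G, H, F, K, E)
    rnn.addGSO(S)
    rnn.updateHiddenState(torch.zeros(B, H, N))   # hidden state: B x H x N
    print(rnn(x).shape)        # expected: torch.Size([4, 5, 10])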
|
[
"torch.eye",
"torch.empty",
"torch.cat",
"torch.arange",
"torch.nn.functional.leaky_relu",
"torch.ones",
"torch.median",
"torch.gather",
"numpy.linalg.eig",
"torch.Tensor",
"utils.graphUtils.graphTools.splineBasis",
"torch.zeros",
"torch.matmul",
"utils.graphUtils.graphTools.computeNeighborhood",
"torch.mean",
"math.sqrt",
"torch.max",
"torch.sum",
"torch.nn.ReLU",
"numpy.zeros",
"torch.nn.functional.softmax",
"torch.index_select",
"numpy.array",
"torch.abs",
"torch.tensor"
] |
[((10592, 10617), 'torch.matmul', 'torch.matmul', (['Vdiagh', 'VHx'], {}), '(Vdiagh, VHx)\n', (10604, 10617), False, 'import torch\n'), ((10781, 10800), 'torch.sum', 'torch.sum', (['y'], {'dim': '(3)'}), '(y, dim=3)\n', (10790, 10800), False, 'import torch\n'), ((10844, 10863), 'torch.sum', 'torch.sum', (['y'], {'dim': '(2)'}), '(y, dim=2)\n', (10853, 10863), False, 'import torch\n'), ((14704, 14724), 'torch.sum', 'torch.sum', (['zh'], {'dim': '(4)'}), '(zh, dim=4)\n', (14713, 14724), False, 'import torch\n'), ((14748, 14767), 'torch.sum', 'torch.sum', (['y'], {'dim': '(3)'}), '(y, dim=3)\n', (14757, 14767), False, 'import torch\n'), ((14791, 14810), 'torch.sum', 'torch.sum', (['y'], {'dim': '(2)'}), '(y, dim=2)\n', (14800, 14810), False, 'import torch\n'), ((18115, 18134), 'torch.matmul', 'torch.matmul', (['Sk', 'x'], {}), '(Sk, x)\n', (18127, 18134), False, 'import torch\n'), ((18995, 19014), 'torch.sum', 'torch.sum', (['z'], {'dim': '(4)'}), '(z, dim=4)\n', (19004, 19014), False, 'import torch\n'), ((19042, 19061), 'torch.sum', 'torch.sum', (['z'], {'dim': '(3)'}), '(z, dim=3)\n', (19051, 19061), False, 'import torch\n'), ((19089, 19108), 'torch.sum', 'torch.sum', (['z'], {'dim': '(2)'}), '(z, dim=2)\n', (19098, 19108), False, 'import torch\n'), ((22006, 22024), 'torch.matmul', 'torch.matmul', (['W', 'x'], {}), '(W, x)\n', (22018, 22024), False, 'import torch\n'), ((22964, 23024), 'torch.nn.functional.leaky_relu', 'nn.functional.leaky_relu', (['aWx'], {'negative_slope': 'negative_slope'}), '(aWx, negative_slope=negative_slope)\n', (22988, 23024), True, 'import torch.nn as nn\n'), ((23790, 23850), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['(eij * maskEdges - infinityMask)'], {'dim': '(4)'}), '(eij * maskEdges - infinityMask, dim=4)\n', (23811, 23850), True, 'import torch.nn as nn\n'), ((26780, 26798), 'torch.matmul', 'torch.matmul', (['W', 'x'], {}), '(W, x)\n', (26792, 26798), False, 'import torch\n'), ((27050, 27069), 'torch.sum', 'torch.sum', (['y'], {'dim': '(2)'}), '(y, dim=2)\n', (27059, 27069), False, 'import torch\n'), ((89696, 89715), 'torch.matmul', 'torch.matmul', (['x', 'SK'], {}), '(x, SK)\n', (89708, 89715), False, 'import torch\n'), ((90179, 90197), 'torch.matmul', 'torch.matmul', (['z', 'h'], {}), '(z, h)\n', (90191, 90197), False, 'import torch\n'), ((4612, 4630), 'torch.matmul', 'torch.matmul', (['x', 'S'], {}), '(x, S)\n', (4624, 4630), False, 'import torch\n'), ((4719, 4744), 'torch.cat', 'torch.cat', (['(z, xS)'], {'dim': '(2)'}), '((z, xS), dim=2)\n', (4728, 4744), False, 'import torch\n'), ((14016, 14036), 'torch.matmul', 'torch.matmul', (['xr', 'Sr'], {}), '(xr, Sr)\n', (14028, 14036), False, 'import torch\n'), ((14126, 14151), 'torch.cat', 'torch.cat', (['(z, xS)'], {'dim': '(2)'}), '((z, xS), dim=2)\n', (14135, 14151), False, 'import torch\n'), ((18697, 18716), 'torch.matmul', 'torch.matmul', (['Sk', 'x'], {}), '(Sk, x)\n', (18709, 18716), False, 'import torch\n'), ((18918, 18943), 'torch.cat', 'torch.cat', (['(z, Sx)'], {'dim': '(2)'}), '((z, Sx), dim=2)\n', (18927, 18943), False, 'import torch\n'), ((23414, 23431), 'torch.abs', 'torch.abs', (['S.data'], {}), '(S.data)\n', (23423, 23431), False, 'import torch\n'), ((43556, 43653), 'utils.graphUtils.graphTools.computeNeighborhood', 'graphTools.computeNeighborhood', (['S', 'self.nHops', 'self.nOutputNodes', 'self.nInputNodes', '"""matrix"""'], {}), "(S, self.nHops, self.nOutputNodes, self.\n nInputNodes, 'matrix')\n", (43586, 43653), True, 'import utils.graphUtils.graphTools as graphTools\n'), 
((47150, 47184), 'torch.gather', 'torch.gather', (['x', '(2)', 'gatherNeighbor'], {}), '(x, 2, gatherNeighbor)\n', (47162, 47184), False, 'import torch\n'), ((47406, 47434), 'torch.max', 'torch.max', (['xNeighbors'], {'dim': '(3)'}), '(xNeighbors, dim=3)\n', (47415, 47434), False, 'import torch\n'), ((60892, 60926), 'numpy.zeros', 'np.zeros', (['[self.E, self.N, self.N]'], {}), '([self.E, self.N, self.N])\n', (60900, 60926), True, 'import numpy as np\n'), ((60940, 60974), 'numpy.zeros', 'np.zeros', (['[self.E, self.N, self.N]'], {}), '([self.E, self.N, self.N])\n', (60948, 60974), True, 'import numpy as np\n'), ((60992, 61018), 'numpy.zeros', 'np.zeros', (['[self.E, self.N]'], {}), '([self.E, self.N])\n', (61000, 61018), True, 'import numpy as np\n'), ((61100, 61134), 'numpy.zeros', 'np.zeros', (['[self.E, self.N, self.M]'], {}), '([self.E, self.N, self.M])\n', (61108, 61134), True, 'import numpy as np\n'), ((79414, 79467), 'torch.cat', 'torch.cat', (['(multipleIdentity, sparsityPattern)'], {'dim': '(2)'}), '((multipleIdentity, sparsityPattern), dim=2)\n', (79423, 79467), False, 'import torch\n'), ((87762, 87785), 'torch.matmul', 'torch.matmul', (['thisSK', 'S'], {}), '(thisSK, S)\n', (87774, 87785), False, 'import torch\n'), ((97590, 97615), 'torch.cat', 'torch.cat', (['(z, xS)'], {'dim': '(2)'}), '((z, xS), dim=2)\n', (97599, 97615), False, 'import torch\n'), ((107810, 107831), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (107817, 107831), True, 'import torch.nn as nn\n'), ((114139, 114160), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (114146, 114160), True, 'import torch.nn as nn\n'), ((119852, 119873), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (119859, 119873), True, 'import torch.nn as nn\n'), ((9427, 9446), 'torch.matmul', 'torch.matmul', (['VH', 'x'], {}), '(VH, x)\n', (9439, 9446), False, 'import torch\n'), ((28623, 28650), 'torch.Tensor', 'torch.Tensor', (['(1)', '(self.K + 1)'], {}), '(1, self.K + 1)\n', (28635, 28650), False, 'import torch\n'), ((33141, 33169), 'torch.max', 'torch.max', (['xNeighbors'], {'dim': '(3)'}), '(xNeighbors, dim=3)\n', (33150, 33169), False, 'import torch\n'), ((33245, 33266), 'torch.cat', 'torch.cat', (['(xK, v)', '(3)'], {}), '((xK, v), 3)\n', (33254, 33266), False, 'import torch\n'), ((33575, 33592), 'math.sqrt', 'math.sqrt', (['self.K'], {}), '(self.K)\n', (33584, 33592), False, 'import math\n'), ((35550, 35577), 'torch.Tensor', 'torch.Tensor', (['(1)', '(self.K + 1)'], {}), '(1, self.K + 1)\n', (35562, 35577), False, 'import torch\n'), ((36989, 37003), 'torch.empty', 'torch.empty', (['(0)'], {}), '(0)\n', (37000, 37003), False, 'import torch\n'), ((38420, 38450), 'torch.cat', 'torch.cat', (['[xK, kHopMedian]', '(3)'], {}), '([xK, kHopMedian], 3)\n', (38429, 38450), False, 'import torch\n'), ((38775, 38792), 'math.sqrt', 'math.sqrt', (['self.K'], {}), '(self.K)\n', (38784, 38792), False, 'import math\n'), ((50009, 50033), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G'], {}), '(F, E, K, G)\n', (50021, 50033), False, 'import torch\n'), ((50359, 50385), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (50368, 50385), False, 'import math\n'), ((54563, 54587), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'G'], {}), '(H, E, K, G)\n', (54575, 54587), False, 'import torch\n'), ((54636, 54660), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'H'], {}), '(H, E, K, H)\n', (54648, 54660), False, 'import torch\n'), ((54709, 
54733), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'H'], {}), '(F, E, K, H)\n', (54721, 54733), False, 'import torch\n'), ((55201, 55227), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (55210, 55227), False, 'import math\n'), ((55395, 55421), 'math.sqrt', 'math.sqrt', (['(self.H * self.K)'], {}), '(self.H * self.K)\n', (55404, 55421), False, 'import math\n'), ((55589, 55615), 'math.sqrt', 'math.sqrt', (['(self.H * self.K)'], {}), '(self.H * self.K)\n', (55598, 55615), False, 'import math\n'), ((59715, 59739), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'G', 'M'], {}), '(F, E, G, M)\n', (59727, 59739), False, 'import torch\n'), ((60065, 60091), 'math.sqrt', 'math.sqrt', (['(self.G * self.M)'], {}), '(self.G * self.M)\n', (60074, 60091), False, 'import math\n'), ((61248, 61275), 'numpy.linalg.eig', 'np.linalg.eig', (['Snp[e, :, :]'], {}), '(Snp[e, :, :])\n', (61261, 61275), True, 'import numpy as np\n'), ((61438, 61482), 'utils.graphUtils.graphTools.splineBasis', 'graphTools.splineBasis', (['self.M', 'Lambda[e, :]'], {}), '(self.M, Lambda[e, :])\n', (61460, 61482), True, 'import utils.graphUtils.graphTools as graphTools\n'), ((63124, 63154), 'torch.cat', 'torch.cat', (['(x, zeroPad)'], {'dim': '(2)'}), '((x, zeroPad), dim=2)\n', (63133, 63154), False, 'import torch\n'), ((66475, 66502), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G', 'M'], {}), '(F, E, K, G, M)\n', (66487, 66502), False, 'import torch\n'), ((66828, 66863), 'math.sqrt', 'math.sqrt', (['(self.G * self.K * self.M)'], {}), '(self.G * self.K * self.M)\n', (66837, 66863), False, 'import math\n'), ((68212, 68261), 'utils.graphUtils.graphTools.computeNeighborhood', 'graphTools.computeNeighborhood', (['npS', '(1)'], {'nb': 'self.M'}), '(npS, 1, nb=self.M)\n', (68242, 68261), True, 'import utils.graphUtils.graphTools as graphTools\n'), ((71490, 71540), 'torch.index_select', 'torch.index_select', (['self.weight', '(4)', 'self.copyNodes'], {}), '(self.weight, 4, self.copyNodes)\n', (71508, 71540), False, 'import torch\n'), ((71703, 71733), 'torch.cat', 'torch.cat', (['(x, zeroPad)'], {'dim': '(2)'}), '((x, zeroPad), dim=2)\n', (71712, 71733), False, 'import torch\n'), ((75677, 75707), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G', 'N', 'N'], {}), '(F, E, K, G, N, N)\n', (75689, 75707), False, 'import torch\n'), ((76257, 76292), 'math.sqrt', 'math.sqrt', (['(self.G * self.K * self.N)'], {}), '(self.G * self.K * self.N)\n', (76266, 76292), False, 'import math\n'), ((77468, 77496), 'torch.ones', 'torch.ones', (['[self.M, self.N]'], {}), '([self.M, self.N])\n', (77478, 77496), False, 'import torch\n'), ((77575, 77612), 'torch.ones', 'torch.ones', (['[self.N - self.M, self.M]'], {}), '([self.N - self.M, self.M])\n', (77585, 77612), False, 'import torch\n'), ((77674, 77721), 'torch.zeros', 'torch.zeros', (['[self.N - self.M, self.N - self.M]'], {}), '([self.N - self.M, self.N - self.M])\n', (77685, 77721), False, 'import torch\n'), ((77785, 77840), 'torch.cat', 'torch.cat', (['(hybridMaskOnesCols, hybridMaskZeros)'], {'dim': '(1)'}), '((hybridMaskOnesCols, hybridMaskZeros), dim=1)\n', (77794, 77840), False, 'import torch\n'), ((77900, 77950), 'torch.cat', 'torch.cat', (['(hybridMaskOnesRows, hybridMask)'], {'dim': '(0)'}), '((hybridMaskOnesRows, hybridMask), dim=0)\n', (77909, 77950), False, 'import torch\n'), ((77989, 78017), 'torch.ones', 'torch.ones', (['[self.N, self.N]'], {}), '([self.N, self.N])\n', (77999, 78017), False, 'import torch\n'), ((80074, 80104), 'torch.cat', 'torch.cat', 
(['(x, zeroPad)'], {'dim': '(2)'}), '((x, zeroPad), dim=2)\n', (80083, 80104), False, 'import torch\n'), ((83968, 83993), 'torch.Tensor', 'torch.Tensor', (['K', 'E', '(2 * F)'], {}), '(K, E, 2 * F)\n', (83980, 83993), False, 'import torch\n'), ((84038, 84062), 'torch.Tensor', 'torch.Tensor', (['K', 'E', 'F', 'G'], {}), '(K, E, F, G)\n', (84050, 84062), False, 'import torch\n'), ((84240, 84266), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (84249, 84266), False, 'import math\n'), ((85814, 85834), 'torch.mean', 'torch.mean', (['y'], {'dim': '(1)'}), '(y, dim=1)\n', (85824, 85834), False, 'import torch\n'), ((100654, 100678), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G'], {}), '(F, E, K, G)\n', (100666, 100678), False, 'import torch\n'), ((101004, 101030), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (101013, 101030), False, 'import math\n'), ((105003, 105027), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'G'], {}), '(H, E, K, G)\n', (105015, 105027), False, 'import torch\n'), ((105076, 105100), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'H'], {}), '(H, E, K, H)\n', (105088, 105100), False, 'import torch\n'), ((105149, 105173), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'H'], {}), '(F, E, K, H)\n', (105161, 105173), False, 'import torch\n'), ((105641, 105667), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (105650, 105667), False, 'import math\n'), ((105835, 105861), 'math.sqrt', 'math.sqrt', (['(self.H * self.K)'], {}), '(self.H * self.K)\n', (105844, 105861), False, 'import math\n'), ((106029, 106055), 'math.sqrt', 'math.sqrt', (['(self.H * self.K)'], {}), '(self.H * self.K)\n', (106038, 106055), False, 'import math\n'), ((111844, 111868), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'G'], {}), '(H, E, K, G)\n', (111856, 111868), False, 'import torch\n'), ((111917, 111935), 'torch.Tensor', 'torch.Tensor', (['H', 'H'], {}), '(H, H)\n', (111929, 111935), False, 'import torch\n'), ((111984, 112002), 'torch.Tensor', 'torch.Tensor', (['F', 'H'], {}), '(F, H)\n', (111996, 112002), False, 'import torch\n'), ((112510, 112536), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (112519, 112536), False, 'import math\n'), ((112704, 112721), 'math.sqrt', 'math.sqrt', (['self.H'], {}), '(self.H)\n', (112713, 112721), False, 'import math\n'), ((112889, 112906), 'math.sqrt', 'math.sqrt', (['self.H'], {}), '(self.H)\n', (112898, 112906), False, 'import math\n'), ((117675, 117699), 'torch.Tensor', 'torch.Tensor', (['H', 'E', 'K', 'G'], {}), '(H, E, K, G)\n', (117687, 117699), False, 'import torch\n'), ((117748, 117766), 'torch.Tensor', 'torch.Tensor', (['H', 'H'], {}), '(H, H)\n', (117760, 117766), False, 'import torch\n'), ((117815, 117833), 'torch.Tensor', 'torch.Tensor', (['F', 'H'], {}), '(F, H)\n', (117827, 117833), False, 'import torch\n'), ((118341, 118367), 'math.sqrt', 'math.sqrt', (['(self.G * self.K)'], {}), '(self.G * self.K)\n', (118350, 118367), False, 'import math\n'), ((118535, 118552), 'math.sqrt', 'math.sqrt', (['self.H'], {}), '(self.H)\n', (118544, 118552), False, 'import math\n'), ((118720, 118737), 'math.sqrt', 'math.sqrt', (['self.H'], {}), '(self.H)\n', (118729, 118737), False, 'import math\n'), ((22226, 22241), 'torch.arange', 'torch.arange', (['F'], {}), '(F)\n', (22238, 22241), False, 'import torch\n'), ((22302, 22324), 'torch.arange', 'torch.arange', (['F', '(2 * F)'], {}), '(F, 2 * F)\n', (22314, 22324), False, 'import torch\n'), 
((29281, 29297), 'numpy.array', 'np.array', (['self.S'], {}), '(self.S)\n', (29289, 29297), True, 'import numpy as np\n'), ((29400, 29430), 'torch.tensor', 'torch.tensor', (['thisNeighborhood'], {}), '(thisNeighborhood)\n', (29412, 29430), False, 'import torch\n'), ((36174, 36190), 'numpy.array', 'np.array', (['self.S'], {}), '(self.S)\n', (36182, 36190), True, 'import numpy as np\n'), ((37981, 38030), 'torch.median', 'torch.median', (['xNodeNeighbors'], {'dim': '(2)', 'keepdim': '(True)'}), '(xNodeNeighbors, dim=2, keepdim=True)\n', (37993, 38030), False, 'import torch\n'), ((38165, 38203), 'torch.cat', 'torch.cat', (['[kHopMedian, nodeMedian]', '(2)'], {}), '([kHopMedian, nodeMedian], 2)\n', (38174, 38203), False, 'import torch\n'), ((43832, 43858), 'torch.tensor', 'torch.tensor', (['neighborhood'], {}), '(neighborhood)\n', (43844, 43858), False, 'import torch\n'), ((50099, 50117), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (50111, 50117), False, 'import torch\n'), ((54801, 54819), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (54813, 54819), False, 'import torch\n'), ((54870, 54888), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (54882, 54888), False, 'import torch\n'), ((54939, 54957), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (54951, 54957), False, 'import torch\n'), ((59805, 59823), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (59817, 59823), False, 'import torch\n'), ((66568, 66586), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (66580, 66586), False, 'import torch\n'), ((68913, 68962), 'utils.graphUtils.graphTools.computeNeighborhood', 'graphTools.computeNeighborhood', (['npS', 'K'], {'nb': 'self.M'}), '(npS, K, nb=self.M)\n', (68943, 68962), True, 'import utils.graphUtils.graphTools as graphTools\n'), ((75838, 75862), 'torch.Tensor', 'torch.Tensor', (['F', 'E', 'K', 'G'], {}), '(F, E, K, G)\n', (75850, 75862), False, 'import torch\n'), ((75997, 76015), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (76009, 76015), False, 'import torch\n'), ((77056, 77068), 'torch.abs', 'torch.abs', (['S'], {}), '(S)\n', (77065, 77068), False, 'import torch\n'), ((100744, 100762), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (100756, 100762), False, 'import torch\n'), ((105241, 105259), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (105253, 105259), False, 'import torch\n'), ((105310, 105328), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (105322, 105328), False, 'import torch\n'), ((105379, 105397), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (105391, 105397), False, 'import torch\n'), ((112070, 112088), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (112082, 112088), False, 'import torch\n'), ((112139, 112157), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (112151, 112157), False, 'import torch\n'), ((112208, 112226), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (112220, 112226), False, 'import torch\n'), ((117901, 117919), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (117913, 117919), False, 'import torch\n'), ((117970, 117988), 'torch.Tensor', 'torch.Tensor', (['H', '(1)'], {}), '(H, 1)\n', (117982, 117988), False, 'import torch\n'), ((118039, 118057), 'torch.Tensor', 'torch.Tensor', (['F', '(1)'], {}), '(F, 1)\n', (118051, 118057), False, 'import torch\n'), ((37492, 37521), 'numpy.array', 'np.array', 
(['kHopNeighborhood[n]'], {}), '(kHopNeighborhood[n])\n', (37500, 37521), True, 'import numpy as np\n'), ((70224, 70247), 'torch.tensor', 'torch.tensor', (['copyNodes'], {}), '(copyNodes)\n', (70236, 70247), False, 'import torch\n'), ((80463, 80497), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'dtype': 'uEV.dtype'}), '(0.0, dtype=uEV.dtype)\n', (80475, 80497), False, 'import torch\n'), ((87643, 87655), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (87652, 87655), False, 'import torch\n'), ((17899, 17914), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (17911, 17914), False, 'import torch\n'), ((51597, 51614), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (51609, 51614), False, 'import torch\n'), ((56985, 57002), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (56997, 57002), False, 'import torch\n'), ((61610, 61625), 'torch.tensor', 'torch.tensor', (['V'], {}), '(V)\n', (61622, 61625), False, 'import torch\n'), ((61683, 61699), 'torch.tensor', 'torch.tensor', (['VH'], {}), '(VH)\n', (61695, 61699), False, 'import torch\n'), ((61767, 61793), 'torch.tensor', 'torch.tensor', (['splineKernel'], {}), '(splineKernel)\n', (61779, 61793), False, 'import torch\n'), ((63578, 63595), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (63590, 63595), False, 'import torch\n'), ((70385, 70405), 'torch.arange', 'torch.arange', (['self.M'], {}), '(self.M)\n', (70397, 70405), False, 'import torch\n'), ((70711, 70731), 'torch.arange', 'torch.arange', (['self.N'], {}), '(self.N)\n', (70723, 70731), False, 'import torch\n'), ((72142, 72159), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (72154, 72159), False, 'import torch\n'), ((80879, 80896), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (80891, 80896), False, 'import torch\n'), ((86002, 86019), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (86014, 86019), False, 'import torch\n'), ((102251, 102268), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (102263, 102268), False, 'import torch\n'), ((108367, 108384), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (108379, 108384), False, 'import torch\n'), ((114889, 114906), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (114901, 114906), False, 'import torch\n'), ((120505, 120522), 'torch.arange', 'torch.arange', (['Nin'], {}), '(Nin)\n', (120517, 120522), False, 'import torch\n'), ((18453, 18468), 'torch.tensor', 'torch.tensor', (['k'], {}), '(k)\n', (18465, 18468), False, 'import torch\n'), ((63051, 63082), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (63062, 63082), False, 'import torch\n'), ((71630, 71661), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (71641, 71661), False, 'import torch\n'), ((80001, 80032), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (80012, 80032), False, 'import torch\n'), ((21566, 21578), 'torch.eye', 'torch.eye', (['N'], {}), '(N)\n', (21575, 21578), False, 'import torch\n'), ((76867, 76884), 'torch.eye', 'torch.eye', (['self.N'], {}), '(self.N)\n', (76876, 76884), False, 'import torch\n'), ((51053, 51084), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (51064, 51084), False, 'import torch\n'), ((56300, 56331), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (56311, 56331), False, 'import torch\n'), ((84897, 84928), 'torch.zeros', 'torch.zeros', (['B', 
'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (84908, 84928), False, 'import torch\n'), ((101702, 101733), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (101713, 101733), False, 'import torch\n'), ((107482, 107513), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (107493, 107513), False, 'import torch\n'), ((113680, 113711), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (113691, 113711), False, 'import torch\n'), ((119511, 119542), 'torch.zeros', 'torch.zeros', (['B', 'F', '(self.N - Nin)'], {}), '(B, F, self.N - Nin)\n', (119522, 119542), False, 'import torch\n')]
|